Update bcachefs sources to 61ebcb532a bcachefs: Fix for allocating before backpointer...
diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index bcfd9e5f3c2f7c40a04993f8fc74f91631a8d4bd..3900995d2277c54bc56c2f3a3eaf9d43f8194820 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -151,7 +151,7 @@ static void bch2_quota_reservation_put(struct bch_fs *c,
 static int bch2_quota_reservation_add(struct bch_fs *c,
                                      struct bch_inode_info *inode,
                                      struct quota_res *res,
-                                     unsigned sectors,
+                                     u64 sectors,
                                      bool check_enospc)
 {
        int ret;
@@ -409,7 +409,7 @@ retry:
        offset = iter.pos.offset;
        bch2_trans_iter_exit(&trans, &iter);
 err:
-       if (ret == -EINTR)
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;
        bch2_trans_exit(&trans);
 
@@ -434,22 +434,20 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
 {
        pgoff_t index = start >> PAGE_SECTORS_SHIFT;
        pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
+       unsigned i, j;
 
        if (end <= start)
                return;
 
-       pagevec_init(&pvec);
-
-       do {
-               unsigned nr_pages, i, j;
+       folio_batch_init(&fbatch);
 
-               nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
-                                               &index, end_index);
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-                       u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
-                       u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+       while (filemap_get_folios(inode->v.i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+                       u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+                       u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
                        unsigned pg_offset = max(start, pg_start) - pg_start;
                        unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
                        struct bch_page_state *s;
@@ -458,8 +456,8 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
                        BUG_ON(pg_offset >= PAGE_SECTORS);
                        BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
 
-                       lock_page(page);
-                       s = bch2_page_state(page);
+                       folio_lock(folio);
+                       s = bch2_page_state(&folio->page);
 
                        if (s) {
                                spin_lock(&s->lock);
@@ -468,10 +466,11 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
                                spin_unlock(&s->lock);
                        }
 
-                       unlock_page(page);
+                       folio_unlock(folio);
                }
-               pagevec_release(&pvec);
-       } while (index <= end_index);
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
 }
 
 static void mark_pagecache_reserved(struct bch_inode_info *inode,
@@ -480,23 +479,21 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        pgoff_t index = start >> PAGE_SECTORS_SHIFT;
        pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        s64 i_sectors_delta = 0;
+       unsigned i, j;
 
        if (end <= start)
                return;
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
 
-       do {
-               unsigned nr_pages, i, j;
-
-               nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
-                                               &index, end_index);
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-                       u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
-                       u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+       while (filemap_get_folios(inode->v.i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+                       u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+                       u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
                        unsigned pg_offset = max(start, pg_start) - pg_start;
                        unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
                        struct bch_page_state *s;
@@ -505,8 +502,8 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
                        BUG_ON(pg_offset >= PAGE_SECTORS);
                        BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
 
-                       lock_page(page);
-                       s = bch2_page_state(page);
+                       folio_lock(folio);
+                       s = bch2_page_state(&folio->page);
 
                        if (s) {
                                spin_lock(&s->lock);
@@ -525,10 +522,11 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
                                spin_unlock(&s->lock);
                        }
 
-                       unlock_page(page);
+                       folio_unlock(folio);
                }
-               pagevec_release(&pvec);
-       } while (index <= end_index);
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
 
        i_sectors_acct(c, inode, NULL, i_sectors_delta);
 }
@@ -608,7 +606,7 @@ static void bch2_page_reservation_put(struct bch_fs *c,
 static int bch2_page_reservation_get(struct bch_fs *c,
                        struct bch_inode_info *inode, struct page *page,
                        struct bch2_page_reservation *res,
-                       unsigned offset, unsigned len, bool check_enospc)
+                       unsigned offset, unsigned len)
 {
        struct bch_page_state *s = bch2_page_state_create(page, 0);
        unsigned i, disk_sectors = 0, quota_sectors = 0;
@@ -628,19 +626,14 @@ static int bch2_page_reservation_get(struct bch_fs *c,
        }
 
        if (disk_sectors) {
-               ret = bch2_disk_reservation_add(c, &res->disk,
-                                               disk_sectors,
-                                               !check_enospc
-                                               ? BCH_DISK_RESERVATION_NOFAIL
-                                               : 0);
+               ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
                if (unlikely(ret))
                        return ret;
        }
 
        if (quota_sectors) {
                ret = bch2_quota_reservation_add(c, inode, &res->quota,
-                                                quota_sectors,
-                                                check_enospc);
+                                                quota_sectors, true);
                if (unlikely(ret)) {
                        struct disk_reservation tmp = {
                                .sectors = disk_sectors
@@ -824,7 +817,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
                }
        }
 
-       if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
+       if (bch2_page_reservation_get(c, inode, page, &res, 0, len)) {
                unlock_page(page);
                ret = VM_FAULT_SIGBUS;
                goto out;
@@ -850,38 +843,14 @@ void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
        bch2_clear_page_bits(&folio->page);
 }
 
-int bch2_releasepage(struct page *page, gfp_t gfp_mask)
-{
-       if (PageDirty(page))
-               return 0;
-
-       bch2_clear_page_bits(page);
-       return 1;
-}
-
-#ifdef CONFIG_MIGRATION
-int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
-                     struct page *page, enum migrate_mode mode)
+bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
 {
-       int ret;
-
-       EBUG_ON(!PageLocked(page));
-       EBUG_ON(!PageLocked(newpage));
-
-       ret = migrate_page_move_mapping(mapping, newpage, page, 0);
-       if (ret != MIGRATEPAGE_SUCCESS)
-               return ret;
+       if (folio_test_dirty(folio) || folio_test_writeback(folio))
+               return false;
 
-       if (PagePrivate(page))
-               attach_page_private(newpage, detach_page_private(page));
-
-       if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
-       else
-               migrate_page_states(newpage, page);
-       return MIGRATEPAGE_SUCCESS;
+       bch2_clear_page_bits(&folio->page);
+       return true;
 }
-#endif
 
 /* readpage(s): */
 
@@ -1045,10 +1014,9 @@ retry:
                 * read_extent -> io_time_reset may cause a transaction restart
                 * without returning an error, we need to check for that here:
                 */
-               if (!bch2_trans_relock(trans)) {
-                       ret = -EINTR;
+               ret = bch2_trans_relock(trans);
+               if (ret)
                        break;
-               }
 
                bch2_btree_iter_set_pos(&iter,
                                POS(inum.inum, rbio->bio.bi_iter.bi_sector));
@@ -1101,7 +1069,7 @@ retry:
 err:
        bch2_trans_iter_exit(trans, &iter);
 
-       if (ret == -EINTR)
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;
 
        if (ret) {
@@ -1175,20 +1143,6 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
        bch2_trans_exit(&trans);
 }
 
-int bch2_readpage(struct file *file, struct page *page)
-{
-       struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
-       struct bch_fs *c = inode->v.i_sb->s_fs_info;
-       struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
-       struct bch_read_bio *rbio;
-
-       rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read), opts);
-       rbio->bio.bi_end_io = bch2_readpages_end_io;
-
-       __bchfs_readpage(c, rbio, inode_inum(inode), page);
-       return 0;
-}
-
 static void bch2_read_single_page_end_io(struct bio *bio)
 {
        complete(bio->bi_private);
@@ -1221,6 +1175,16 @@ static int bch2_read_single_page(struct page *page,
        return 0;
 }
 
+int bch2_read_folio(struct file *file, struct folio *folio)
+{
+       struct page *page = &folio->page;
+       int ret;
+
+       ret = bch2_read_single_page(page, page->mapping);
+       folio_unlock(folio);
+       return bch2_err_class(ret);
+}
+
 /* writepages: */
 
 struct bch_writepage_state {
@@ -1254,8 +1218,6 @@ static void bch2_writepage_io_done(struct closure *cl)
        struct bio_vec *bvec;
        unsigned i;
 
-       up(&io->op.c->io_in_flight);
-
        if (io->op.error) {
                set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
 
@@ -1318,8 +1280,6 @@ static void bch2_writepage_do_io(struct bch_writepage_state *w)
 {
        struct bch_writepage_io *io = w->io;
 
-       down(&io->op.c->io_in_flight);
-
        w->io = NULL;
        closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
        continue_at(&io->cl, bch2_writepage_io_done, NULL);
@@ -1506,13 +1466,13 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc
        if (w.io)
                bch2_writepage_do_io(&w);
        blk_finish_plug(&plug);
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* buffered writes: */
 
 int bch2_write_begin(struct file *file, struct address_space *mapping,
-                    loff_t pos, unsigned len, unsigned flags,
+                    loff_t pos, unsigned len,
                     struct page **pagep, void **fsdata)
 {
        struct bch_inode_info *inode = to_bch_ei(mapping->host);
@@ -1532,7 +1492,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
 
        bch2_pagecache_add_get(&inode->ei_pagecache_lock);
 
-       page = grab_cache_page_write_begin(mapping, index, flags);
+       page = grab_cache_page_write_begin(mapping, index);
        if (!page)
                goto err_unlock;
 
@@ -1562,11 +1522,10 @@ out:
        if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
                ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
                if (ret)
-                       goto out;
+                       goto err;
        }
 
-       ret = bch2_page_reservation_get(c, inode, page, res,
-                                       offset, len, true);
+       ret = bch2_page_reservation_get(c, inode, page, res, offset, len);
        if (ret) {
                if (!PageUptodate(page)) {
                        /*
@@ -1591,7 +1550,7 @@ err_unlock:
        bch2_pagecache_add_put(&inode->ei_pagecache_lock);
        kfree(res);
        *fsdata = NULL;
-       return ret;
+       return bch2_err_class(ret);
 }
 
 int bch2_write_end(struct file *file, struct address_space *mapping,
@@ -1663,7 +1622,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
        bch2_page_reservation_init(c, inode, &res);
 
        for (i = 0; i < nr_pages; i++) {
-               pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
+               pages[i] = grab_cache_page_write_begin(mapping, index + i);
                if (!pages[i]) {
                        nr_pages = i;
                        if (!i) {
@@ -1707,10 +1666,21 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
                                goto out;
                }
 
+               /*
+                * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
+                * supposed to write as much as we have disk space for.
+                *
+                * On failure here we should still write out a partial page if
+                * we aren't completely out of disk space - we don't do that
+                * yet:
+                */
                ret = bch2_page_reservation_get(c, inode, page, &res,
-                                               pg_offset, pg_len, true);
-               if (ret)
-                       goto out;
+                                               pg_offset, pg_len);
+               if (unlikely(ret)) {
+                       if (!reserved)
+                               goto out;
+                       break;
+               }
 
                reserved += pg_len;
        }
@@ -1719,13 +1689,13 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
                for (i = 0; i < nr_pages; i++)
                        flush_dcache_page(pages[i]);
 
-       while (copied < len) {
+       while (copied < reserved) {
                struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
                unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
-               unsigned pg_len = min_t(unsigned, len - copied,
+               unsigned pg_len = min_t(unsigned, reserved - copied,
                                        PAGE_SIZE - pg_offset);
                unsigned pg_copied = copy_page_from_iter_atomic(page,
-                                               pg_offset, pg_len,iter);
+                                               pg_offset, pg_len, iter);
 
                if (!pg_copied)
                        break;
@@ -2015,7 +1985,7 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                                        iocb->ki_pos,
                                        iocb->ki_pos + count - 1);
                if (ret < 0)
-                       return ret;
+                       goto out;
 
                file_accessed(file);
 
@@ -2030,8 +2000,8 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                ret = generic_file_read_iter(iocb, iter);
                bch2_pagecache_add_put(&inode->ei_pagecache_lock);
        }
-
-       return ret;
+out:
+       return bch2_err_class(ret);
 }
 
 /* O_DIRECT writes */
@@ -2073,7 +2043,7 @@ retry:
        offset = iter.pos.offset;
        bch2_trans_iter_exit(&trans, &iter);
 err:
-       if (err == -EINTR)
+       if (bch2_err_matches(err, BCH_ERR_transaction_restart))
                goto retry;
        bch2_trans_exit(&trans);
 
@@ -2099,8 +2069,6 @@ static long bch2_dio_write_loop(struct dio_write *dio)
        if (dio->loop)
                goto loop;
 
-       down(&c->io_in_flight);
-
        while (1) {
                iter_count = dio->iter.count;
 
@@ -2180,8 +2148,8 @@ static long bch2_dio_write_loop(struct dio_write *dio)
                        struct iovec *iov = dio->inline_vecs;
 
                        if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
-                               iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
-                                             GFP_KERNEL);
+                               iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
+                                                   GFP_KERNEL);
                                if (unlikely(!iov)) {
                                        dio->sync = sync = true;
                                        goto do_io;
@@ -2231,7 +2199,6 @@ loop:
 
        ret = dio->op.error ?: ((long) dio->written << 9);
 err:
-       up(&c->io_in_flight);
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
        bch2_quota_reservation_put(c, inode, &dio->quota_res);
 
@@ -2246,6 +2213,9 @@ err:
        /* inode->i_dio_count is our ref on inode and thus bch_fs */
        inode_dio_end(&inode->v);
 
+       if (ret < 0)
+               ret = bch2_err_class(ret);
+
        if (!sync) {
                req->ki_complete(req, ret);
                ret = -EIOCBQUEUED;
@@ -2352,8 +2322,10 @@ ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct bch_inode_info *inode = file_bch_inode(file);
        ssize_t ret;
 
-       if (iocb->ki_flags & IOCB_DIRECT)
-               return bch2_direct_write(iocb, from);
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               ret = bch2_direct_write(iocb, from);
+               goto out;
+       }
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(&inode->v);
@@ -2380,8 +2352,8 @@ unlock:
 
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
-
-       return ret;
+out:
+       return bch2_err_class(ret);
 }
 
 /* fsync: */
@@ -2415,7 +2387,7 @@ int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        ret2 = sync_inode_metadata(&inode->v, 1);
        ret3 = bch2_flush_inode(c, inode_inum(inode));
 
-       return ret ?: ret2 ?: ret3;
+       return bch2_err_class(ret ?: ret2 ?: ret3);
 }
 
 /* truncate: */
@@ -2449,7 +2421,7 @@ retry:
        start = iter.pos;
        bch2_trans_iter_exit(&trans, &iter);
 err:
-       if (ret == -EINTR)
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;
 
        bch2_trans_exit(&trans);
@@ -2721,7 +2693,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
        ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
 err:
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* fallocate: */
@@ -2752,7 +2724,7 @@ static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len
 
        truncate_pagecache_range(&inode->v, offset, end - 1);
 
-       if (block_start < block_end ) {
+       if (block_start < block_end) {
                s64 i_sectors_delta = 0;
 
                ret = bch2_fpunch(c, inode_inum(inode),
@@ -2839,7 +2811,8 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
        bch2_trans_copy_iter(&dst, &src);
        bch2_trans_copy_iter(&del, &src);
 
-       while (ret == 0 || ret == -EINTR) {
+       while (ret == 0 ||
+              bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
                struct disk_reservation disk_res =
                        bch2_disk_reservation_init(c, 0);
                struct bkey_i delete;
@@ -3041,14 +3014,14 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
 bkey_err:
                bch2_quota_reservation_put(c, inode, &quota_res);
                bch2_disk_reservation_put(c, &disk_res);
-               if (ret == -EINTR)
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
        }
 
        bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
        mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
 
-       if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
+       if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
                struct quota_res quota_res = { 0 };
                s64 i_sectors_delta = 0;
 
@@ -3099,7 +3072,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
         * so that the VFS cache i_size is consistent with the btree i_size:
         */
        if (ret &&
-           !(ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)))
+           !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
                return ret;
 
        if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
@@ -3134,6 +3107,10 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
        inode_dio_wait(&inode->v);
        bch2_pagecache_block_get(&inode->ei_pagecache_lock);
 
+       ret = file_modified(file);
+       if (ret)
+               goto err;
+
        if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
                ret = bchfs_fallocate(inode, mode, offset, len);
        else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
@@ -3144,13 +3121,61 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
                ret = bchfs_fcollapse_finsert(inode, offset, len, false);
        else
                ret = -EOPNOTSUPP;
-
-
+err:
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
        inode_unlock(&inode->v);
        percpu_ref_put(&c->writes);
 
-       return ret;
+       return bch2_err_class(ret);
+}
+
+static int quota_reserve_range(struct bch_inode_info *inode,
+                              struct quota_res *res,
+                              u64 start, u64 end)
+{
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+       struct btree_trans trans;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       u32 snapshot;
+       u64 sectors = end - start;
+       u64 pos = start;
+       int ret;
+
+       bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
+
+       ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
+       if (ret)
+               goto err;
+
+       bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+                            SPOS(inode->v.i_ino, pos, snapshot), 0);
+
+       while (!(ret = btree_trans_too_many_iters(&trans)) &&
+              (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
+              !(ret = bkey_err(k))) {
+               if (bkey_extent_is_allocation(k.k)) {
+                       u64 s = min(end, k.k->p.offset) -
+                               max(start, bkey_start_offset(k.k));
+                       BUG_ON(s > sectors);
+                       sectors -= s;
+               }
+               bch2_btree_iter_advance(&iter);
+       }
+       pos = iter.pos.offset;
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               goto retry;
+
+       bch2_trans_exit(&trans);
+
+       if (ret)
+               return ret;
+
+       return bch2_quota_reservation_add(c, inode, res, sectors, true);
 }
 
 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
@@ -3160,6 +3185,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
        struct bch_inode_info *src = file_bch_inode(file_src);
        struct bch_inode_info *dst = file_bch_inode(file_dst);
        struct bch_fs *c = src->v.i_sb->s_fs_info;
+       struct quota_res quota_res = { 0 };
        s64 i_sectors_delta = 0;
        u64 aligned_len;
        loff_t ret = 0;
@@ -3180,8 +3206,6 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
 
        bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
 
-       file_update_time(file_dst);
-
        inode_dio_wait(&src->v);
        inode_dio_wait(&dst->v);
 
@@ -3198,6 +3222,13 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
        if (ret)
                goto err;
 
+       ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
+                                 (pos_dst + aligned_len) >> 9);
+       if (ret)
+               goto err;
+
+       file_update_time(file_dst);
+
        mark_pagecache_unallocated(src, pos_src >> 9,
                                   (pos_src + aligned_len) >> 9);
 
@@ -3214,8 +3245,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
         */
        ret = min((u64) ret << 9, (u64) len);
 
-       /* XXX get a quota reservation */
-       i_sectors_acct(c, dst, NULL, i_sectors_delta);
+       i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
 
        spin_lock(&dst->v.i_lock);
        if (pos_dst + ret > dst->v.i_size)
@@ -3226,9 +3256,10 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
            IS_SYNC(file_inode(file_dst)))
                ret = bch2_flush_inode(c, inode_inum(dst));
 err:
+       bch2_quota_reservation_put(c, dst, &quota_res);
        bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
 
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* fseek: */
@@ -3250,36 +3281,40 @@ static loff_t bch2_seek_pagecache_data(struct inode *vinode,
                                       loff_t start_offset,
                                       loff_t end_offset)
 {
-       struct address_space *mapping = vinode->i_mapping;
-       struct page *page;
+       struct folio_batch fbatch;
        pgoff_t start_index     = start_offset >> PAGE_SHIFT;
        pgoff_t end_index       = end_offset >> PAGE_SHIFT;
        pgoff_t index           = start_index;
+       unsigned i;
        loff_t ret;
        int offset;
 
-       while (index <= end_index) {
-               if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
-                       lock_page(page);
+       folio_batch_init(&fbatch);
+
+       while (filemap_get_folios(vinode->i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+
+                       folio_lock(folio);
 
-                       offset = page_data_offset(page,
-                                       page->index == start_index
+                       offset = page_data_offset(&folio->page,
+                                       folio->index == start_index
                                        ? start_offset & (PAGE_SIZE - 1)
                                        : 0);
                        if (offset >= 0) {
-                               ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
+                               ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
                                            offset,
                                            start_offset, end_offset);
-                               unlock_page(page);
-                               put_page(page);
+                               folio_unlock(folio);
+                               folio_batch_release(&fbatch);
                                return ret;
                        }
 
-                       unlock_page(page);
-                       put_page(page);
-               } else {
-                       break;
+                       folio_unlock(folio);
                }
+               folio_batch_release(&fbatch);
+               cond_resched();
        }
 
        return end_offset;
@@ -3321,7 +3356,7 @@ retry:
        }
        bch2_trans_iter_exit(&trans, &iter);
 err:
-       if (ret == -EINTR)
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;
 
        bch2_trans_exit(&trans);
@@ -3436,7 +3471,7 @@ retry:
        }
        bch2_trans_iter_exit(&trans, &iter);
 err:
-       if (ret == -EINTR)
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;
 
        bch2_trans_exit(&trans);
@@ -3451,18 +3486,26 @@ err:
 
 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
 {
+       loff_t ret;
+
        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
-               return generic_file_llseek(file, offset, whence);
+               ret = generic_file_llseek(file, offset, whence);
+               break;
        case SEEK_DATA:
-               return bch2_seek_data(file, offset);
+               ret = bch2_seek_data(file, offset);
+               break;
        case SEEK_HOLE:
-               return bch2_seek_hole(file, offset);
+               ret = bch2_seek_hole(file, offset);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
        }
 
-       return -EINVAL;
+       return bch2_err_class(ret);
 }
 
 void bch2_fs_fsio_exit(struct bch_fs *c)