Update bcachefs sources to 3e93567c51 bcachefs: Switch to local_clock() for fastpath...

diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index 0a7f172f11c64c15e07beb16d591baf65286fa13..02ef3430a30b69f4d321d162f3b2d52b52fedfb4 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -151,7 +151,7 @@ static void bch2_quota_reservation_put(struct bch_fs *c,
 static int bch2_quota_reservation_add(struct bch_fs *c,
                                      struct bch_inode_info *inode,
                                      struct quota_res *res,
-                                     unsigned sectors,
+                                     u64 sectors,
                                      bool check_enospc)
 {
        int ret;
@@ -434,22 +434,20 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
 {
        pgoff_t index = start >> PAGE_SECTORS_SHIFT;
        pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
+       unsigned i, j;
 
        if (end <= start)
                return;
 
-       pagevec_init(&pvec);
-
-       do {
-               unsigned nr_pages, i, j;
+       folio_batch_init(&fbatch);
 
-               nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
-                                               &index, end_index);
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-                       u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
-                       u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+       while (filemap_get_folios(inode->v.i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+                       u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+                       u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
                        unsigned pg_offset = max(start, pg_start) - pg_start;
                        unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
                        struct bch_page_state *s;
@@ -458,8 +456,8 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
                        BUG_ON(pg_offset >= PAGE_SECTORS);
                        BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
 
-                       lock_page(page);
-                       s = bch2_page_state(page);
+                       folio_lock(folio);
+                       s = bch2_page_state(&folio->page);
 
                        if (s) {
                                spin_lock(&s->lock);
@@ -468,10 +466,11 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
                                spin_unlock(&s->lock);
                        }
 
-                       unlock_page(page);
+                       folio_unlock(folio);
                }
-               pagevec_release(&pvec);
-       } while (index <= end_index);
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
 }
 
 static void mark_pagecache_reserved(struct bch_inode_info *inode,
@@ -480,23 +479,21 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        pgoff_t index = start >> PAGE_SECTORS_SHIFT;
        pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        s64 i_sectors_delta = 0;
+       unsigned i, j;
 
        if (end <= start)
                return;
 
-       pagevec_init(&pvec);
-
-       do {
-               unsigned nr_pages, i, j;
+       folio_batch_init(&fbatch);
 
-               nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
-                                               &index, end_index);
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-                       u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
-                       u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+       while (filemap_get_folios(inode->v.i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+                       u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+                       u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
                        unsigned pg_offset = max(start, pg_start) - pg_start;
                        unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
                        struct bch_page_state *s;
@@ -505,8 +502,8 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
                        BUG_ON(pg_offset >= PAGE_SECTORS);
                        BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
 
-                       lock_page(page);
-                       s = bch2_page_state(page);
+                       folio_lock(folio);
+                       s = bch2_page_state(&folio->page);
 
                        if (s) {
                                spin_lock(&s->lock);
@@ -525,10 +522,11 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
                                spin_unlock(&s->lock);
                        }
 
-                       unlock_page(page);
+                       folio_unlock(folio);
                }
-               pagevec_release(&pvec);
-       } while (index <= end_index);
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
 
        i_sectors_acct(c, inode, NULL, i_sectors_delta);
 }
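
The two hunks above convert mark_pagecache_unallocated() and mark_pagecache_reserved() from the old pagevec API to folio batches. As a hedged illustration only (not part of the patch), here is a minimal sketch of the iteration pattern being adopted, using the upstream filemap_get_folios()/folio_batch helpers; walk_folios() and per_folio() are hypothetical names:

	#include <linux/pagemap.h>	/* filemap_get_folios(), folio_lock() */
	#include <linux/pagevec.h>	/* struct folio_batch */
	#include <linux/sched.h>	/* cond_resched() */

	/*
	 * Sketch: visit every cached folio of @mapping in [index, end_index],
	 * one batch of references at a time. filemap_get_folios() advances
	 * @index itself, so no manual index bookkeeping is needed.
	 */
	static void walk_folios(struct address_space *mapping,
				pgoff_t index, pgoff_t end_index,
				void (*per_folio)(struct folio *))
	{
		struct folio_batch fbatch;
		unsigned i;

		folio_batch_init(&fbatch);

		while (filemap_get_folios(mapping, &index, end_index, &fbatch)) {
			for (i = 0; i < folio_batch_count(&fbatch); i++) {
				struct folio *folio = fbatch.folios[i];

				folio_lock(folio);
				per_folio(folio);	/* the locked per-folio work */
				folio_unlock(folio);
			}
			folio_batch_release(&fbatch);	/* drop the batch's references */
			cond_resched();
		}
	}
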
@@ -859,30 +857,6 @@ bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
        return true;
 }
 
-#ifdef CONFIG_MIGRATION
-int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
-                     struct page *page, enum migrate_mode mode)
-{
-       int ret;
-
-       EBUG_ON(!PageLocked(page));
-       EBUG_ON(!PageLocked(newpage));
-
-       ret = migrate_page_move_mapping(mapping, newpage, page, 0);
-       if (ret != MIGRATEPAGE_SUCCESS)
-               return ret;
-
-       if (PagePrivate(page))
-               attach_page_private(newpage, detach_page_private(page));
-
-       if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
-       else
-               migrate_page_states(newpage, page);
-       return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
 /* readpage(s): */
 
 static void bch2_readpages_end_io(struct bio *bio)
@@ -1213,7 +1187,7 @@ int bch2_read_folio(struct file *file, struct folio *folio)
 
        ret = bch2_read_single_page(page, page->mapping);
        folio_unlock(folio);
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* writepages: */
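
This hunk, and the matching one-line changes in the hunks that follow, route every value returned to the VFS through bch2_err_class(). A hedged sketch of the boundary pattern (internal_op() and vfs_entry_point() are hypothetical stand-ins; bch2_err_class() is the existing helper that maps bcachefs-private error codes to their standard errno class, as the bch2_err_matches(ret, ENOSPC) checks below suggest for the ENOSPC family):

	/* Hypothetical internal helper: may fail with a private -BCH_ERR_* code. */
	static int internal_op(void)
	{
		return -ENOSPC;			/* stand-in for a private ENOSPC subclass */
	}

	/* VFS-facing entry point: the one place private codes are translated. */
	static int vfs_entry_point(void)
	{
		int ret = internal_op();

		return bch2_err_class(ret);	/* the VFS only ever sees standard errnos */
	}
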
@@ -1249,8 +1223,6 @@ static void bch2_writepage_io_done(struct closure *cl)
        struct bio_vec *bvec;
        unsigned i;
 
-       up(&io->op.c->io_in_flight);
-
        if (io->op.error) {
                set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
 
@@ -1313,8 +1285,6 @@ static void bch2_writepage_do_io(struct bch_writepage_state *w)
 {
        struct bch_writepage_io *io = w->io;
 
-       down(&io->op.c->io_in_flight);
-
        w->io = NULL;
        closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
        continue_at(&io->cl, bch2_writepage_io_done, NULL);
@@ -1501,7 +1471,7 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc
        if (w.io)
                bch2_writepage_do_io(&w);
        blk_finish_plug(&plug);
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* buffered writes: */
@@ -1557,7 +1527,7 @@ out:
        if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
                ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
                if (ret)
-                       goto out;
+                       goto err;
        }
 
        ret = bch2_page_reservation_get(c, inode, page, res,
@@ -1586,7 +1556,7 @@ err_unlock:
        bch2_pagecache_add_put(&inode->ei_pagecache_lock);
        kfree(res);
        *fsdata = NULL;
-       return ret;
+       return bch2_err_class(ret);
 }
 
 int bch2_write_end(struct file *file, struct address_space *mapping,
@@ -2010,7 +1980,7 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                                        iocb->ki_pos,
                                        iocb->ki_pos + count - 1);
                if (ret < 0)
-                       return ret;
+                       goto out;
 
                file_accessed(file);
 
@@ -2025,8 +1995,8 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                ret = generic_file_read_iter(iocb, iter);
                bch2_pagecache_add_put(&inode->ei_pagecache_lock);
        }
-
-       return ret;
+out:
+       return bch2_err_class(ret);
 }
 
 /* O_DIRECT writes */
@@ -2094,8 +2064,6 @@ static long bch2_dio_write_loop(struct dio_write *dio)
        if (dio->loop)
                goto loop;
 
-       down(&c->io_in_flight);
-
        while (1) {
                iter_count = dio->iter.count;
 
@@ -2226,7 +2194,6 @@ loop:
 
        ret = dio->op.error ?: ((long) dio->written << 9);
 err:
-       up(&c->io_in_flight);
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
        bch2_quota_reservation_put(c, inode, &dio->quota_res);
 
@@ -2241,6 +2208,9 @@ err:
        /* inode->i_dio_count is our ref on inode and thus bch_fs */
        inode_dio_end(&inode->v);
 
+       if (ret < 0)
+               ret = bch2_err_class(ret);
+
        if (!sync) {
                req->ki_complete(req, ret);
                ret = -EIOCBQUEUED;
@@ -2347,8 +2317,10 @@ ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct bch_inode_info *inode = file_bch_inode(file);
        ssize_t ret;
 
-       if (iocb->ki_flags & IOCB_DIRECT)
-               return bch2_direct_write(iocb, from);
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               ret = bch2_direct_write(iocb, from);
+               goto out;
+       }
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(&inode->v);
@@ -2375,8 +2347,8 @@ unlock:
 
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
-
-       return ret;
+out:
+       return bch2_err_class(ret);
 }
 
 /* fsync: */
@@ -2410,7 +2382,7 @@ int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        ret2 = sync_inode_metadata(&inode->v, 1);
        ret3 = bch2_flush_inode(c, inode_inum(inode));
 
-       return ret ?: ret2 ?: ret3;
+       return bch2_err_class(ret ?: ret2 ?: ret3);
 }
 
 /* truncate: */
@@ -2716,7 +2688,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
        ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
 err:
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* fallocate: */
@@ -3044,7 +3016,7 @@ bkey_err:
        bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
        mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
 
-       if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
+       if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
                struct quota_res quota_res = { 0 };
                s64 i_sectors_delta = 0;
 
@@ -3095,7 +3067,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
         * so that the VFS cache i_size is consistent with the btree i_size:
         */
        if (ret &&
-           !(ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)))
+           !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
                return ret;
 
        if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
@@ -3130,6 +3102,10 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
        inode_dio_wait(&inode->v);
        bch2_pagecache_block_get(&inode->ei_pagecache_lock);
 
+       ret = file_modified(file);
+       if (ret)
+               goto err;
+
        if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
                ret = bchfs_fallocate(inode, mode, offset, len);
        else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
@@ -3140,13 +3116,61 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
                ret = bchfs_fcollapse_finsert(inode, offset, len, false);
        else
                ret = -EOPNOTSUPP;
-
-
+err:
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
        inode_unlock(&inode->v);
        percpu_ref_put(&c->writes);
 
-       return ret;
+       return bch2_err_class(ret);
+}
+
+static int quota_reserve_range(struct bch_inode_info *inode,
+                              struct quota_res *res,
+                              u64 start, u64 end)
+{
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+       struct btree_trans trans;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       u32 snapshot;
+       u64 sectors = end - start;
+       u64 pos = start;
+       int ret;
+
+       bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
+
+       ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
+       if (ret)
+               goto err;
+
+       bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+                            SPOS(inode->v.i_ino, pos, snapshot), 0);
+
+       while (!(ret = btree_trans_too_many_iters(&trans)) &&
+              (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
+              !(ret = bkey_err(k))) {
+               if (bkey_extent_is_allocation(k.k)) {
+                       u64 s = min(end, k.k->p.offset) -
+                               max(start, bkey_start_offset(k.k));
+                       BUG_ON(s > sectors);
+                       sectors -= s;
+               }
+               bch2_btree_iter_advance(&iter);
+       }
+       pos = iter.pos.offset;
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               goto retry;
+
+       bch2_trans_exit(&trans);
+
+       if (ret)
+               return ret;
+
+       return bch2_quota_reservation_add(c, inode, res, sectors, true);
 }
 
 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
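
quota_reserve_range(), added above, walks the extents btree over [start, end) and reserves quota only for sectors that are not already allocated. A hedged sketch of the caller-side lifecycle it is meant for, mirroring its use in bch2_remap_file_range() below (c, inode, start_sector, end_sector and i_sectors_delta are stand-in names):

	struct quota_res quota_res = { 0 };
	s64 i_sectors_delta = 0;
	int ret;

	/* Reserve up front for the worst case over the target range: */
	ret = quota_reserve_range(inode, &quota_res, start_sector, end_sector);
	if (ret)
		goto err;

	/* ... perform the operation that may allocate sectors ... */

	/* Account the sectors actually used against the reservation: */
	i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
	err:
	/* Always release whatever is left of the reservation: */
	bch2_quota_reservation_put(c, inode, &quota_res);
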
@@ -3156,6 +3180,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
        struct bch_inode_info *src = file_bch_inode(file_src);
        struct bch_inode_info *dst = file_bch_inode(file_dst);
        struct bch_fs *c = src->v.i_sb->s_fs_info;
+       struct quota_res quota_res = { 0 };
        s64 i_sectors_delta = 0;
        u64 aligned_len;
        loff_t ret = 0;
@@ -3176,8 +3201,6 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
 
        bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
 
-       file_update_time(file_dst);
-
        inode_dio_wait(&src->v);
        inode_dio_wait(&dst->v);
 
@@ -3194,6 +3217,13 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
        if (ret)
                goto err;
 
+       ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
+                                 (pos_dst + aligned_len) >> 9);
+       if (ret)
+               goto err;
+
+       file_update_time(file_dst);
+
        mark_pagecache_unallocated(src, pos_src >> 9,
                                   (pos_src + aligned_len) >> 9);
 
@@ -3210,8 +3240,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
         */
        ret = min((u64) ret << 9, (u64) len);
 
-       /* XXX get a quota reservation */
-       i_sectors_acct(c, dst, NULL, i_sectors_delta);
+       i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
 
        spin_lock(&dst->v.i_lock);
        if (pos_dst + ret > dst->v.i_size)
@@ -3222,9 +3251,10 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
            IS_SYNC(file_inode(file_dst)))
                ret = bch2_flush_inode(c, inode_inum(dst));
 err:
+       bch2_quota_reservation_put(c, dst, &quota_res);
        bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
 
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* fseek: */
@@ -3246,36 +3276,40 @@ static loff_t bch2_seek_pagecache_data(struct inode *vinode,
                                       loff_t start_offset,
                                       loff_t end_offset)
 {
-       struct address_space *mapping = vinode->i_mapping;
-       struct page *page;
+       struct folio_batch fbatch;
        pgoff_t start_index     = start_offset >> PAGE_SHIFT;
        pgoff_t end_index       = end_offset >> PAGE_SHIFT;
        pgoff_t index           = start_index;
+       unsigned i;
        loff_t ret;
        int offset;
 
-       while (index <= end_index) {
-               if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
-                       lock_page(page);
+       folio_batch_init(&fbatch);
+
+       while (filemap_get_folios(vinode->i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
 
-                       offset = page_data_offset(page,
-                                       page->index == start_index
+                       folio_lock(folio);
+
+                       offset = page_data_offset(&folio->page,
+                                       folio->index == start_index
                                        ? start_offset & (PAGE_SIZE - 1)
                                        : 0);
                        if (offset >= 0) {
-                               ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
+                               ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
                                            offset,
                                            start_offset, end_offset);
-                               unlock_page(page);
-                               put_page(page);
+                               folio_unlock(folio);
+                               folio_batch_release(&fbatch);
                                return ret;
                        }
 
-                       unlock_page(page);
-                       put_page(page);
-               } else {
-                       break;
+                       folio_unlock(folio);
                }
+               folio_batch_release(&fbatch);
+               cond_resched();
        }
 
        return end_offset;
@@ -3447,18 +3481,26 @@ err:
 
 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
 {
+       loff_t ret;
+
        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
-               return generic_file_llseek(file, offset, whence);
+               ret = generic_file_llseek(file, offset, whence);
+               break;
        case SEEK_DATA:
-               return bch2_seek_data(file, offset);
+               ret = bch2_seek_data(file, offset);
+               break;
        case SEEK_HOLE:
-               return bch2_seek_hole(file, offset);
+               ret = bch2_seek_hole(file, offset);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
        }
 
-       return -EINVAL;
+       return bch2_err_class(ret);
 }
 
 void bch2_fs_fsio_exit(struct bch_fs *c)