static int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res,
- unsigned sectors,
+ u64 sectors,
bool check_enospc)
{
int ret;
{
pgoff_t index = start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
- struct pagevec pvec;
+ struct folio_batch fbatch;
+ unsigned i, j;
if (end <= start)
return;
- pagevec_init(&pvec);
-
- do {
- unsigned nr_pages, i, j;
+ folio_batch_init(&fbatch);
- nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
- &index, end_index);
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
- u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
- u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+ u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
unsigned pg_offset = max(start, pg_start) - pg_start;
unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
struct bch_page_state *s;
BUG_ON(pg_offset >= PAGE_SECTORS);
BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
- lock_page(page);
- s = bch2_page_state(page);
+ folio_lock(folio);
+ s = bch2_page_state(&folio->page);
if (s) {
spin_lock(&s->lock);
spin_unlock(&s->lock);
}
- unlock_page(page);
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
- } while (index <= end_index);
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
}
static void mark_pagecache_reserved(struct bch_inode_info *inode,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
pgoff_t index = start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
- struct pagevec pvec;
+ struct folio_batch fbatch;
s64 i_sectors_delta = 0;
+ unsigned i, j;
if (end <= start)
return;
- pagevec_init(&pvec);
-
- do {
- unsigned nr_pages, i, j;
+ folio_batch_init(&fbatch);
- nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
- &index, end_index);
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
- u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
- u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+ u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
unsigned pg_offset = max(start, pg_start) - pg_start;
unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
struct bch_page_state *s;
BUG_ON(pg_offset >= PAGE_SECTORS);
BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
- lock_page(page);
- s = bch2_page_state(page);
+ folio_lock(folio);
+ s = bch2_page_state(&folio->page);
if (s) {
spin_lock(&s->lock);
spin_unlock(&s->lock);
}
- unlock_page(page);
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
- } while (index <= end_index);
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
i_sectors_acct(c, inode, NULL, i_sectors_delta);
}
return true;
}
-#ifdef CONFIG_MIGRATION
-int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- int ret;
-
- EBUG_ON(!PageLocked(page));
- EBUG_ON(!PageLocked(newpage));
-
- ret = migrate_page_move_mapping(mapping, newpage, page, 0);
- if (ret != MIGRATEPAGE_SUCCESS)
- return ret;
-
- if (PagePrivate(page))
- attach_page_private(newpage, detach_page_private(page));
-
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
- return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
/* readpage(s): */
static void bch2_readpages_end_io(struct bio *bio)
ret = bch2_read_single_page(page, page->mapping);
folio_unlock(folio);
- return ret;
+ return bch2_err_class(ret);
}
/* writepages: */
struct bio_vec *bvec;
unsigned i;
- up(&io->op.c->io_in_flight);
-
if (io->op.error) {
set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
{
struct bch_writepage_io *io = w->io;
- down(&io->op.c->io_in_flight);
-
w->io = NULL;
closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
continue_at(&io->cl, bch2_writepage_io_done, NULL);
if (w.io)
bch2_writepage_do_io(&w);
blk_finish_plug(&plug);
- return ret;
+ return bch2_err_class(ret);
}
/* buffered writes: */
if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
if (ret)
- goto out;
+ goto err;
}
ret = bch2_page_reservation_get(c, inode, page, res,
bch2_pagecache_add_put(&inode->ei_pagecache_lock);
kfree(res);
*fsdata = NULL;
- return ret;
+ return bch2_err_class(ret);
}
int bch2_write_end(struct file *file, struct address_space *mapping,
iocb->ki_pos,
iocb->ki_pos + count - 1);
if (ret < 0)
- return ret;
+ goto out;
file_accessed(file);
ret = generic_file_read_iter(iocb, iter);
bch2_pagecache_add_put(&inode->ei_pagecache_lock);
}
-
- return ret;
+out:
+ return bch2_err_class(ret);
}
/* O_DIRECT writes */
if (dio->loop)
goto loop;
- down(&c->io_in_flight);
-
while (1) {
iter_count = dio->iter.count;
ret = dio->op.error ?: ((long) dio->written << 9);
err:
- up(&c->io_in_flight);
bch2_pagecache_block_put(&inode->ei_pagecache_lock);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
/* inode->i_dio_count is our ref on inode and thus bch_fs */
inode_dio_end(&inode->v);
+ if (ret < 0)
+ ret = bch2_err_class(ret);
+
if (!sync) {
req->ki_complete(req, ret);
ret = -EIOCBQUEUED;
struct bch_inode_info *inode = file_bch_inode(file);
ssize_t ret;
- if (iocb->ki_flags & IOCB_DIRECT)
- return bch2_direct_write(iocb, from);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ret = bch2_direct_write(iocb, from);
+ goto out;
+ }
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(&inode->v);
if (ret > 0)
ret = generic_write_sync(iocb, ret);
-
- return ret;
+out:
+ return bch2_err_class(ret);
}
/* fsync: */
ret2 = sync_inode_metadata(&inode->v, 1);
ret3 = bch2_flush_inode(c, inode_inum(inode));
- return ret ?: ret2 ?: ret3;
+ return bch2_err_class(ret ?: ret2 ?: ret3);
}
/* truncate: */
ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
err:
bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- return ret;
+ return bch2_err_class(ret);
}
/* fallocate: */
bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
- if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
+ if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
struct quota_res quota_res = { 0 };
s64 i_sectors_delta = 0;
* so that the VFS cache i_size is consistent with the btree i_size:
*/
if (ret &&
- !(ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)))
+ !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
return ret;
if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
inode_dio_wait(&inode->v);
bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ ret = file_modified(file);
+ if (ret)
+ goto err;
+
if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
ret = bchfs_fallocate(inode, mode, offset, len);
else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
ret = bchfs_fcollapse_finsert(inode, offset, len, false);
else
ret = -EOPNOTSUPP;
-
-
+err:
bch2_pagecache_block_put(&inode->ei_pagecache_lock);
inode_unlock(&inode->v);
percpu_ref_put(&c->writes);
- return ret;
+ return bch2_err_class(ret);
+}
+
+static int quota_reserve_range(struct bch_inode_info *inode,
+ struct quota_res *res,
+ u64 start, u64 end)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 snapshot;
+ u64 sectors = end - start;
+ u64 pos = start;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, pos, snapshot), 0);
+
+ while (!(ret = btree_trans_too_many_iters(&trans)) &&
+ (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
+ !(ret = bkey_err(k))) {
+ if (bkey_extent_is_allocation(k.k)) {
+ u64 s = min(end, k.k->p.offset) -
+ max(start, bkey_start_offset(k.k));
+ BUG_ON(s > sectors);
+ sectors -= s;
+ }
+ bch2_btree_iter_advance(&iter);
+ }
+ pos = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_exit(&trans);
+
+ if (ret)
+ return ret;
+
+ return bch2_quota_reservation_add(c, inode, res, sectors, true);
}
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
struct bch_inode_info *src = file_bch_inode(file_src);
struct bch_inode_info *dst = file_bch_inode(file_dst);
struct bch_fs *c = src->v.i_sb->s_fs_info;
+ struct quota_res quota_res = { 0 };
s64 i_sectors_delta = 0;
u64 aligned_len;
loff_t ret = 0;
bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
- file_update_time(file_dst);
-
inode_dio_wait(&src->v);
inode_dio_wait(&dst->v);
if (ret)
goto err;
+ ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
+ (pos_dst + aligned_len) >> 9);
+ if (ret)
+ goto err;
+
+ file_update_time(file_dst);
+
mark_pagecache_unallocated(src, pos_src >> 9,
(pos_src + aligned_len) >> 9);
*/
ret = min((u64) ret << 9, (u64) len);
- /* XXX get a quota reservation */
- i_sectors_acct(c, dst, NULL, i_sectors_delta);
+ i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
spin_lock(&dst->v.i_lock);
if (pos_dst + ret > dst->v.i_size)
IS_SYNC(file_inode(file_dst)))
ret = bch2_flush_inode(c, inode_inum(dst));
err:
+ bch2_quota_reservation_put(c, dst, &quota_res);
bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
- return ret;
+ return bch2_err_class(ret);
}
/* fseek: */
loff_t start_offset,
loff_t end_offset)
{
- struct address_space *mapping = vinode->i_mapping;
- struct page *page;
+ struct folio_batch fbatch;
pgoff_t start_index = start_offset >> PAGE_SHIFT;
pgoff_t end_index = end_offset >> PAGE_SHIFT;
pgoff_t index = start_index;
+ unsigned i;
loff_t ret;
int offset;
- while (index <= end_index) {
- if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
- lock_page(page);
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(vinode->i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
- offset = page_data_offset(page,
- page->index == start_index
+ folio_lock(folio);
+
+ offset = page_data_offset(&folio->page,
+ folio->index == start_index
? start_offset & (PAGE_SIZE - 1)
: 0);
if (offset >= 0) {
- ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
+ ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
offset,
start_offset, end_offset);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_batch_release(&fbatch);
return ret;
}
- unlock_page(page);
- put_page(page);
- } else {
- break;
+ folio_unlock(folio);
}
+ folio_batch_release(&fbatch);
+ cond_resched();
}
return end_offset;
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
+ loff_t ret;
+
switch (whence) {
case SEEK_SET:
case SEEK_CUR:
case SEEK_END:
- return generic_file_llseek(file, offset, whence);
+ ret = generic_file_llseek(file, offset, whence);
+ break;
case SEEK_DATA:
- return bch2_seek_data(file, offset);
+ ret = bch2_seek_data(file, offset);
+ break;
case SEEK_HOLE:
- return bch2_seek_hole(file, offset);
+ ret = bch2_seek_hole(file, offset);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EINVAL;
+ return bch2_err_class(ret);
}
void bch2_fs_fsio_exit(struct bch_fs *c)