X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Ffs-io.c;h=c07755c6916d2a6b010679e55b13cd35f8bc4713;hb=e61b61c03bf1f1eedc5e2dbd6887f77e45144a31;hp=0aa3afade4ea03900518727f60e6b6949f8c073e;hpb=a62d8713f84f49d723aebc9d0271abf4c9dae335;p=bcachefs-tools-debian

diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index 0aa3afa..c07755c 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -3,7 +3,7 @@
 #include "bcachefs.h"
 #include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_update.h"
 #include "buckets.h"
 #include "clock.h"
@@ -26,6 +26,7 @@
 #include <linux/migrate.h>
 #include <linux/mmu_context.h>
 #include <linux/pagevec.h>
+#include <linux/rmap.h>
 #include <linux/sched/signal.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/uio.h>
@@ -34,6 +35,22 @@
 #include <trace/events/bcachefs.h>
 #include <trace/events/writeback.h>
 
+static inline struct address_space *faults_disabled_mapping(void)
+{
+	return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
+}
+
+static inline void set_fdm_dropped_locks(void)
+{
+	current->faults_disabled_mapping =
+		(void *) (((unsigned long) current->faults_disabled_mapping)|1);
+}
+
+static inline bool fdm_dropped_locks(void)
+{
+	return ((unsigned long) current->faults_disabled_mapping) & 1;
+}
+
 struct quota_res {
 	u64			sectors;
 };
@@ -54,6 +71,7 @@ struct dio_write {
 				sync:1,
 				free_iov:1;
 	struct quota_res	quota_res;
+	u64			written;
 
 	struct iov_iter		iter;
 	struct iovec		inline_vecs[2];
@@ -66,6 +84,7 @@ struct dio_read {
 	struct closure		cl;
 	struct kiocb		*req;
 	long			ret;
+	bool			should_dirty;
 	struct bch_read_bio	rbio;
 };
 
@@ -80,8 +99,7 @@ static int write_invalidate_inode_pages_range(struct address_space *mapping,
 	 * is continually redirtying a specific page
 	 */
	do {
-		if (!mapping->nrpages &&
-		    !mapping->nrexceptional)
+		if (!mapping->nrpages)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
@@ -263,28 +281,13 @@ static inline struct bch_page_state *bch2_page_state(struct page *page)
 /* for newly allocated pages: */
 static void __bch2_page_state_release(struct page *page)
 {
-	struct bch_page_state *s = __bch2_page_state(page);
-
-	if (!s)
-		return;
-
-	ClearPagePrivate(page);
-	set_page_private(page, 0);
-	put_page(page);
-	kfree(s);
+	kfree(detach_page_private(page));
 }
 
 static void bch2_page_state_release(struct page *page)
 {
-	struct bch_page_state *s = bch2_page_state(page);
-
-	if (!s)
-		return;
-
-	ClearPagePrivate(page);
-	set_page_private(page, 0);
-	put_page(page);
-	kfree(s);
+	EBUG_ON(!PageLocked(page));
+	__bch2_page_state_release(page);
 }
 
 /* for newly allocated pages: */
@@ -298,13 +301,7 @@ static struct bch_page_state *__bch2_page_state_create(struct page *page,
		return NULL;

	spin_lock_init(&s->lock);
-	/*
-	 * migrate_page_move_mapping() assumes that pages with private data
-	 * have their count elevated by 1.
- */ - get_page(page); - set_page_private(page, (unsigned long) s); - SetPagePrivate(page); + attach_page_private(page, s); return s; } @@ -512,10 +509,35 @@ static void bch2_set_page_dirty(struct bch_fs *c, vm_fault_t bch2_page_fault(struct vm_fault *vmf) { struct file *file = vmf->vma->vm_file; + struct address_space *mapping = file->f_mapping; + struct address_space *fdm = faults_disabled_mapping(); struct bch_inode_info *inode = file_bch_inode(file); int ret; + if (fdm == mapping) + return VM_FAULT_SIGBUS; + + /* Lock ordering: */ + if (fdm > mapping) { + struct bch_inode_info *fdm_host = to_bch_ei(fdm->host); + + if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock)) + goto got_lock; + + bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock); + + bch2_pagecache_add_get(&inode->ei_pagecache_lock); + bch2_pagecache_add_put(&inode->ei_pagecache_lock); + + bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock); + + /* Signal that lock has been dropped: */ + set_fdm_dropped_locks(); + return VM_FAULT_SIGBUS; + } + bch2_pagecache_add_get(&inode->ei_pagecache_lock); +got_lock: ret = filemap_fault(vmf); bch2_pagecache_add_put(&inode->ei_pagecache_lock); @@ -606,14 +628,8 @@ int bch2_migrate_page(struct address_space *mapping, struct page *newpage, if (ret != MIGRATEPAGE_SUCCESS) return ret; - if (PagePrivate(page)) { - ClearPagePrivate(page); - get_page(newpage); - set_page_private(newpage, page_private(page)); - set_page_private(page, 0); - put_page(page); - SetPagePrivate(newpage); - } + if (PagePrivate(page)) + attach_page_private(newpage, detach_page_private(page)); if (mode != MIGRATE_SYNC_NO_COPY) migrate_page_copy(newpage, page); @@ -645,41 +661,33 @@ static void bch2_readpages_end_io(struct bio *bio) bio_put(bio); } -static inline void page_state_init_for_read(struct page *page) -{ - SetPagePrivate(page); - page->private = 0; -} - struct readpages_iter { struct address_space *mapping; struct page **pages; unsigned nr_pages; - unsigned nr_added; unsigned idx; pgoff_t offset; }; static int readpages_iter_init(struct readpages_iter *iter, - struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) + struct readahead_control *ractl) { + unsigned i, nr_pages = readahead_count(ractl); + memset(iter, 0, sizeof(*iter)); - iter->mapping = mapping; - iter->offset = list_last_entry(pages, struct page, lru)->index; + iter->mapping = ractl->mapping; + iter->offset = readahead_index(ractl); + iter->nr_pages = nr_pages; iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS); if (!iter->pages) return -ENOMEM; - while (!list_empty(pages)) { - struct page *page = list_last_entry(pages, struct page, lru); - - __bch2_page_state_create(page, __GFP_NOFAIL); - - iter->pages[iter->nr_pages++] = page; - list_del(&page->lru); + nr_pages = __readahead_batch(ractl, iter->pages, nr_pages); + for (i = 0; i < nr_pages; i++) { + __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL); + put_page(iter->pages[i]); } return 0; @@ -687,41 +695,9 @@ static int readpages_iter_init(struct readpages_iter *iter, static inline struct page *readpage_iter_next(struct readpages_iter *iter) { - struct page *page; - unsigned i; - int ret; - - BUG_ON(iter->idx > iter->nr_added); - BUG_ON(iter->nr_added > iter->nr_pages); - - if (iter->idx < iter->nr_added) - goto out; - - while (1) { - if (iter->idx == iter->nr_pages) - return NULL; - - ret = add_to_page_cache_lru_vec(iter->mapping, - iter->pages + iter->nr_added, - iter->nr_pages - iter->nr_added, - iter->offset + iter->nr_added, - GFP_NOFS); - 
if (ret > 0) - break; - - page = iter->pages[iter->nr_added]; - iter->idx++; - iter->nr_added++; - - __bch2_page_state_release(page); - put_page(page); - } - - iter->nr_added += ret; + if (iter->idx >= iter->nr_pages) + return NULL; - for (i = iter->idx; i < iter->nr_added; i++) - put_page(iter->pages[i]); -out: EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx); return iter->pages[iter->idx]; @@ -810,45 +786,70 @@ static void readpage_bio_extend(struct readpages_iter *iter, } } -static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter, - struct bch_read_bio *rbio, u64 inum, +static void bchfs_read(struct btree_trans *trans, + struct bch_read_bio *rbio, + subvol_inum inum, struct readpages_iter *readpages_iter) { struct bch_fs *c = trans->c; - struct bkey_on_stack sk; + struct btree_iter iter; + struct bkey_buf sk; int flags = BCH_READ_RETRY_IF_STALE| BCH_READ_MAY_PROMOTE; + u32 snapshot; int ret = 0; rbio->c = c; rbio->start_time = local_clock(); + rbio->subvol = inum.subvol; - bkey_on_stack_init(&sk); + bch2_bkey_buf_init(&sk); retry: + bch2_trans_begin(trans); + iter = (struct btree_iter) { NULL }; + + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); + if (ret) + goto err; + + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, + SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot), + BTREE_ITER_SLOTS|BTREE_ITER_FILTER_SNAPSHOTS); while (1) { struct bkey_s_c k; unsigned bytes, sectors, offset_into_extent; + enum btree_id data_btree = BTREE_ID_extents; - bch2_btree_iter_set_pos(iter, - POS(inum, rbio->bio.bi_iter.bi_sector)); + /* + * read_extent -> io_time_reset may cause a transaction restart + * without returning an error, we need to check for that here: + */ + if (!bch2_trans_relock(trans)) { + ret = -EINTR; + break; + } - k = bch2_btree_iter_peek_slot(iter); + bch2_btree_iter_set_pos(&iter, + POS(inum.inum, rbio->bio.bi_iter.bi_sector)); + + k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) break; - bkey_on_stack_reassemble(&sk, c, k); - k = bkey_i_to_s_c(sk.k); - - offset_into_extent = iter->pos.offset - + offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); sectors = k.k->size - offset_into_extent; - ret = bch2_read_indirect_extent(trans, - &offset_into_extent, sk.k); + bch2_bkey_buf_reassemble(&sk, c, k); + + ret = bch2_read_indirect_extent(trans, &data_btree, + &offset_into_extent, &sk); if (ret) break; + k = bkey_i_to_s_c(sk.k); + sectors = min(sectors, k.k->size - offset_into_extent); bch2_trans_unlock(trans); @@ -866,7 +867,8 @@ retry: if (bkey_extent_is_allocation(k.k)) bch2_add_page_sectors(&rbio->bio, k); - bch2_read_extent(c, rbio, k, offset_into_extent, flags); + bch2_read_extent(trans, rbio, iter.pos, + data_btree, k, offset_into_extent, flags); if (flags & BCH_READ_LAST_FRAGMENT) break; @@ -874,38 +876,37 @@ retry: swap(rbio->bio.bi_iter.bi_size, bytes); bio_advance(&rbio->bio, bytes); } +err: + bch2_trans_iter_exit(trans, &iter); if (ret == -EINTR) goto retry; if (ret) { - bcache_io_error(c, &rbio->bio, "btree IO error %i", ret); + bch_err_inum_ratelimited(c, inum.inum, + "read error %i from btree lookup", ret); + rbio->bio.bi_status = BLK_STS_IOERR; bio_endio(&rbio->bio); } - bkey_on_stack_exit(&sk, c); + bch2_bkey_buf_exit(&sk, c); } -int bch2_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +void bch2_readahead(struct readahead_control *ractl) { - struct bch_inode_info *inode = to_bch_ei(mapping->host); + struct bch_inode_info 
*inode = to_bch_ei(ractl->mapping->host); struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_io_opts opts = io_opts(c, &inode->ei_inode); struct btree_trans trans; - struct btree_iter *iter; struct page *page; struct readpages_iter readpages_iter; int ret; - ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages); + ret = readpages_iter_init(&readpages_iter, ractl); BUG_ON(ret); bch2_trans_init(&trans, c, 0, 0); - iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, - BTREE_ITER_SLOTS); - bch2_pagecache_add_get(&inode->ei_pagecache_lock); while ((page = readpage_iter_next(&readpages_iter))) { @@ -913,7 +914,7 @@ int bch2_readpages(struct file *file, struct address_space *mapping, unsigned n = min_t(unsigned, readpages_iter.nr_pages - readpages_iter.idx, - BIO_MAX_PAGES); + BIO_MAX_VECS); struct bch_read_bio *rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read), opts); @@ -925,7 +926,7 @@ int bch2_readpages(struct file *file, struct address_space *mapping, rbio->bio.bi_end_io = bch2_readpages_end_io; BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0)); - bchfs_read(&trans, iter, rbio, inode->v.i_ino, + bchfs_read(&trans, rbio, inode_inum(inode), &readpages_iter); } @@ -933,15 +934,12 @@ int bch2_readpages(struct file *file, struct address_space *mapping, bch2_trans_exit(&trans); kfree(readpages_iter.pages); - - return 0; } static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio, - u64 inum, struct page *page) + subvol_inum inum, struct page *page) { struct btree_trans trans; - struct btree_iter *iter; bch2_page_state_create(page, __GFP_NOFAIL); @@ -951,11 +949,7 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio, BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0)); bch2_trans_init(&trans, c, 0, 0); - iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, - BTREE_ITER_SLOTS); - - bchfs_read(&trans, iter, rbio, inum, NULL); - + bchfs_read(&trans, rbio, inum, NULL); bch2_trans_exit(&trans); } @@ -969,7 +963,7 @@ int bch2_readpage(struct file *file, struct page *page) rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts); rbio->bio.bi_end_io = bch2_readpages_end_io; - __bchfs_readpage(c, rbio, inode->v.i_ino, page); + __bchfs_readpage(c, rbio, inode_inum(inode), page); return 0; } @@ -992,7 +986,7 @@ static int bch2_read_single_page(struct page *page, rbio->bio.bi_private = &done; rbio->bio.bi_end_io = bch2_read_single_page_end_io; - __bchfs_readpage(c, rbio, inode->v.i_ino, page); + __bchfs_readpage(c, rbio, inode_inum(inode), page); wait_for_completion(&done); ret = blk_status_to_errno(rbio->bio.bi_status); @@ -1038,7 +1032,11 @@ static void bch2_writepage_io_done(struct closure *cl) struct bio_vec *bvec; unsigned i; + up(&io->op.c->io_in_flight); + if (io->op.error) { + set_bit(EI_INODE_ERROR, &io->inode->ei_flags); + bio_for_each_segment_all(bvec, bio, iter) { struct bch_page_state *s; @@ -1098,6 +1096,8 @@ static void bch2_writepage_do_io(struct bch_writepage_state *w) { struct bch_writepage_io *io = w->io; + down(&io->op.c->io_in_flight); + w->io = NULL; closure_call(&io->op.cl, bch2_write, NULL, &io->cl); continue_at(&io->cl, bch2_writepage_io_done, NULL); @@ -1116,8 +1116,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c, { struct bch_write_op *op; - w->io = container_of(bio_alloc_bioset(GFP_NOFS, - BIO_MAX_PAGES, + w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &c->writepage_bioset), struct bch_writepage_io, op.wbio.bio); @@ -1131,6 +1130,7 @@ static void 
bch2_writepage_io_alloc(struct bch_fs *c, op->nr_replicas = nr_replicas; op->res.nr_replicas = nr_replicas; op->write_point = writepoint_hashed(inode->ei_last_dirtied); + op->subvol = inode->ei_subvol; op->pos = POS(inode->v.i_ino, sector); op->wbio.bio.bi_iter.bi_sector = sector; op->wbio.bio.bi_opf = wbc_to_write_flags(wbc); @@ -1239,7 +1239,8 @@ do_io: if (w->io && (w->io->op.res.nr_replicas != nr_replicas_this_write || bio_full(&w->io->op.wbio.bio, PAGE_SIZE) || - w->io->op.wbio.bio.bi_iter.bi_size >= (256U << 20) || + w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >= + (BIO_MAX_VECS * PAGE_SIZE) || bio_end_sector(&w->io->op.wbio.bio) != sector)) bch2_writepage_do_io(w); @@ -1514,24 +1515,24 @@ retry_reservation: if (!pg_copied) break; + if (!PageUptodate(page) && + pg_copied != PAGE_SIZE && + pos + copied + pg_copied < inode->v.i_size) { + zero_user(page, 0, PAGE_SIZE); + break; + } + flush_dcache_page(page); iov_iter_advance(iter, pg_copied); copied += pg_copied; + + if (pg_copied != pg_len) + break; } if (!copied) goto out; - if (copied < len && - ((offset + copied) & (PAGE_SIZE - 1))) { - struct page *page = pages[(offset + copied) >> PAGE_SHIFT]; - - if (!PageUptodate(page)) { - zero_user(page, 0, PAGE_SIZE); - copied -= (offset + copied) & (PAGE_SIZE - 1); - } - } - spin_lock(&inode->v.i_lock); if (pos + copied > inode->v.i_size) i_size_write(&inode->v, pos + copied); @@ -1628,6 +1629,7 @@ again: } pos += ret; written += ret; + ret = 0; balance_dirty_pages_ratelimited(mapping); } while (iov_iter_count(iter)); @@ -1639,12 +1641,22 @@ again: /* O_DIRECT reads */ +static void bio_check_or_release(struct bio *bio, bool check_dirty) +{ + if (check_dirty) { + bio_check_pages_dirty(bio); + } else { + bio_release_pages(bio, false); + bio_put(bio); + } +} + static void bch2_dio_read_complete(struct closure *cl) { struct dio_read *dio = container_of(cl, struct dio_read, cl); dio->req->ki_complete(dio->req, dio->ret, 0); - bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */ + bio_check_or_release(&dio->rbio.bio, dio->should_dirty); } static void bch2_direct_IO_read_endio(struct bio *bio) @@ -1659,8 +1671,11 @@ static void bch2_direct_IO_read_endio(struct bio *bio) static void bch2_direct_IO_read_split_endio(struct bio *bio) { + struct dio_read *dio = bio->bi_private; + bool should_dirty = dio->should_dirty; + bch2_direct_IO_read_endio(bio); - bio_check_pages_dirty(bio); /* transfers ownership */ + bio_check_or_release(bio, should_dirty); } static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter) @@ -1689,7 +1704,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter) iter->count -= shorten; bio = bio_alloc_bioset(GFP_KERNEL, - iov_iter_npages(iter, BIO_MAX_PAGES), + iov_iter_npages(iter, BIO_MAX_VECS), &c->dio_read_bioset); bio->bi_end_io = bch2_direct_IO_read_endio; @@ -1714,11 +1729,17 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter) dio->req = req; dio->ret = ret; + /* + * This is one of the sketchier things I've encountered: we have to skip + * the dirtying of requests that are internal from the kernel (i.e. from + * loopback), because we'll deadlock on page_lock. 
+ */ + dio->should_dirty = iter_is_iovec(iter); goto start; while (iter->count) { bio = bio_alloc_bioset(GFP_KERNEL, - iov_iter_npages(iter, BIO_MAX_PAGES), + iov_iter_npages(iter, BIO_MAX_VECS), &c->bio_read); bio->bi_end_io = bch2_direct_IO_read_split_endio; start: @@ -1735,12 +1756,14 @@ start: } offset += bio->bi_iter.bi_size; - bio_set_pages_dirty(bio); + + if (dio->should_dirty) + bio_set_pages_dirty(bio); if (iter->count) closure_get(&dio->cl); - bch2_read(c, rbio_init(bio, opts), inode->v.i_ino); + bch2_read(c, rbio_init(bio, opts), inode_inum(inode)); } iter->count += shorten; @@ -1749,7 +1772,7 @@ start: closure_sync(&dio->cl); closure_debug_destroy(&dio->cl); ret = dio->ret; - bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */ + bio_check_or_release(&dio->rbio.bio, dio->should_dirty); return ret; } else { return -EIOCBQUEUED; @@ -1795,39 +1818,109 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter) /* O_DIRECT writes */ +static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum, + u64 offset, u64 size, + unsigned nr_replicas, bool compressed) +{ + struct btree_trans trans; + struct btree_iter iter; + struct bkey_s_c k; + u64 end = offset + size; + u32 snapshot; + bool ret = true; + int err; + + bch2_trans_init(&trans, c, 0, 0); +retry: + bch2_trans_begin(&trans); + + err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); + if (err) + goto err; + + for_each_btree_key(&trans, iter, BTREE_ID_extents, + SPOS(inum.inum, offset, snapshot), + BTREE_ITER_SLOTS, k, err) { + if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0) + break; + + if (k.k->p.snapshot != snapshot || + nr_replicas > bch2_bkey_replicas(c, k) || + (!compressed && bch2_bkey_sectors_compressed(k))) { + ret = false; + break; + } + } + + offset = iter.pos.offset; + bch2_trans_iter_exit(&trans, &iter); +err: + if (err == -EINTR) + goto retry; + bch2_trans_exit(&trans); + + return err ? 
false : ret; +} + +static void bch2_dio_write_loop_async(struct bch_write_op *); + static long bch2_dio_write_loop(struct dio_write *dio) { bool kthread = (current->flags & PF_KTHREAD) != 0; - struct bch_fs *c = dio->op.c; struct kiocb *req = dio->req; struct address_space *mapping = req->ki_filp->f_mapping; struct bch_inode_info *inode = file_bch_inode(req->ki_filp); + struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bio *bio = &dio->op.wbio.bio; struct bvec_iter_all iter; struct bio_vec *bv; - unsigned unaligned; - u64 new_i_size; - bool sync = dio->sync; + unsigned unaligned, iter_count; + bool sync = dio->sync, dropped_locks; long ret; if (dio->loop) goto loop; + down(&c->io_in_flight); + while (1) { + iter_count = dio->iter.count; + if (kthread) - use_mm(dio->mm); + kthread_use_mm(dio->mm); BUG_ON(current->faults_disabled_mapping); current->faults_disabled_mapping = mapping; ret = bio_iov_iter_get_pages(bio, &dio->iter); + dropped_locks = fdm_dropped_locks(); + current->faults_disabled_mapping = NULL; if (kthread) - unuse_mm(dio->mm); + kthread_unuse_mm(dio->mm); + + /* + * If the fault handler returned an error but also signalled + * that it dropped & retook ei_pagecache_lock, we just need to + * re-shoot down the page cache and retry: + */ + if (dropped_locks && ret) + ret = 0; if (unlikely(ret < 0)) goto err; + if (unlikely(dropped_locks)) { + ret = write_invalidate_inode_pages_range(mapping, + req->ki_pos, + req->ki_pos + iter_count - 1); + if (unlikely(ret)) + goto err; + + if (!bio->bi_iter.bi_size) + continue; + } + unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1); bio->bi_iter.bi_size -= unaligned; iov_iter_revert(&dio->iter, unaligned); @@ -1837,14 +1930,32 @@ static long bch2_dio_write_loop(struct dio_write *dio) * bio_iov_iter_get_pages was only able to get < * blocksize worth of pages: */ - bio_for_each_segment_all(bv, bio, iter) - put_page(bv->bv_page); ret = -EFAULT; goto err; } - dio->op.pos = POS(inode->v.i_ino, - (req->ki_pos >> 9) + dio->op.written); + bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode)); + dio->op.end_io = bch2_dio_write_loop_async; + dio->op.target = dio->op.opts.foreground_target; + op_journal_seq_set(&dio->op, &inode->ei_journal_seq); + dio->op.write_point = writepoint_hashed((unsigned long) current); + dio->op.nr_replicas = dio->op.opts.data_replicas; + dio->op.subvol = inode->ei_subvol; + dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9); + + if ((req->ki_flags & IOCB_DSYNC) && + !c->opts.journal_flush_disabled) + dio->op.flags |= BCH_WRITE_FLUSH; + dio->op.flags |= BCH_WRITE_CHECK_ENOSPC; + + ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio), + dio->op.opts.data_replicas, 0); + if (unlikely(ret) && + !bch2_check_range_allocated(c, inode_inum(inode), + dio->op.pos.offset, bio_sectors(bio), + dio->op.opts.data_replicas, + dio->op.opts.compression != 0)) + goto err; task_io_account_write(bio->bi_iter.bi_size); @@ -1876,33 +1987,43 @@ do_io: loop: i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta); - dio->op.i_sectors_delta = 0; - - new_i_size = req->ki_pos + ((u64) dio->op.written << 9); + req->ki_pos += (u64) dio->op.written << 9; + dio->written += dio->op.written; spin_lock(&inode->v.i_lock); - if (new_i_size > inode->v.i_size) - i_size_write(&inode->v, new_i_size); + if (req->ki_pos > inode->v.i_size) + i_size_write(&inode->v, req->ki_pos); spin_unlock(&inode->v.i_lock); - bio_for_each_segment_all(bv, bio, iter) - put_page(bv->bv_page); - if (!dio->iter.count || dio->op.error) + if 
(likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) + bio_for_each_segment_all(bv, bio, iter) + put_page(bv->bv_page); + bio->bi_vcnt = 0; + + if (dio->op.error) { + set_bit(EI_INODE_ERROR, &inode->ei_flags); + break; + } + + if (!dio->iter.count) break; bio_reset(bio); reinit_completion(&dio->done); } - ret = dio->op.error ?: ((long) dio->op.written << 9); + ret = dio->op.error ?: ((long) dio->written << 9); err: + up(&c->io_in_flight); bch2_pagecache_block_put(&inode->ei_pagecache_lock); - bch2_disk_reservation_put(c, &dio->op.res); bch2_quota_reservation_put(c, inode, &dio->quota_res); if (dio->free_iov) kfree(dio->iter.iov); + if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) + bio_for_each_segment_all(bv, bio, iter) + put_page(bv->bv_page); bio_put(bio); /* inode->i_dio_count is our ref on inode and thus bch_fs */ @@ -1932,7 +2053,6 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter) struct address_space *mapping = file->f_mapping; struct bch_inode_info *inode = file_bch_inode(file); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct bch_io_opts opts = io_opts(c, &inode->ei_inode); struct dio_write *dio; struct bio *bio; bool locked = true, extending; @@ -1970,7 +2090,9 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter) } bio = bio_alloc_bioset(GFP_KERNEL, - iov_iter_npages(iter, BIO_MAX_PAGES), + iov_iter_is_bvec(iter) + ? 0 + : iov_iter_npages(iter, BIO_MAX_VECS), &c->dio_write_bioset); dio = container_of(bio, struct dio_write, op.wbio.bio); init_completion(&dio->done); @@ -1980,35 +2102,14 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter) dio->sync = is_sync_kiocb(req) || extending; dio->free_iov = false; dio->quota_res.sectors = 0; + dio->written = 0; dio->iter = *iter; - bch2_write_op_init(&dio->op, c, opts); - dio->op.end_io = bch2_dio_write_loop_async; - dio->op.target = opts.foreground_target; - op_journal_seq_set(&dio->op, &inode->ei_journal_seq); - dio->op.write_point = writepoint_hashed((unsigned long) current); - dio->op.flags |= BCH_WRITE_NOPUT_RESERVATION; - - if ((req->ki_flags & IOCB_DSYNC) && - !c->opts.journal_flush_disabled) - dio->op.flags |= BCH_WRITE_FLUSH; - ret = bch2_quota_reservation_add(c, inode, &dio->quota_res, iter->count >> 9, true); if (unlikely(ret)) goto err_put_bio; - dio->op.nr_replicas = dio->op.opts.data_replicas; - - ret = bch2_disk_reservation_get(c, &dio->op.res, iter->count >> 9, - dio->op.opts.data_replicas, 0); - if (unlikely(ret) && - !bch2_check_range_allocated(c, POS(inode->v.i_ino, - req->ki_pos >> 9), - iter->count >> 9, - dio->op.opts.data_replicas)) - goto err_put_bio; - ret = write_invalidate_inode_pages_range(mapping, req->ki_pos, req->ki_pos + iter->count - 1); @@ -2019,12 +2120,9 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter) err: if (locked) inode_unlock(&inode->v); - if (ret > 0) - req->ki_pos += ret; return ret; err_put_bio: bch2_pagecache_block_put(&inode->ei_pagecache_lock); - bch2_disk_reservation_put(c, &dio->op.res); bch2_quota_reservation_put(c, inode, &dio->quota_res); bio_put(bio); inode_dio_end(&inode->v); @@ -2098,18 +2196,24 @@ out: /* truncate: */ -static inline int range_has_data(struct bch_fs *c, - struct bpos start, - struct bpos end) +static inline int range_has_data(struct bch_fs *c, u32 subvol, + struct bpos start, + struct bpos end) { struct btree_trans trans; - struct btree_iter *iter; + struct btree_iter iter; struct bkey_s_c k; int ret = 0; bch2_trans_init(&trans, c, 0, 0); +retry: + bch2_trans_begin(&trans); - 
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) { + ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot); + if (ret) + goto err; + + for_each_btree_key(&trans, iter, BTREE_ID_extents, start, 0, k, ret) { if (bkey_cmp(bkey_start_pos(k.k), end) >= 0) break; @@ -2118,6 +2222,11 @@ static inline int range_has_data(struct bch_fs *c, break; } } + start = iter.pos; + bch2_trans_iter_exit(&trans, &iter); +err: + if (ret == -EINTR) + goto retry; return bch2_trans_exit(&trans) ?: ret; } @@ -2149,7 +2258,7 @@ static int __bch2_truncate_page(struct bch_inode_info *inode, * XXX: we're doing two index lookups when we end up reading the * page */ - ret = range_has_data(c, + ret = range_has_data(c, inode->ei_subvol, POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT), POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT)); if (ret <= 0) @@ -2197,6 +2306,12 @@ static int __bch2_truncate_page(struct bch_inode_info *inode, ret = bch2_get_page_disk_reservation(c, inode, page, false); BUG_ON(ret); + /* + * This removes any writeable userspace mappings; we need to force + * .page_mkwrite to be called again before any mmapped writes, to + * redirty the full page: + */ + page_mkclean(page); __set_page_dirty_nobuffers(page); unlock: unlock_page(page); @@ -2211,11 +2326,11 @@ static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from) from, round_up(from, PAGE_SIZE)); } -static int bch2_extend(struct bch_inode_info *inode, +static int bch2_extend(struct user_namespace *mnt_userns, + struct bch_inode_info *inode, struct bch_inode_unpacked *inode_u, struct iattr *iattr) { - struct bch_fs *c = inode->v.i_sb->s_fs_info; struct address_space *mapping = inode->v.i_mapping; int ret; @@ -2229,24 +2344,15 @@ static int bch2_extend(struct bch_inode_info *inode, return ret; truncate_setsize(&inode->v, iattr->ia_size); - setattr_copy(&inode->v, iattr); - mutex_lock(&inode->ei_update_lock); - ret = bch2_write_inode_size(c, inode, inode->v.i_size, - ATTR_MTIME|ATTR_CTIME); - mutex_unlock(&inode->ei_update_lock); - - return ret; + return bch2_setattr_nonsize(mnt_userns, inode, iattr); } static int bch2_truncate_finish_fn(struct bch_inode_info *inode, struct bch_inode_unpacked *bi, void *p) { - struct bch_fs *c = inode->v.i_sb->s_fs_info; - bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY; - bi->bi_mtime = bi->bi_ctime = bch2_current_time(c); return 0; } @@ -2260,29 +2366,33 @@ static int bch2_truncate_start_fn(struct bch_inode_info *inode, return 0; } -int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr) +int bch2_truncate(struct user_namespace *mnt_userns, + struct bch_inode_info *inode, struct iattr *iattr) { struct bch_fs *c = inode->v.i_sb->s_fs_info; struct address_space *mapping = inode->v.i_mapping; struct bch_inode_unpacked inode_u; - struct btree_trans trans; - struct btree_iter *iter; u64 new_i_size = iattr->ia_size; s64 i_sectors_delta = 0; int ret = 0; - inode_dio_wait(&inode->v); - bch2_pagecache_block_get(&inode->ei_pagecache_lock); - /* - * fetch current on disk i_size: inode is locked, i_size can only - * increase underneath us: + * If the truncate call with change the size of the file, the + * cmtimes should be updated. If the size will not change, we + * do not need to update the cmtimes. 
 	 */
-	bch2_trans_init(&trans, c, 0, 0);
-	iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
-	ret = PTR_ERR_OR_ZERO(iter);
-	bch2_trans_exit(&trans);
+	if (iattr->ia_size != inode->v.i_size) {
+		if (!(iattr->ia_valid & ATTR_MTIME))
+			ktime_get_coarse_real_ts64(&iattr->ia_mtime);
+		if (!(iattr->ia_valid & ATTR_CTIME))
+			ktime_get_coarse_real_ts64(&iattr->ia_ctime);
+		iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
+	}
+
+	inode_dio_wait(&inode->v);
+	bch2_pagecache_block_get(&inode->ei_pagecache_lock);
 
+	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
 	if (ret)
 		goto err;
 
@@ -2295,13 +2405,16 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
 	if (ret)
 		goto err;
 
-	BUG_ON(inode->v.i_size < inode_u.bi_size);
+	WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+		inode->v.i_size < inode_u.bi_size);
 
 	if (iattr->ia_size > inode->v.i_size) {
-		ret = bch2_extend(inode, &inode_u, iattr);
+		ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
 		goto err;
 	}
 
+	iattr->ia_valid &= ~ATTR_SIZE;
+
 	ret = bch2_truncate_page(inode, iattr->ia_size);
 	if (unlikely(ret))
 		goto err;
@@ -2337,7 +2450,7 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
 
 	truncate_setsize(&inode->v, iattr->ia_size);
 
-	ret = bch2_fpunch(c, inode->v.i_ino,
+	ret = bch2_fpunch(c, inode_inum(inode),
			round_up(iattr->ia_size, block_bytes(c)) >> 9,
			U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
 	i_sectors_acct(c, inode, NULL, i_sectors_delta);
@@ -2345,12 +2458,11 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
 	if (unlikely(ret))
 		goto err;
 
-	setattr_copy(&inode->v, iattr);
-
 	mutex_lock(&inode->ei_update_lock);
-	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
-			       ATTR_MTIME|ATTR_CTIME);
+	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
 	mutex_unlock(&inode->ei_update_lock);
+
+	ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
 err:
 	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
 	return ret;
@@ -2358,6 +2470,15 @@ err:
 
 /* fallocate: */
 
+static int inode_update_times_fn(struct bch_inode_info *inode,
+				 struct bch_inode_unpacked *bi, void *p)
+{
+	struct bch_fs *c = inode->v.i_sb->s_fs_info;
+
+	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
+	return 0;
+}
+
 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
 {
 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -2389,12 +2510,17 @@ static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len
 	if (discard_start < discard_end) {
 		s64 i_sectors_delta = 0;
 
-		ret = bch2_fpunch(c, inode->v.i_ino,
+		ret = bch2_fpunch(c, inode_inum(inode),
				  discard_start, discard_end,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
 		i_sectors_acct(c, inode, NULL, i_sectors_delta);
 	}
+
+	mutex_lock(&inode->ei_update_lock);
+	ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+			       ATTR_MTIME|ATTR_CTIME) ?: ret;
+	mutex_unlock(&inode->ei_update_lock);
 err:
 	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
 	inode_unlock(&inode->v);
@@ -2408,19 +2534,16 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 {
 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
 	struct address_space *mapping = inode->v.i_mapping;
-	struct bkey_on_stack copy;
+	struct bkey_buf copy;
 	struct btree_trans trans;
-	struct btree_iter *src, *dst;
+	struct btree_iter src, dst, del;
 	loff_t shift, new_size;
 	u64 src_start;
-	int ret;
+	int ret = 0;
 
 	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;
 
-	bkey_on_stack_init(&copy);
-	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
-
 	/*
 	 * We need i_mutex to keep the page cache consistent with the extents
 	 * btree, and the btree consistent with i_size - we don't need outside
@@ -2466,7 +2589,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 	} else {
 		s64 i_sectors_delta = 0;
 
-		ret = bch2_fpunch(c, inode->v.i_ino,
+		ret = bch2_fpunch(c, inode_inum(inode),
				  offset >> 9, (offset + len) >> 9,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
@@ -2476,15 +2599,15 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 		goto err;
 	}
 
-	src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+	bch2_bkey_buf_init(&copy);
+	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+	bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
			POS(inode->v.i_ino, src_start >> 9),
			BTREE_ITER_INTENT);
-	BUG_ON(IS_ERR_OR_NULL(src));
+	bch2_trans_copy_iter(&dst, &src);
+	bch2_trans_copy_iter(&del, &src);
-
-	dst = bch2_trans_copy_iter(&trans, src);
-	BUG_ON(IS_ERR_OR_NULL(dst));
-
-	while (1) {
+	while (ret == 0 || ret == -EINTR) {
 		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
 		struct bkey_i delete;
@@ -2493,34 +2616,46 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 		struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
 		struct bpos atomic_end;
 		unsigned trigger_flags = 0;
+		u32 snapshot;
+
+		bch2_trans_begin(&trans);
+
+		ret = bch2_subvolume_get_snapshot(&trans,
+					inode->ei_subvol, &snapshot);
+		if (ret)
+			continue;
+
+		bch2_btree_iter_set_snapshot(&src, snapshot);
+		bch2_btree_iter_set_snapshot(&dst, snapshot);
+		bch2_btree_iter_set_snapshot(&del, snapshot);
+
+		bch2_trans_begin(&trans);
 
 		k = insert
-			? bch2_btree_iter_peek_prev(src)
-			: bch2_btree_iter_peek(src);
+			? bch2_btree_iter_peek_prev(&src)
+			: bch2_btree_iter_peek(&src);
 		if ((ret = bkey_err(k)))
-			goto bkey_err;
+			continue;
 
 		if (!k.k || k.k->p.inode != inode->v.i_ino)
			break;
 
-		BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));
-
 		if (insert &&
		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
			break;
 reassemble:
-		bkey_on_stack_reassemble(&copy, c, k);
+		bch2_bkey_buf_reassemble(&copy, c, k);
 
 		if (insert &&
		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
			bch2_cut_front(move_pos, copy.k);
 
 		copy.k->k.p.offset += shift >> 9;
-		bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));
+		bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
 
-		ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
+		ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
 		if (ret)
-			goto bkey_err;
+			continue;
 
 		if (bkey_cmp(atomic_end, copy.k->k.p)) {
			if (insert) {
@@ -2536,6 +2671,7 @@ reassemble:
 		delete.k.p = copy.k->k.p;
 		delete.k.size = copy.k->k.size;
 		delete.k.p.offset -= shift >> 9;
+		bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
 
 		next_pos = insert
			? bkey_start_pos(&delete.k)
			: delete.k.p;
@@ -2556,26 +2692,25 @@ reassemble:
			BUG_ON(ret);
 		}
 
-		bch2_btree_iter_set_pos(src, bkey_start_pos(&delete.k));
-
-		ret =   bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
-			bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
+		ret =   bch2_btree_iter_traverse(&del) ?:
+			bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
+			bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
			bch2_trans_commit(&trans, &disk_res,
					  &inode->ei_journal_seq,
					  BTREE_INSERT_NOFAIL);
 		bch2_disk_reservation_put(c, &disk_res);
-bkey_err:
-		if (!ret)
-			bch2_btree_iter_set_pos(src, next_pos);
-
-		if (ret == -EINTR)
-			ret = 0;
-		if (ret)
-			goto err;
-
-		bch2_trans_cond_resched(&trans);
+		if (!ret)
+			bch2_btree_iter_set_pos(&src, next_pos);
 	}
-	bch2_trans_unlock(&trans);
+	bch2_trans_iter_exit(&trans, &del);
+	bch2_trans_iter_exit(&trans, &dst);
+	bch2_trans_iter_exit(&trans, &src);
+	bch2_trans_exit(&trans);
+	bch2_bkey_buf_exit(&copy, c);
+
+	if (ret)
+		goto err;
 
 	if (!insert) {
 		i_size_write(&inode->v, new_size);
@@ -2585,85 +2720,59 @@ bkey_err:
 		mutex_unlock(&inode->ei_update_lock);
 	}
 err:
-	bch2_trans_exit(&trans);
-	bkey_on_stack_exit(&copy, c);
 	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
 	inode_unlock(&inode->v);
 	return ret;
 }
 
-static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
-			    loff_t offset, loff_t len)
+static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
+			     u64 start_sector, u64 end_sector)
 {
-	struct address_space *mapping = inode->v.i_mapping;
 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
 	struct btree_trans trans;
-	struct btree_iter *iter;
-	struct bpos end_pos;
-	loff_t end = offset + len;
-	loff_t block_start = round_down(offset, block_bytes(c));
-	loff_t block_end = round_up(end, block_bytes(c));
-	unsigned sectors;
+	struct btree_iter iter;
+	struct bpos end_pos = POS(inode->v.i_ino, end_sector);
 	unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
-	int ret;
-
-	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
-
-	inode_lock(&inode->v);
-	inode_dio_wait(&inode->v);
-	bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
-	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
-		ret = inode_newsize_ok(&inode->v, end);
-		if (ret)
-			goto err;
-	}
-
-	if (mode & FALLOC_FL_ZERO_RANGE) {
-		ret = __bch2_truncate_page(inode,
-					   offset >> PAGE_SHIFT,
-					   offset, end);
-
-		if (!ret &&
-		    offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
-			ret = __bch2_truncate_page(inode,
-						   end >> PAGE_SHIFT,
-						   offset, end);
-
-		if (unlikely(ret))
-			goto err;
-
-		truncate_pagecache_range(&inode->v, offset, end - 1);
-	}
+	int ret = 0;
 
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
-			POS(inode->v.i_ino, block_start >> 9),
+	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
+
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			POS(inode->v.i_ino, start_sector),
			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-	end_pos = POS(inode->v.i_ino, block_end >> 9);
 
-	while (bkey_cmp(iter->pos, end_pos) < 0) {
+	while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
 		s64 i_sectors_delta = 0;
 		struct disk_reservation disk_res = { 0 };
 		struct quota_res quota_res = { 0 };
 		struct bkey_i_reservation reservation;
 		struct bkey_s_c k;
+		unsigned sectors;
+		u32 snapshot;
 
 		bch2_trans_begin(&trans);
 
-		k = bch2_btree_iter_peek_slot(iter);
+		ret = bch2_subvolume_get_snapshot(&trans,
					inode->ei_subvol, &snapshot);
+		if (ret)
+			goto bkey_err;
+
+		bch2_btree_iter_set_snapshot(&iter, snapshot);
+
+		k = bch2_btree_iter_peek_slot(&iter);
 		if ((ret = bkey_err(k)))
			goto bkey_err;
 
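/*
 * (Aside for readers, not part of the commit: the loop above follows the
 * transaction-restart idiom this patch applies throughout fs-io.c --
 * begin the transaction, re-resolve the subvolume's snapshot each pass,
 * and treat -EINTR as "run the loop body again". A minimal sketch of the
 * idiom using only helpers visible in this diff; walk_one_slot() is a
 * hypothetical name:)
 */
#if 0	/* illustration only */
static int walk_one_slot(struct btree_trans *trans, subvol_inum inum, u64 offset)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot;
	int ret;

	bch2_trans_begin(trans);

	/* snapshot IDs may change across a restart, so look one up each pass: */
	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		return ret;	/* -EINTR here means: restart from bch2_trans_begin() */

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, offset, snapshot),
			     BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	bch2_trans_iter_exit(trans, &iter);
	return ret;	/* caller: if (ret == -EINTR) goto retry; */
}
#endif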
 		/* already reserved */
 		if (k.k->type == KEY_TYPE_reservation &&
		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
-			bch2_btree_iter_next_slot(iter);
+			bch2_btree_iter_advance(&iter);
			continue;
 		}
 
 		if (bkey_extent_is_data(k.k) &&
		    !(mode & FALLOC_FL_ZERO_RANGE)) {
-			bch2_btree_iter_next_slot(iter);
+			bch2_btree_iter_advance(&iter);
			continue;
 		}
 
@@ -2672,7 +2781,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
 		reservation.k.p = k.k->p;
 		reservation.k.size = k.k->size;
 
-		bch2_cut_front(iter->pos, &reservation.k_i);
+		bch2_cut_front(iter.pos, &reservation.k_i);
 		bch2_cut_back(end_pos, &reservation.k_i);
 
 		sectors = reservation.k.size;
@@ -2696,19 +2805,63 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
			reservation.v.nr_replicas = disk_res.nr_replicas;
 		}
 
-		ret = bch2_extent_update(&trans, iter, &reservation.k_i,
+		ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
+				&reservation.k_i,
				&disk_res, &inode->ei_journal_seq,
-				0, &i_sectors_delta);
+				0, &i_sectors_delta, true);
 		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
 bkey_err:
 		bch2_quota_reservation_put(c, inode, &quota_res);
 		bch2_disk_reservation_put(c, &disk_res);
 		if (ret == -EINTR)
			ret = 0;
+	}
+	bch2_trans_iter_exit(&trans, &iter);
+	bch2_trans_exit(&trans);
+	return ret;
+}
+
+static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
+			    loff_t offset, loff_t len)
+{
+	struct address_space *mapping = inode->v.i_mapping;
+	struct bch_fs *c = inode->v.i_sb->s_fs_info;
+	loff_t end = offset + len;
+	loff_t block_start = round_down(offset, block_bytes(c));
+	loff_t block_end = round_up(end, block_bytes(c));
+	int ret;
+
+	inode_lock(&inode->v);
+	inode_dio_wait(&inode->v);
+	bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+
+	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
+		ret = inode_newsize_ok(&inode->v, end);
 		if (ret)
			goto err;
 	}
 
+	if (mode & FALLOC_FL_ZERO_RANGE) {
+		ret = __bch2_truncate_page(inode,
+					   offset >> PAGE_SHIFT,
+					   offset, end);
+
+		if (!ret &&
+		    offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
+			ret = __bch2_truncate_page(inode,
+						   end >> PAGE_SHIFT,
+						   offset, end);
+
+		if (unlikely(ret))
+			goto err;
+
+		truncate_pagecache_range(&inode->v, offset, end - 1);
+	}
+
+	ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
+	if (ret)
+		goto err;
+
 	/*
	 * Do we need to extend the file?
* @@ -2719,27 +2872,13 @@ bkey_err: if (end >= inode->v.i_size && (!(mode & FALLOC_FL_KEEP_SIZE) || (mode & FALLOC_FL_ZERO_RANGE))) { - struct btree_iter *inode_iter; - struct bch_inode_unpacked inode_u; - - do { - bch2_trans_begin(&trans); - inode_iter = bch2_inode_peek(&trans, &inode_u, - inode->v.i_ino, 0); - ret = PTR_ERR_OR_ZERO(inode_iter); - } while (ret == -EINTR); - - bch2_trans_unlock(&trans); - - if (ret) - goto err; /* * Sync existing appends before extending i_size, * as in bch2_extend(): */ ret = filemap_write_and_wait_range(mapping, - inode_u.bi_size, S64_MAX); + inode->ei_inode.bi_size, S64_MAX); if (ret) goto err; @@ -2753,7 +2892,6 @@ bkey_err: mutex_unlock(&inode->ei_update_lock); } err: - bch2_trans_exit(&trans); bch2_pagecache_block_put(&inode->ei_pagecache_lock); inode_unlock(&inode->v); return ret; @@ -2870,8 +3008,8 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, mark_range_unallocated(src, pos_src, pos_src + aligned_len); ret = bch2_remap_range(c, - POS(dst->v.i_ino, pos_dst >> 9), - POS(src->v.i_ino, pos_src >> 9), + inode_inum(dst), pos_dst >> 9, + inode_inum(src), pos_src >> 9, aligned_len >> 9, &dst->ei_journal_seq, pos_dst + len, &i_sectors_delta); @@ -2890,6 +3028,11 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, if (pos_dst + ret > dst->v.i_size) i_size_write(&dst->v, pos_dst + ret); spin_unlock(&dst->v.i_lock); + + if (((file_dst->f_flags & (__O_SYNC | O_DSYNC)) || + IS_SYNC(file_inode(file_dst))) && + !c->opts.journal_flush_disabled) + ret = bch2_journal_flush_seq(&c->journal, dst->ei_journal_seq); err: bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst); @@ -2955,9 +3098,11 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) struct bch_inode_info *inode = file_bch_inode(file); struct bch_fs *c = inode->v.i_sb->s_fs_info; struct btree_trans trans; - struct btree_iter *iter; + struct btree_iter iter; struct bkey_s_c k; + subvol_inum inum = inode_inum(inode); u64 isize, next_data = MAX_LFS_FILESIZE; + u32 snapshot; int ret; isize = i_size_read(&inode->v); @@ -2965,9 +3110,15 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) return -ENXIO; bch2_trans_init(&trans, c, 0, 0); +retry: + bch2_trans_begin(&trans); - for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, - POS(inode->v.i_ino, offset >> 9), 0, k, ret) { + ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); + if (ret) + goto err; + + for_each_btree_key(&trans, iter, BTREE_ID_extents, + SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) { if (k.k->p.inode != inode->v.i_ino) { break; } else if (bkey_extent_is_data(k.k)) { @@ -2976,6 +3127,10 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) } else if (k.k->p.offset >> 9 > isize) break; } + bch2_trans_iter_exit(&trans, &iter); +err: + if (ret == -EINTR) + goto retry; ret = bch2_trans_exit(&trans) ?: ret; if (ret) @@ -3013,8 +3168,8 @@ static loff_t page_hole_offset(struct address_space *mapping, loff_t offset) int pg_offset; loff_t ret = -1; - page = find_lock_entry(mapping, index); - if (!page || xa_is_value(page)) + page = find_lock_page(mapping, index); + if (!page) return offset; pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1)); @@ -3050,9 +3205,11 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) struct bch_inode_info *inode = file_bch_inode(file); struct bch_fs *c = inode->v.i_sb->s_fs_info; struct btree_trans trans; - struct btree_iter *iter; + struct btree_iter iter; struct bkey_s_c k; + subvol_inum inum = 
inode_inum(inode); u64 isize, next_hole = MAX_LFS_FILESIZE; + u32 snapshot; int ret; isize = i_size_read(&inode->v); @@ -3060,9 +3217,15 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) return -ENXIO; bch2_trans_init(&trans, c, 0, 0); +retry: + bch2_trans_begin(&trans); + + ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); + if (ret) + goto err; - for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, - POS(inode->v.i_ino, offset >> 9), + for_each_btree_key(&trans, iter, BTREE_ID_extents, + SPOS(inode->v.i_ino, offset >> 9, snapshot), BTREE_ITER_SLOTS, k, ret) { if (k.k->p.inode != inode->v.i_ino) { next_hole = bch2_seek_pagecache_hole(&inode->v, @@ -3079,6 +3242,10 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) offset = max(offset, bkey_start_offset(k.k) << 9); } } + bch2_trans_iter_exit(&trans, &iter); +err: + if (ret == -EINTR) + goto retry; ret = bch2_trans_exit(&trans) ?: ret; if (ret)
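
[Appendix -- editorial illustration, not part of the diff above.]

The dio-write vs. page-fault deadlock avoidance added at the top of this
patch (faults_disabled_mapping() and friends) is a small protocol built on a
tagged pointer: bit 0 of current->faults_disabled_mapping means "the fault
path dropped and retook your pagecache locks". A hedged sketch of the dio
side, mirroring what bch2_dio_write_loop() does in this diff;
dio_get_user_pages() is a hypothetical wrapper name:

static int dio_get_user_pages(struct dio_write *dio, struct bio *bio,
			      struct address_space *mapping)
{
	bool dropped_locks;
	int ret;

	BUG_ON(current->faults_disabled_mapping);
	/* faults on @mapping now SIGBUS instead of deadlocking: */
	current->faults_disabled_mapping = mapping;

	ret = bio_iov_iter_get_pages(bio, &dio->iter);	/* may fault on the user buffer */

	dropped_locks = fdm_dropped_locks();	/* bit 0 of the tagged pointer */
	current->faults_disabled_mapping = NULL;

	/*
	 * A fault that dropped & retook ei_pagecache_lock is not an error:
	 * the caller re-shoots down the page cache and retries the loop.
	 */
	if (dropped_locks && ret)
		ret = 0;
	return ret;
}

On the fault side, bch2_page_fault() resolves the lock inversion by dropping
the other file's pagecache-block lock, cycling its own pagecache-add lock,
retaking the block lock, then calling set_fdm_dropped_locks() and returning
VM_FAULT_SIGBUS -- which is what lets the sketch above observe the flag,
clear the error, and retry.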