Update bcachefs sources to 1a510b00b6 bcachefs: Increase BTREE_TRANS_MEM_MAX
diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index bce25dde1172cc93cfb7524d0359d77cf10ae275..b6eaaa0dd67c71d95daef56bf619d2c543ba781d 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -3,7 +3,7 @@
 
 #include "bcachefs.h"
 #include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_update.h"
 #include "buckets.h"
 #include "clock.h"
@@ -26,6 +26,7 @@
 #include <linux/migrate.h>
 #include <linux/mmu_context.h>
 #include <linux/pagevec.h>
+#include <linux/rmap.h>
 #include <linux/sched/signal.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/uio.h>
 #include <trace/events/bcachefs.h>
 #include <trace/events/writeback.h>
 
+static inline struct address_space *faults_disabled_mapping(void)
+{
+       return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
+}
+
+static inline void set_fdm_dropped_locks(void)
+{
+       current->faults_disabled_mapping =
+               (void *) (((unsigned long) current->faults_disabled_mapping)|1);
+}
+
+static inline bool fdm_dropped_locks(void)
+{
+       return ((unsigned long) current->faults_disabled_mapping) & 1;
+}
+
 struct quota_res {
        u64                             sectors;
 };
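
Note: the helpers added above (faults_disabled_mapping() and friends) pack a
flag into bit 0 of current->faults_disabled_mapping, relying on struct
address_space pointers being at least 2-byte aligned. A minimal,
self-contained sketch of the low-bit pointer-tagging idiom (hypothetical
names, not part of this patch):

	static inline void *tag_ptr(void *p)
	{
		return (void *) ((unsigned long) p | 1UL);
	}

	static inline void *untag_ptr(void *p)
	{
		return (void *) ((unsigned long) p & ~1UL);
	}

	static inline bool ptr_is_tagged(void *p)
	{
		return ((unsigned long) p & 1UL) != 0;
	}
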
@@ -54,6 +71,7 @@ struct dio_write {
                                        sync:1,
                                        free_iov:1;
        struct quota_res                quota_res;
+       u64                             written;
 
        struct iov_iter                 iter;
        struct iovec                    inline_vecs[2];
@@ -66,6 +84,7 @@ struct dio_read {
        struct closure                  cl;
        struct kiocb                    *req;
        long                            ret;
+       bool                            should_dirty;
        struct bch_read_bio             rbio;
 };
 
@@ -263,28 +282,13 @@ static inline struct bch_page_state *bch2_page_state(struct page *page)
 /* for newly allocated pages: */
 static void __bch2_page_state_release(struct page *page)
 {
-       struct bch_page_state *s = __bch2_page_state(page);
-
-       if (!s)
-               return;
-
-       ClearPagePrivate(page);
-       set_page_private(page, 0);
-       put_page(page);
-       kfree(s);
+       kfree(detach_page_private(page));
 }
 
 static void bch2_page_state_release(struct page *page)
 {
-       struct bch_page_state *s = bch2_page_state(page);
-
-       if (!s)
-               return;
-
-       ClearPagePrivate(page);
-       set_page_private(page, 0);
-       put_page(page);
-       kfree(s);
+       EBUG_ON(!PageLocked(page));
+       __bch2_page_state_release(page);
 }
 
 /* for newly allocated pages: */
@@ -298,13 +302,7 @@ static struct bch_page_state *__bch2_page_state_create(struct page *page,
                return NULL;
 
        spin_lock_init(&s->lock);
-       /*
-        * migrate_page_move_mapping() assumes that pages with private data
-        * have their count elevated by 1.
-        */
-       get_page(page);
-       set_page_private(page, (unsigned long) s);
-       SetPagePrivate(page);
+       attach_page_private(page, s);
        return s;
 }
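
Note: both release paths and __bch2_page_state_create() now use
attach_page_private()/detach_page_private(), which fold the page refcount and
PG_private bookkeeping the deleted lines did by hand. Assuming a kernel around
v5.8, where these helpers landed, they behave roughly like:

	static inline void attach_page_private(struct page *page, void *data)
	{
		get_page(page);
		set_page_private(page, (unsigned long) data);
		SetPagePrivate(page);
	}

	static inline void *detach_page_private(struct page *page)
	{
		void *data = (void *) page_private(page);

		if (!PagePrivate(page))
			return NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);

		return data;
	}
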
 
@@ -512,10 +510,35 @@ static void bch2_set_page_dirty(struct bch_fs *c,
 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
 {
        struct file *file = vmf->vma->vm_file;
+       struct address_space *mapping = file->f_mapping;
+       struct address_space *fdm = faults_disabled_mapping();
        struct bch_inode_info *inode = file_bch_inode(file);
        int ret;
 
+       if (fdm == mapping)
+               return VM_FAULT_SIGBUS;
+
+       /* Lock ordering: */
+       if (fdm > mapping) {
+               struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
+
+               if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
+                       goto got_lock;
+
+               bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);
+
+               bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+               bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+
+               bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);
+
+               /* Signal that lock has been dropped: */
+               set_fdm_dropped_locks();
+               return VM_FAULT_SIGBUS;
+       }
+
        bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+got_lock:
        ret = filemap_fault(vmf);
        bch2_pagecache_add_put(&inode->ei_pagecache_lock);
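
Note: the new branch handles a page fault on this file taken while the dio
write path already holds a pagecache-block lock on another mapping (fdm).
Comparing the two mapping pointers imposes a global lock order; on the
out-of-order side the fault trylocks, and failing that drops the dio side's
block lock, briefly cycles this inode's add lock, retakes the block lock,
records the drop in the tag bit, and returns VM_FAULT_SIGBUS so the dio loop
can retry. The address-ordering idiom in isolation, as a sketch:

	/* Sketch: impose a global order on two locks by address. */
	static void lock_two_in_order(struct mutex *a, struct mutex *b)
	{
		if (a > b)
			swap(a, b);
		mutex_lock(a);
		if (b != a)
			mutex_lock(b);	/* higher address is always second */
	}
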
 
@@ -602,18 +625,12 @@ int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
        EBUG_ON(!PageLocked(page));
        EBUG_ON(!PageLocked(newpage));
 
-       ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+       ret = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (ret != MIGRATEPAGE_SUCCESS)
                return ret;
 
-       if (PagePrivate(page)) {
-               ClearPagePrivate(page);
-               get_page(newpage);
-               set_page_private(newpage, page_private(page));
-               set_page_private(page, 0);
-               put_page(page);
-               SetPagePrivate(newpage);
-       }
+       if (PagePrivate(page))
+               attach_page_private(newpage, detach_page_private(page));
 
        if (mode != MIGRATE_SYNC_NO_COPY)
                migrate_page_copy(newpage, page);
@@ -645,41 +662,33 @@ static void bch2_readpages_end_io(struct bio *bio)
        bio_put(bio);
 }
 
-static inline void page_state_init_for_read(struct page *page)
-{
-       SetPagePrivate(page);
-       page->private = 0;
-}
-
 struct readpages_iter {
        struct address_space    *mapping;
        struct page             **pages;
        unsigned                nr_pages;
-       unsigned                nr_added;
        unsigned                idx;
        pgoff_t                 offset;
 };
 
 static int readpages_iter_init(struct readpages_iter *iter,
-                              struct address_space *mapping,
-                              struct list_head *pages, unsigned nr_pages)
+                              struct readahead_control *ractl)
 {
+       unsigned i, nr_pages = readahead_count(ractl);
+
        memset(iter, 0, sizeof(*iter));
 
-       iter->mapping   = mapping;
-       iter->offset    = list_last_entry(pages, struct page, lru)->index;
+       iter->mapping   = ractl->mapping;
+       iter->offset    = readahead_index(ractl);
+       iter->nr_pages  = nr_pages;
 
        iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
        if (!iter->pages)
                return -ENOMEM;
 
-       while (!list_empty(pages)) {
-               struct page *page = list_last_entry(pages, struct page, lru);
-
-               __bch2_page_state_create(page, __GFP_NOFAIL);
-
-               iter->pages[iter->nr_pages++] = page;
-               list_del(&page->lru);
+       nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
+       for (i = 0; i < nr_pages; i++) {
+               __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
+               put_page(iter->pages[i]);
        }
 
        return 0;
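
Note: readpages_iter_init() is converted here from the old ->readpages() page
list to the ->readahead() interface (struct readahead_control, also a ~v5.8
API). The pages arrive already locked and inserted in the page cache;
__readahead_batch() copies up to nr_pages of them into the array, each with an
extra reference, which the loop drops right after attaching its private state
(attach_page_private() takes a reference of its own). For comparison, a
hypothetical minimal ->readahead handler using the one-page-at-a-time helper:

	static void foo_readahead(struct readahead_control *ractl)
	{
		struct page *page;

		while ((page = readahead_page(ractl))) {
			/* page comes locked, with a reference we must drop */
			foo_read_page(ractl->mapping, page);	/* hypothetical */
			put_page(page);
		}
	}

The batch variant used above exists so one large bio can cover many pages.
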
@@ -687,41 +696,9 @@ static int readpages_iter_init(struct readpages_iter *iter,
 
 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
 {
-       struct page *page;
-       unsigned i;
-       int ret;
-
-       BUG_ON(iter->idx > iter->nr_added);
-       BUG_ON(iter->nr_added > iter->nr_pages);
-
-       if (iter->idx < iter->nr_added)
-               goto out;
-
-       while (1) {
-               if (iter->idx == iter->nr_pages)
-                       return NULL;
-
-               ret = add_to_page_cache_lru_vec(iter->mapping,
-                               iter->pages     + iter->nr_added,
-                               iter->nr_pages  - iter->nr_added,
-                               iter->offset    + iter->nr_added,
-                               GFP_NOFS);
-               if (ret > 0)
-                       break;
-
-               page = iter->pages[iter->nr_added];
-               iter->idx++;
-               iter->nr_added++;
-
-               __bch2_page_state_release(page);
-               put_page(page);
-       }
-
-       iter->nr_added += ret;
+       if (iter->idx >= iter->nr_pages)
+               return NULL;
 
-       for (i = iter->idx; i < iter->nr_added; i++)
-               put_page(iter->pages[i]);
-out:
        EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
 
        return iter->pages[iter->idx];
@@ -815,7 +792,7 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
                       struct readpages_iter *readpages_iter)
 {
        struct bch_fs *c = trans->c;
-       struct bkey_on_stack sk;
+       struct bkey_buf sk;
        int flags = BCH_READ_RETRY_IF_STALE|
                BCH_READ_MAY_PROMOTE;
        int ret = 0;
@@ -823,11 +800,12 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
        rbio->c = c;
        rbio->start_time = local_clock();
 
-       bkey_on_stack_init(&sk);
+       bch2_bkey_buf_init(&sk);
 retry:
        while (1) {
                struct bkey_s_c k;
                unsigned bytes, sectors, offset_into_extent;
+               enum btree_id data_btree = BTREE_ID_extents;
 
                bch2_btree_iter_set_pos(iter,
                                POS(inum, rbio->bio.bi_iter.bi_sector));
@@ -837,19 +815,19 @@ retry:
                if (ret)
                        break;
 
-               bkey_on_stack_realloc(&sk, c, k.k->u64s);
-               bkey_reassemble(sk.k, k);
-               k = bkey_i_to_s_c(sk.k);
-
                offset_into_extent = iter->pos.offset -
                        bkey_start_offset(k.k);
                sectors = k.k->size - offset_into_extent;
 
-               ret = bch2_read_indirect_extent(trans,
-                                       &offset_into_extent, sk.k);
+               bch2_bkey_buf_reassemble(&sk, c, k);
+
+               ret = bch2_read_indirect_extent(trans, &data_btree,
+                                       &offset_into_extent, &sk);
                if (ret)
                        break;
 
+               k = bkey_i_to_s_c(sk.k);
+
                sectors = min(sectors, k.k->size - offset_into_extent);
 
                bch2_trans_unlock(trans);
@@ -867,7 +845,8 @@ retry:
                if (bkey_extent_is_allocation(k.k))
                        bch2_add_page_sectors(&rbio->bio, k);
 
-               bch2_read_extent(c, rbio, k, offset_into_extent, flags);
+               bch2_read_extent(trans, rbio, iter->pos,
+                                data_btree, k, offset_into_extent, flags);
 
                if (flags & BCH_READ_LAST_FRAGMENT)
                        break;
@@ -880,17 +859,18 @@ retry:
                goto retry;
 
        if (ret) {
-               bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
+               bch_err_inum_ratelimited(c, inum,
+                               "read error %i from btree lookup", ret);
+               rbio->bio.bi_status = BLK_STS_IOERR;
                bio_endio(&rbio->bio);
        }
 
-       bkey_on_stack_exit(&sk, c);
+       bch2_bkey_buf_exit(&sk, c);
 }
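
Note: bchfs_read() now copies each key into a bch2_bkey_buf (the renamed
successor of bkey_on_stack) before calling bch2_read_indirect_extent(), which
for reflinked extents rewrites the key and switches data_btree away from
BTREE_ID_extents so the read is issued against the indirect extent. Roughly,
as a sketch of the indirection:

	if (k.k->type == KEY_TYPE_reflink_p) {
		*data_btree = BTREE_ID_reflink;
		/* ... look up the indirect extent the pointer refers to,
		 * adjust offset_into_extent, and reassemble into sk ... */
	}
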
 
-int bch2_readpages(struct file *file, struct address_space *mapping,
-                  struct list_head *pages, unsigned nr_pages)
+void bch2_readahead(struct readahead_control *ractl)
 {
-       struct bch_inode_info *inode = to_bch_ei(mapping->host);
+       struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
        struct btree_trans trans;
@@ -899,12 +879,11 @@ int bch2_readpages(struct file *file, struct address_space *mapping,
        struct readpages_iter readpages_iter;
        int ret;
 
-       ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
+       ret = readpages_iter_init(&readpages_iter, ractl);
        BUG_ON(ret);
 
        bch2_trans_init(&trans, c, 0, 0);
-
-       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
                                   BTREE_ITER_SLOTS);
 
        bch2_pagecache_add_get(&inode->ei_pagecache_lock);
@@ -914,7 +893,7 @@ int bch2_readpages(struct file *file, struct address_space *mapping,
                unsigned n = min_t(unsigned,
                                   readpages_iter.nr_pages -
                                   readpages_iter.idx,
-                                  BIO_MAX_PAGES);
+                                  BIO_MAX_VECS);
                struct bch_read_bio *rbio =
                        rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
                                  opts);
@@ -932,10 +911,9 @@ int bch2_readpages(struct file *file, struct address_space *mapping,
 
        bch2_pagecache_add_put(&inode->ei_pagecache_lock);
 
+       bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
        kfree(readpages_iter.pages);
-
-       return 0;
 }
 
 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
@@ -952,11 +930,12 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
        BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
 
        bch2_trans_init(&trans, c, 0, 0);
-       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
                                   BTREE_ITER_SLOTS);
 
        bchfs_read(&trans, iter, rbio, inum, NULL);
 
+       bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
 }
 
@@ -1039,7 +1018,11 @@ static void bch2_writepage_io_done(struct closure *cl)
        struct bio_vec *bvec;
        unsigned i;
 
+       up(&io->op.c->io_in_flight);
+
        if (io->op.error) {
+               set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
+
                bio_for_each_segment_all(bvec, bio, iter) {
                        struct bch_page_state *s;
 
@@ -1099,6 +1082,8 @@ static void bch2_writepage_do_io(struct bch_writepage_state *w)
 {
        struct bch_writepage_io *io = w->io;
 
+       down(&io->op.c->io_in_flight);
+
        w->io = NULL;
        closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
        continue_at(&io->cl, bch2_writepage_io_done, NULL);
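
Note: the down() added here pairs with the up() in bch2_writepage_io_done()
above: c->io_in_flight acts as a counting semaphore bounding how many
writeback (and, further down, dio write) bios are outstanding at once. The
throttle pattern in isolation, with a hypothetical limit:

	struct semaphore io_in_flight;

	sema_init(&io_in_flight, 8);	/* hypothetical: 8 bios in flight max */

	down(&io_in_flight);		/* sleeps while at the limit */
	submit_bio(bio);
	/* ... in the bio completion path: */
	up(&io_in_flight);
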
@@ -1117,8 +1102,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
 {
        struct bch_write_op *op;
 
-       w->io = container_of(bio_alloc_bioset(GFP_NOFS,
-                                             BIO_MAX_PAGES,
+       w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
                                              &c->writepage_bioset),
                             struct bch_writepage_io, op.wbio.bio);
 
@@ -1239,8 +1223,9 @@ do_io:
 
                if (w->io &&
                    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
-                    bio_full(&w->io->op.wbio.bio) ||
-                    w->io->op.wbio.bio.bi_iter.bi_size >= (256U << 20) ||
+                    bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
+                    w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
+                    (BIO_MAX_VECS * PAGE_SIZE) ||
                     bio_end_sector(&w->io->op.wbio.bio) != sector))
                        bch2_writepage_do_io(w);
 
@@ -1515,24 +1500,24 @@ retry_reservation:
                if (!pg_copied)
                        break;
 
+               if (!PageUptodate(page) &&
+                   pg_copied != PAGE_SIZE &&
+                   pos + copied + pg_copied < inode->v.i_size) {
+                       zero_user(page, 0, PAGE_SIZE);
+                       break;
+               }
+
                flush_dcache_page(page);
                iov_iter_advance(iter, pg_copied);
                copied += pg_copied;
+
+               if (pg_copied != pg_len)
+                       break;
        }
 
        if (!copied)
                goto out;
 
-       if (copied < len &&
-           ((offset + copied) & (PAGE_SIZE - 1))) {
-               struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
-
-               if (!PageUptodate(page)) {
-                       zero_user(page, 0, PAGE_SIZE);
-                       copied -= (offset + copied) & (PAGE_SIZE - 1);
-               }
-       }
-
        spin_lock(&inode->v.i_lock);
        if (pos + copied > inode->v.i_size)
                i_size_write(&inode->v, pos + copied);
@@ -1629,6 +1614,7 @@ again:
                }
                pos += ret;
                written += ret;
+               ret = 0;
 
                balance_dirty_pages_ratelimited(mapping);
        } while (iov_iter_count(iter));
@@ -1640,12 +1626,22 @@ again:
 
 /* O_DIRECT reads */
 
+static void bio_check_or_release(struct bio *bio, bool check_dirty)
+{
+       if (check_dirty) {
+               bio_check_pages_dirty(bio);
+       } else {
+               bio_release_pages(bio, false);
+               bio_put(bio);
+       }
+}
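
Note: this helper centralizes the two ways a dio read hands pages back. Reads
into user memory must re-dirty their pages (bio_check_pages_dirty() re-dirties
any page that was cleaned while the read was in flight, deferring to a
workqueue when needed, then frees the bio), while reads backed by kernel pages
must only drop the references, since dirtying would take page locks the caller
may already hold (see the loopback comment further down). Call-site shape, as
a sketch:

	bool should_dirty = iter_is_iovec(iter); /* user-backed buffers only */

	if (should_dirty)
		bio_set_pages_dirty(bio);
	/* ... submit and wait ... */
	bio_check_or_release(bio, should_dirty);
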
+
 static void bch2_dio_read_complete(struct closure *cl)
 {
        struct dio_read *dio = container_of(cl, struct dio_read, cl);
 
        dio->req->ki_complete(dio->req, dio->ret, 0);
-       bio_check_pages_dirty(&dio->rbio.bio);  /* transfers ownership */
+       bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
 }
 
 static void bch2_direct_IO_read_endio(struct bio *bio)
@@ -1660,8 +1656,11 @@ static void bch2_direct_IO_read_endio(struct bio *bio)
 
 static void bch2_direct_IO_read_split_endio(struct bio *bio)
 {
+       struct dio_read *dio = bio->bi_private;
+       bool should_dirty = dio->should_dirty;
+
        bch2_direct_IO_read_endio(bio);
-       bio_check_pages_dirty(bio);     /* transfers ownership */
+       bio_check_or_release(bio, should_dirty);
 }
 
 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
@@ -1690,7 +1689,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
        iter->count -= shorten;
 
        bio = bio_alloc_bioset(GFP_KERNEL,
-                              iov_iter_npages(iter, BIO_MAX_PAGES),
+                              iov_iter_npages(iter, BIO_MAX_VECS),
                               &c->dio_read_bioset);
 
        bio->bi_end_io = bch2_direct_IO_read_endio;
@@ -1715,11 +1714,17 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
 
        dio->req        = req;
        dio->ret        = ret;
+       /*
+        * This is one of the sketchier things I've encountered: we have to skip
+        * the dirtying of requests that are internal to the kernel (i.e. from
+        * loopback), because we'll deadlock on page_lock.
+        */
+       dio->should_dirty = iter_is_iovec(iter);
 
        goto start;
        while (iter->count) {
                bio = bio_alloc_bioset(GFP_KERNEL,
-                                      iov_iter_npages(iter, BIO_MAX_PAGES),
+                                      iov_iter_npages(iter, BIO_MAX_VECS),
                                       &c->bio_read);
                bio->bi_end_io          = bch2_direct_IO_read_split_endio;
 start:
@@ -1736,7 +1741,9 @@ start:
                }
 
                offset += bio->bi_iter.bi_size;
-               bio_set_pages_dirty(bio);
+
+               if (dio->should_dirty)
+                       bio_set_pages_dirty(bio);
 
                if (iter->count)
                        closure_get(&dio->cl);
@@ -1750,7 +1757,7 @@ start:
                closure_sync(&dio->cl);
                closure_debug_destroy(&dio->cl);
                ret = dio->ret;
-               bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
+               bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
                return ret;
        } else {
                return -EIOCBQUEUED;
@@ -1796,39 +1803,65 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 
 /* O_DIRECT writes */
 
+static void bch2_dio_write_loop_async(struct bch_write_op *);
+
 static long bch2_dio_write_loop(struct dio_write *dio)
 {
        bool kthread = (current->flags & PF_KTHREAD) != 0;
-       struct bch_fs *c = dio->op.c;
        struct kiocb *req = dio->req;
        struct address_space *mapping = req->ki_filp->f_mapping;
        struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bio *bio = &dio->op.wbio.bio;
        struct bvec_iter_all iter;
        struct bio_vec *bv;
-       unsigned unaligned;
-       u64 new_i_size;
-       bool sync;
+       unsigned unaligned, iter_count;
+       bool sync = dio->sync, dropped_locks;
        long ret;
 
        if (dio->loop)
                goto loop;
 
+       down(&c->io_in_flight);
+
        while (1) {
+               iter_count = dio->iter.count;
+
                if (kthread)
-                       use_mm(dio->mm);
+                       kthread_use_mm(dio->mm);
                BUG_ON(current->faults_disabled_mapping);
                current->faults_disabled_mapping = mapping;
 
                ret = bio_iov_iter_get_pages(bio, &dio->iter);
 
+               dropped_locks = fdm_dropped_locks();
+
                current->faults_disabled_mapping = NULL;
                if (kthread)
-                       unuse_mm(dio->mm);
+                       kthread_unuse_mm(dio->mm);
+
+               /*
+                * If the fault handler returned an error but also signalled
+                * that it dropped & retook ei_pagecache_lock, we just need to
+                * re-shoot down the page cache and retry:
+                */
+               if (dropped_locks && ret)
+                       ret = 0;
 
                if (unlikely(ret < 0))
                        goto err;
 
+               if (unlikely(dropped_locks)) {
+                       ret = write_invalidate_inode_pages_range(mapping,
+                                       req->ki_pos,
+                                       req->ki_pos + iter_count - 1);
+                       if (unlikely(ret))
+                               goto err;
+
+                       if (!bio->bi_iter.bi_size)
+                               continue;
+               }
+
                unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
                bio->bi_iter.bi_size -= unaligned;
                iov_iter_revert(&dio->iter, unaligned);
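
Note: this is the dio side of the deadlock avoidance added to
bch2_page_fault(): bio_iov_iter_get_pages() can fault on this file's own
mapping, and the fault handler may have dropped and retaken our
pagecache-block lock, recording that in the tag bit. In that case any error is
discarded, the range is written back and invalidated again, and the loop
retries. Reduced to a sketch (pos/end standing for the write's byte range):

	for (;;) {
		current->faults_disabled_mapping = mapping;
		ret = bio_iov_iter_get_pages(bio, iter);	/* may fault */
		dropped = fdm_dropped_locks();
		current->faults_disabled_mapping = NULL;

		if (!dropped)
			break;		/* ret is the final result */
		/* locks were cycled: shoot down the range and retry */
		write_invalidate_inode_pages_range(mapping, pos, end);
	}
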
@@ -1844,8 +1877,27 @@ static long bch2_dio_write_loop(struct dio_write *dio)
                        goto err;
                }
 
-               dio->op.pos = POS(inode->v.i_ino,
-                                 (req->ki_pos >> 9) + dio->op.written);
+               bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
+               dio->op.end_io          = bch2_dio_write_loop_async;
+               dio->op.target          = dio->op.opts.foreground_target;
+               op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
+               dio->op.write_point     = writepoint_hashed((unsigned long) current);
+               dio->op.nr_replicas     = dio->op.opts.data_replicas;
+               dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
+
+               if ((req->ki_flags & IOCB_DSYNC) &&
+                   !c->opts.journal_flush_disabled)
+                       dio->op.flags |= BCH_WRITE_FLUSH;
+               dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
+
+               ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
+                                               dio->op.opts.data_replicas, 0);
+               if (unlikely(ret) &&
+                   !bch2_check_range_allocated(c, dio->op.pos,
+                               bio_sectors(bio),
+                               dio->op.opts.data_replicas,
+                               dio->op.opts.compression != 0))
+                       goto err;
 
                task_io_account_write(bio->bi_iter.bi_size);
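
Note: the write op is now initialized (and its disk reservation taken) per
bio, inside the loop, instead of once up front for the whole iterator. If
bch2_disk_reservation_get() fails, typically with -ENOSPC, the write may still
proceed when bch2_check_range_allocated() confirms the target range is already
fully allocated with enough replicas and no compression, i.e. a pure overwrite
that cannot consume net space. Shape of the fallback, as a sketch:

	ret = bch2_disk_reservation_get(c, &op->res, sectors, replicas, 0);
	if (ret) {
		if (!bch2_check_range_allocated(c, op->pos, sectors,
						replicas, compressed))
			goto err;	/* genuinely out of space */
		ret = 0;		/* overwrite: no reservation needed */
	}
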
 
@@ -1856,7 +1908,7 @@ static long bch2_dio_write_loop(struct dio_write *dio)
                                iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
                                              GFP_KERNEL);
                                if (unlikely(!iov)) {
-                                       dio->sync = true;
+                                       dio->sync = sync = true;
                                        goto do_io;
                                }
 
@@ -1870,41 +1922,46 @@ do_io:
                dio->loop = true;
                closure_call(&dio->op.cl, bch2_write, NULL, NULL);
 
-               if (dio->sync)
+               if (sync)
                        wait_for_completion(&dio->done);
                else
                        return -EIOCBQUEUED;
 loop:
                i_sectors_acct(c, inode, &dio->quota_res,
                               dio->op.i_sectors_delta);
-               dio->op.i_sectors_delta = 0;
-
-               new_i_size = req->ki_pos + ((u64) dio->op.written << 9);
+               req->ki_pos += (u64) dio->op.written << 9;
+               dio->written += dio->op.written;
 
                spin_lock(&inode->v.i_lock);
-               if (new_i_size > inode->v.i_size)
-                       i_size_write(&inode->v, new_i_size);
+               if (req->ki_pos > inode->v.i_size)
+                       i_size_write(&inode->v, req->ki_pos);
                spin_unlock(&inode->v.i_lock);
 
-               bio_for_each_segment_all(bv, bio, iter)
-                       put_page(bv->bv_page);
-               if (!dio->iter.count || dio->op.error)
+               if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+                       bio_for_each_segment_all(bv, bio, iter)
+                               put_page(bv->bv_page);
+
+               if (dio->op.error) {
+                       set_bit(EI_INODE_ERROR, &inode->ei_flags);
+                       break;
+               }
+
+               if (!dio->iter.count)
                        break;
 
                bio_reset(bio);
                reinit_completion(&dio->done);
        }
 
-       ret = dio->op.error ?: ((long) dio->op.written << 9);
+       ret = dio->op.error ?: ((long) dio->written << 9);
 err:
+       up(&c->io_in_flight);
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       bch2_disk_reservation_put(c, &dio->op.res);
        bch2_quota_reservation_put(c, inode, &dio->quota_res);
 
        if (dio->free_iov)
                kfree(dio->iter.iov);
 
-       sync = dio->sync;
        bio_put(bio);
 
        /* inode->i_dio_count is our ref on inode and thus bch_fs */
@@ -1934,7 +1991,6 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
        struct address_space *mapping = file->f_mapping;
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
-       struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
        struct dio_write *dio;
        struct bio *bio;
        bool locked = true, extending;
@@ -1972,7 +2028,9 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
        }
 
        bio = bio_alloc_bioset(GFP_KERNEL,
-                              iov_iter_npages(iter, BIO_MAX_PAGES),
+                              iov_iter_is_bvec(iter)
+                              ? 0
+                              : iov_iter_npages(iter, BIO_MAX_VECS),
                               &c->dio_write_bioset);
        dio = container_of(bio, struct dio_write, op.wbio.bio);
        init_completion(&dio->done);
@@ -1982,35 +2040,14 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
        dio->sync               = is_sync_kiocb(req) || extending;
        dio->free_iov           = false;
        dio->quota_res.sectors  = 0;
+       dio->written            = 0;
        dio->iter               = *iter;
 
-       bch2_write_op_init(&dio->op, c, opts);
-       dio->op.end_io          = bch2_dio_write_loop_async;
-       dio->op.target          = opts.foreground_target;
-       op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
-       dio->op.write_point     = writepoint_hashed((unsigned long) current);
-       dio->op.flags |= BCH_WRITE_NOPUT_RESERVATION;
-
-       if ((req->ki_flags & IOCB_DSYNC) &&
-           !c->opts.journal_flush_disabled)
-               dio->op.flags |= BCH_WRITE_FLUSH;
-
        ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
                                         iter->count >> 9, true);
        if (unlikely(ret))
                goto err_put_bio;
 
-       dio->op.nr_replicas     = dio->op.opts.data_replicas;
-
-       ret = bch2_disk_reservation_get(c, &dio->op.res, iter->count >> 9,
-                                       dio->op.opts.data_replicas, 0);
-       if (unlikely(ret) &&
-           !bch2_check_range_allocated(c, POS(inode->v.i_ino,
-                                              req->ki_pos >> 9),
-                                       iter->count >> 9,
-                                       dio->op.opts.data_replicas))
-               goto err_put_bio;
-
        ret = write_invalidate_inode_pages_range(mapping,
                                        req->ki_pos,
                                        req->ki_pos + iter->count - 1);
@@ -2021,12 +2058,9 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
 err:
        if (locked)
                inode_unlock(&inode->v);
-       if (ret > 0)
-               req->ki_pos += ret;
        return ret;
 err_put_bio:
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       bch2_disk_reservation_put(c, &dio->op.res);
        bch2_quota_reservation_put(c, inode, &dio->quota_res);
        bio_put(bio);
        inode_dio_end(&inode->v);
@@ -2111,7 +2145,7 @@ static inline int range_has_data(struct bch_fs *c,
 
        bch2_trans_init(&trans, c, 0, 0);
 
-       for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
+       for_each_btree_key(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
                if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
                        break;
 
@@ -2120,6 +2154,7 @@ static inline int range_has_data(struct bch_fs *c,
                        break;
                }
        }
+       bch2_trans_iter_put(&trans, iter);
 
        return bch2_trans_exit(&trans) ?: ret;
 }
@@ -2199,6 +2234,12 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
        ret = bch2_get_page_disk_reservation(c, inode, page, false);
        BUG_ON(ret);
 
+       /*
+        * This removes any writeable userspace mappings; we need to force
+        * .page_mkwrite to be called again before any mmapped writes, to
+        * redirty the full page:
+        */
+       page_mkclean(page);
        __set_page_dirty_nobuffers(page);
 unlock:
        unlock_page(page);
@@ -2213,11 +2254,11 @@ static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
                                    from, round_up(from, PAGE_SIZE));
 }
 
-static int bch2_extend(struct bch_inode_info *inode,
+static int bch2_extend(struct user_namespace *mnt_userns,
+                      struct bch_inode_info *inode,
                       struct bch_inode_unpacked *inode_u,
                       struct iattr *iattr)
 {
-       struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        int ret;
 
@@ -2231,24 +2272,15 @@ static int bch2_extend(struct bch_inode_info *inode,
                return ret;
 
        truncate_setsize(&inode->v, iattr->ia_size);
-       setattr_copy(&inode->v, iattr);
 
-       mutex_lock(&inode->ei_update_lock);
-       ret = bch2_write_inode_size(c, inode, inode->v.i_size,
-                                   ATTR_MTIME|ATTR_CTIME);
-       mutex_unlock(&inode->ei_update_lock);
-
-       return ret;
+       return bch2_setattr_nonsize(mnt_userns, inode, iattr);
 }
 
 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
                                   struct bch_inode_unpacked *bi,
                                   void *p)
 {
-       struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
        bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
-       bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
        return 0;
 }
 
@@ -2262,7 +2294,8 @@ static int bch2_truncate_start_fn(struct bch_inode_info *inode,
        return 0;
 }
 
-int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
+int bch2_truncate(struct user_namespace *mnt_userns,
+                 struct bch_inode_info *inode, struct iattr *iattr)
 {
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
@@ -2273,6 +2306,19 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
        s64 i_sectors_delta = 0;
        int ret = 0;
 
+       /*
+        * If the truncate call will change the size of the file, the
+        * cmtimes should be updated. If the size will not change, we
+        * do not need to update the cmtimes.
+        */
+       if (iattr->ia_size != inode->v.i_size) {
+               if (!(iattr->ia_valid & ATTR_MTIME))
+                       ktime_get_coarse_real_ts64(&iattr->ia_mtime);
+               if (!(iattr->ia_valid & ATTR_CTIME))
+                       ktime_get_coarse_real_ts64(&iattr->ia_ctime);
+               iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
+       }
+
        inode_dio_wait(&inode->v);
        bch2_pagecache_block_get(&inode->ei_pagecache_lock);
 
@@ -2283,18 +2329,31 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
        bch2_trans_init(&trans, c, 0, 0);
        iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
        ret = PTR_ERR_OR_ZERO(iter);
+       bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
 
        if (ret)
                goto err;
 
-       BUG_ON(inode->v.i_size < inode_u.bi_size);
+       /*
+        * Check this before the next assertion; on filesystem error our normal
+        * invariants are a bit broken (truncate has to truncate the page cache
+        * before the inode).
+        */
+       ret = bch2_journal_error(&c->journal);
+       if (ret)
+               goto err;
+
+       WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+               inode->v.i_size < inode_u.bi_size);
 
        if (iattr->ia_size > inode->v.i_size) {
-               ret = bch2_extend(inode, &inode_u, iattr);
+               ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
                goto err;
        }
 
+       iattr->ia_valid &= ~ATTR_SIZE;
+
        ret = bch2_truncate_page(inode, iattr->ia_size);
        if (unlikely(ret))
                goto err;
@@ -2338,12 +2397,11 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
        if (unlikely(ret))
                goto err;
 
-       setattr_copy(&inode->v, iattr);
-
        mutex_lock(&inode->ei_update_lock);
-       ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
-                              ATTR_MTIME|ATTR_CTIME);
+       ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
        mutex_unlock(&inode->ei_update_lock);
+
+       ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
 err:
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
        return ret;
@@ -2351,6 +2409,15 @@ err:
 
 /* fallocate: */
 
+static int inode_update_times_fn(struct bch_inode_info *inode,
+                                struct bch_inode_unpacked *bi, void *p)
+{
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+
+       bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
+       return 0;
+}
+
 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
 {
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -2388,6 +2455,11 @@ static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len
                                  &i_sectors_delta);
                i_sectors_acct(c, inode, NULL, i_sectors_delta);
        }
+
+       mutex_lock(&inode->ei_update_lock);
+       ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+                              ATTR_MTIME|ATTR_CTIME) ?: ret;
+       mutex_unlock(&inode->ei_update_lock);
 err:
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
        inode_unlock(&inode->v);
@@ -2401,19 +2473,16 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 {
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
-       struct bkey_on_stack copy;
+       struct bkey_buf copy;
        struct btree_trans trans;
-       struct btree_iter *src, *dst, *del = NULL;
+       struct btree_iter *src, *dst, *del;
        loff_t shift, new_size;
        u64 src_start;
-       int ret;
+       int ret = 0;
 
        if ((offset | len) & (block_bytes(c) - 1))
                return -EINVAL;
 
-       bkey_on_stack_init(&copy);
-       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
-
        /*
         * We need i_mutex to keep the page cache consistent with the extents
         * btree, and the btree consistent with i_size - we don't need outside
@@ -2469,15 +2538,15 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
                        goto err;
        }
 
-       src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+       bch2_bkey_buf_init(&copy);
+       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+       src = bch2_trans_get_iter(&trans, BTREE_ID_extents,
                        POS(inode->v.i_ino, src_start >> 9),
                        BTREE_ITER_INTENT);
-       BUG_ON(IS_ERR_OR_NULL(src));
-
        dst = bch2_trans_copy_iter(&trans, src);
-       BUG_ON(IS_ERR_OR_NULL(dst));
+       del = bch2_trans_copy_iter(&trans, src);
 
-       while (1) {
+       while (ret == 0 || ret == -EINTR) {
                struct disk_reservation disk_res =
                        bch2_disk_reservation_init(c, 0);
                struct bkey_i delete;
@@ -2485,40 +2554,33 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
                struct bpos next_pos;
                struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
                struct bpos atomic_end;
-               unsigned commit_flags = BTREE_INSERT_NOFAIL|
-                       BTREE_INSERT_ATOMIC|
-                       BTREE_INSERT_USE_RESERVE;
+               unsigned trigger_flags = 0;
 
                k = insert
                        ? bch2_btree_iter_peek_prev(src)
                        : bch2_btree_iter_peek(src);
                if ((ret = bkey_err(k)))
-                       goto bkey_err;
+                       continue;
 
                if (!k.k || k.k->p.inode != inode->v.i_ino)
                        break;
 
-               BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));
-
                if (insert &&
                    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
                        break;
 reassemble:
-               bkey_on_stack_realloc(&copy, c, k.k->u64s);
-               bkey_reassemble(copy.k, k);
+               bch2_bkey_buf_reassemble(&copy, c, k);
 
                if (insert &&
-                   bkey_cmp(bkey_start_pos(k.k), move_pos) < 0) {
+                   bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
                        bch2_cut_front(move_pos, copy.k);
-                       bch2_btree_iter_set_pos(src, bkey_start_pos(&copy.k->k));
-               }
 
                copy.k->k.p.offset += shift >> 9;
                bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));
 
                ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
                if (ret)
-                       goto bkey_err;
+                       continue;
 
                if (bkey_cmp(atomic_end, copy.k->k.p)) {
                        if (insert) {
@@ -2531,43 +2593,19 @@ reassemble:
                }
 
                bkey_init(&delete.k);
-               delete.k.p = src->pos;
-               bch2_key_resize(&delete.k, copy.k->k.size);
+               delete.k.p = copy.k->k.p;
+               delete.k.size = copy.k->k.size;
+               delete.k.p.offset -= shift >> 9;
+               bch2_btree_iter_set_pos(del, bkey_start_pos(&delete.k));
 
                next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
 
-               /*
-                * If the new and old keys overlap (because we're moving an
-                * extent that's bigger than the amount we're collapsing by),
-                * we need to trim the delete key here so they don't overlap
-                * because overlaps on insertions aren't handled before
-                * triggers are run, so the overwrite will get double counted
-                * by the triggers machinery:
-                */
-               if (insert &&
-                   bkey_cmp(bkey_start_pos(&copy.k->k), delete.k.p) < 0) {
-                       bch2_cut_back(bkey_start_pos(&copy.k->k), &delete);
-               } else if (!insert &&
-                          bkey_cmp(copy.k->k.p,
-                                   bkey_start_pos(&delete.k)) > 0) {
-                       bch2_cut_front(copy.k->k.p, &delete);
-
-                       del = bch2_trans_copy_iter(&trans, src);
-                       BUG_ON(IS_ERR_OR_NULL(del));
-
-                       bch2_btree_iter_set_pos(del,
-                               bkey_start_pos(&delete.k));
-               }
-
-               bch2_trans_update(&trans, dst, copy.k);
-               bch2_trans_update(&trans, del ?: src, &delete);
-
                if (copy.k->k.size == k.k->size) {
                        /*
                         * If we're moving the entire extent, we can skip
                         * running triggers:
                         */
-                       commit_flags |= BTREE_INSERT_NOMARK;
+                       trigger_flags |= BTREE_TRIGGER_NORUN;
                } else {
                        /* We might end up splitting compressed extents: */
                        unsigned nr_ptrs =
@@ -2579,26 +2617,25 @@ reassemble:
                        BUG_ON(ret);
                }
 
-               ret = bch2_trans_commit(&trans, &disk_res,
-                                       &inode->ei_journal_seq,
-                                       commit_flags);
+               ret =   bch2_btree_iter_traverse(del) ?:
+                       bch2_trans_update(&trans, del, &delete, trigger_flags) ?:
+                       bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
+                       bch2_trans_commit(&trans, &disk_res,
+                                         &inode->ei_journal_seq,
+                                         BTREE_INSERT_NOFAIL);
                bch2_disk_reservation_put(c, &disk_res);
-bkey_err:
-               if (del)
-                       bch2_trans_iter_put(&trans, del);
-               del = NULL;
 
                if (!ret)
                        bch2_btree_iter_set_pos(src, next_pos);
-
-               if (ret == -EINTR)
-                       ret = 0;
-               if (ret)
-                       goto err;
-
-               bch2_trans_cond_resched(&trans);
        }
-       bch2_trans_unlock(&trans);
+       bch2_trans_iter_put(&trans, del);
+       bch2_trans_iter_put(&trans, dst);
+       bch2_trans_iter_put(&trans, src);
+       bch2_trans_exit(&trans);
+       bch2_bkey_buf_exit(&copy, c);
+
+       if (ret)
+               goto err;
 
        if (!insert) {
                i_size_write(&inode->v, new_size);
@@ -2608,68 +2645,36 @@ bkey_err:
                mutex_unlock(&inode->ei_update_lock);
        }
 err:
-       bch2_trans_exit(&trans);
-       bkey_on_stack_exit(&copy, c);
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
        inode_unlock(&inode->v);
        return ret;
 }
 
-static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
-                           loff_t offset, loff_t len)
+static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
+                            u64 start_sector, u64 end_sector)
 {
-       struct address_space *mapping = inode->v.i_mapping;
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans trans;
        struct btree_iter *iter;
-       struct bpos end_pos;
-       loff_t end              = offset + len;
-       loff_t block_start      = round_down(offset,    block_bytes(c));
-       loff_t block_end        = round_up(end,         block_bytes(c));
-       unsigned sectors;
+       struct bpos end_pos = POS(inode->v.i_ino, end_sector);
        unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
-       int ret;
-
-       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
-
-       inode_lock(&inode->v);
-       inode_dio_wait(&inode->v);
-       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
-       if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
-               ret = inode_newsize_ok(&inode->v, end);
-               if (ret)
-                       goto err;
-       }
-
-       if (mode & FALLOC_FL_ZERO_RANGE) {
-               ret = __bch2_truncate_page(inode,
-                                          offset >> PAGE_SHIFT,
-                                          offset, end);
-
-               if (!ret &&
-                   offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
-                       ret = __bch2_truncate_page(inode,
-                                                  end >> PAGE_SHIFT,
-                                                  offset, end);
-
-               if (unlikely(ret))
-                       goto err;
+       int ret = 0;
 
-               truncate_pagecache_range(&inode->v, offset, end - 1);
-       }
+       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
 
-       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
-                       POS(inode->v.i_ino, block_start >> 9),
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
+                       POS(inode->v.i_ino, start_sector),
                        BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-       end_pos = POS(inode->v.i_ino, block_end >> 9);
 
-       while (bkey_cmp(iter->pos, end_pos) < 0) {
+       while (!ret && bkey_cmp(iter->pos, end_pos) < 0) {
                s64 i_sectors_delta = 0;
                struct disk_reservation disk_res = { 0 };
                struct quota_res quota_res = { 0 };
                struct bkey_i_reservation reservation;
                struct bkey_s_c k;
+               unsigned sectors;
+
+               bch2_trans_begin(&trans);
 
                k = bch2_btree_iter_peek_slot(iter);
                if ((ret = bkey_err(k)))
@@ -2717,21 +2722,62 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
                        reservation.v.nr_replicas = disk_res.nr_replicas;
                }
 
-               bch2_trans_begin_updates(&trans);
-
                ret = bch2_extent_update(&trans, iter, &reservation.k_i,
                                &disk_res, &inode->ei_journal_seq,
-                               0, &i_sectors_delta);
+                               0, &i_sectors_delta, true);
                i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
 bkey_err:
                bch2_quota_reservation_put(c, inode, &quota_res);
                bch2_disk_reservation_put(c, &disk_res);
                if (ret == -EINTR)
                        ret = 0;
+       }
+       bch2_trans_iter_put(&trans, iter);
+       bch2_trans_exit(&trans);
+       return ret;
+}
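
Note: splitting the allocation loop out into __bchfs_fallocate() lets every
iteration start with bch2_trans_begin(), so a -EINTR from the btree layer
(which in bcachefs means "transaction must be restarted", not a signal) just
falls through into the next pass. The restart idiom, distilled:

	do {
		bch2_trans_begin(&trans);
		ret = do_one_update(&trans) ?:	/* hypothetical op */
		      bch2_trans_commit(&trans, NULL, NULL, 0);
	} while (ret == -EINTR);
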
+
+static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
+                           loff_t offset, loff_t len)
+{
+       struct address_space *mapping = inode->v.i_mapping;
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+       loff_t end              = offset + len;
+       loff_t block_start      = round_down(offset,    block_bytes(c));
+       loff_t block_end        = round_up(end,         block_bytes(c));
+       int ret;
+
+       inode_lock(&inode->v);
+       inode_dio_wait(&inode->v);
+       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+
+       if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
+               ret = inode_newsize_ok(&inode->v, end);
                if (ret)
                        goto err;
        }
 
+       if (mode & FALLOC_FL_ZERO_RANGE) {
+               ret = __bch2_truncate_page(inode,
+                                          offset >> PAGE_SHIFT,
+                                          offset, end);
+
+               if (!ret &&
+                   offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
+                       ret = __bch2_truncate_page(inode,
+                                                  end >> PAGE_SHIFT,
+                                                  offset, end);
+
+               if (unlikely(ret))
+                       goto err;
+
+               truncate_pagecache_range(&inode->v, offset, end - 1);
+       }
+
+       ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
+       if (ret)
+               goto err;
+
        /*
         * Do we need to extend the file?
         *
@@ -2742,27 +2788,13 @@ bkey_err:
        if (end >= inode->v.i_size &&
            (!(mode & FALLOC_FL_KEEP_SIZE) ||
             (mode & FALLOC_FL_ZERO_RANGE))) {
-               struct btree_iter *inode_iter;
-               struct bch_inode_unpacked inode_u;
-
-               do {
-                       bch2_trans_begin(&trans);
-                       inode_iter = bch2_inode_peek(&trans, &inode_u,
-                                                    inode->v.i_ino, 0);
-                       ret = PTR_ERR_OR_ZERO(inode_iter);
-               } while (ret == -EINTR);
-
-               bch2_trans_unlock(&trans);
-
-               if (ret)
-                       goto err;
 
                /*
                 * Sync existing appends before extending i_size,
                 * as in bch2_extend():
                 */
                ret = filemap_write_and_wait_range(mapping,
-                                       inode_u.bi_size, S64_MAX);
+                                       inode->ei_inode.bi_size, S64_MAX);
                if (ret)
                        goto err;
 
@@ -2776,7 +2808,6 @@ bkey_err:
                mutex_unlock(&inode->ei_update_lock);
        }
 err:
-       bch2_trans_exit(&trans);
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
        inode_unlock(&inode->v);
        return ret;
@@ -2913,6 +2944,11 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
        if (pos_dst + ret > dst->v.i_size)
                i_size_write(&dst->v, pos_dst + ret);
        spin_unlock(&dst->v.i_lock);
+
+       if (((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
+            IS_SYNC(file_inode(file_dst))) &&
+           !c->opts.journal_flush_disabled)
+               ret = bch2_journal_flush_seq(&c->journal, dst->ei_journal_seq);
 err:
        bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
 
@@ -2989,7 +3025,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
 
        bch2_trans_init(&trans, c, 0, 0);
 
-       for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
+       for_each_btree_key(&trans, iter, BTREE_ID_extents,
                           POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
                if (k.k->p.inode != inode->v.i_ino) {
                        break;
@@ -2999,6 +3035,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
                } else if (k.k->p.offset >> 9 > isize)
                        break;
        }
+       bch2_trans_iter_put(&trans, iter);
 
        ret = bch2_trans_exit(&trans) ?: ret;
        if (ret)
@@ -3036,8 +3073,8 @@ static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
        int pg_offset;
        loff_t ret = -1;
 
-       page = find_lock_entry(mapping, index);
-       if (!page || xa_is_value(page))
+       page = find_lock_page(mapping, index);
+       if (!page)
                return offset;
 
        pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
@@ -3084,7 +3121,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
 
        bch2_trans_init(&trans, c, 0, 0);
 
-       for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
+       for_each_btree_key(&trans, iter, BTREE_ID_extents,
                           POS(inode->v.i_ino, offset >> 9),
                           BTREE_ITER_SLOTS, k, ret) {
                if (k.k->p.inode != inode->v.i_ino) {
@@ -3102,6 +3139,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
                        offset = max(offset, bkey_start_offset(k.k) << 9);
                }
        }
+       bch2_trans_iter_put(&trans, iter);
 
        ret = bch2_trans_exit(&trans) ?: ret;
        if (ret)