Update bcachefs sources to 3e93567c51 bcachefs: Switch to local_clock() for fastpath...
index 8e6f7300e9b61421931230b73250a4aab8dd40d1..02ef3430a30b69f4d321d162f3b2d52b52fedfb4 100644 (file)
@@ -3,7 +3,7 @@
 
 #include "bcachefs.h"
 #include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
 #include "btree_update.h"
 #include "buckets.h"
 #include "clock.h"
 #include <trace/events/bcachefs.h>
 #include <trace/events/writeback.h>
 
+static inline bool bio_full(struct bio *bio, unsigned len)
+{
+       if (bio->bi_vcnt >= bio->bi_max_vecs)
+               return true;
+       if (bio->bi_iter.bi_size > UINT_MAX - len)
+               return true;
+       return false;
+}
+
 static inline struct address_space *faults_disabled_mapping(void)
 {
        return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
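
The bio_full() helper added above is a local copy of a block layer predicate that newer kernels no longer export: a bio is full when its bvec table has no free slots, or when adding len more bytes would overflow the 32-bit bi_size counter. A minimal user-space model of those two limits (field and type names here are illustrative stand-ins, not the kernel's struct bio):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for the two struct bio fields the check reads. */
struct toy_bio {
        unsigned short vcnt;     /* segments currently in the bio    */
        unsigned short max_vecs; /* capacity of the bvec array       */
        unsigned int   size;     /* bi_iter.bi_size: bytes queued    */
};

/* Same logic as the bio_full() above: full when either the segment
 * table is exhausted or adding len bytes would overflow bi_size. */
static bool toy_bio_full(const struct toy_bio *bio, unsigned len)
{
        return bio->vcnt >= bio->max_vecs ||
               bio->size > UINT_MAX - len;
}

int main(void)
{
        struct toy_bio bio = { .vcnt = 255, .max_vecs = 256, .size = 1 << 20 };

        printf("%d\n", toy_bio_full(&bio, 4096));       /* 0: one slot left */
        bio.vcnt = 256;
        printf("%d\n", toy_bio_full(&bio, 4096));       /* 1: no slots left */
        return 0;
}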
@@ -84,6 +93,7 @@ struct dio_read {
        struct closure                  cl;
        struct kiocb                    *req;
        long                            ret;
+       bool                            should_dirty;
        struct bch_read_bio             rbio;
 };
 
@@ -98,8 +108,7 @@ static int write_invalidate_inode_pages_range(struct address_space *mapping,
         * is continually redirtying a specific page
         */
        do {
-               if (!mapping->nrpages &&
-                   !mapping->nrexceptional)
+               if (!mapping->nrpages)
                        return 0;
 
                ret = filemap_write_and_wait_range(mapping, start, end);
@@ -142,7 +151,7 @@ static void bch2_quota_reservation_put(struct bch_fs *c,
 static int bch2_quota_reservation_add(struct bch_fs *c,
                                      struct bch_inode_info *inode,
                                      struct quota_res *res,
-                                     unsigned sectors,
+                                     u64 sectors,
                                      bool check_enospc)
 {
        int ret;
@@ -223,6 +232,12 @@ static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
                return;
 
        mutex_lock(&inode->ei_quota_lock);
+       bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
+                               "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
+                               inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
+                               inode->ei_inode.bi_sectors);
+       inode->v.i_blocks += sectors;
+
 #ifdef CONFIG_BCACHEFS_QUOTA
        if (quota_res && sectors > 0) {
                BUG_ON(sectors > quota_res->sectors);
@@ -234,7 +249,6 @@ static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
                bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
        }
 #endif
-       inode->v.i_blocks += sectors;
        mutex_unlock(&inode->ei_quota_lock);
 }
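
The i_sectors_acct() hunk above moves the i_blocks update under ei_quota_lock and adds an underflow check; bch2_fs_inconsistent_on() logs and flags the filesystem, but the update still proceeds. A toy model of the invariant being checked:

#include <stdio.h>

typedef long long s64;
typedef unsigned long long u64;

/* Toy version of the new check: in the real code this runs under
 * ei_quota_lock, and bch2_fs_inconsistent_on() logs and marks the fs
 * inconsistent but the update still happens. */
static void acct(u64 *i_blocks, s64 sectors)
{
        if ((s64) *i_blocks + sectors < 0)
                fprintf(stderr, "i_blocks underflow: %llu + %lld < 0\n",
                        *i_blocks, sectors);
        *i_blocks += sectors;
}

int main(void)
{
        u64 blocks = 8;

        acct(&blocks, -4);      /* fine: 4 blocks left */
        acct(&blocks, -8);      /* trips the check, wraps like the real field */
        printf("%llu\n", blocks);
        return 0;
}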
 
@@ -243,24 +257,26 @@ static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
 /* stored in page->private: */
 
 struct bch_page_sector {
-       /* Uncompressed, fully allocated replicas: */
-       unsigned                nr_replicas:3;
+       /* Uncompressed, fully allocated replicas (or on disk reservation): */
+       unsigned                nr_replicas:4;
 
-       /* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
-       unsigned                replicas_reserved:3;
+       /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
+       unsigned                replicas_reserved:4;
 
        /* i_sectors: */
        enum {
                SECTOR_UNALLOCATED,
                SECTOR_RESERVED,
                SECTOR_DIRTY,
+               SECTOR_DIRTY_RESERVED,
                SECTOR_ALLOCATED,
-       }                       state:2;
+       }                       state:8;
 };
 
 struct bch_page_state {
        spinlock_t              lock;
        atomic_t                write_count;
+       bool                    uptodate;
        struct bch_page_sector  s[PAGE_SECTORS];
 };
 
@@ -311,6 +327,210 @@ static struct bch_page_state *bch2_page_state_create(struct page *page,
        return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
 }
 
+static unsigned bkey_to_sector_state(const struct bkey *k)
+{
+       if (k->type == KEY_TYPE_reservation)
+               return SECTOR_RESERVED;
+       if (bkey_extent_is_allocation(k))
+               return SECTOR_ALLOCATED;
+       return SECTOR_UNALLOCATED;
+}
+
+static void __bch2_page_state_set(struct page *page,
+                                 unsigned pg_offset, unsigned pg_len,
+                                 unsigned nr_ptrs, unsigned state)
+{
+       struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
+       unsigned i;
+
+       BUG_ON(pg_offset >= PAGE_SECTORS);
+       BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+
+       spin_lock(&s->lock);
+
+       for (i = pg_offset; i < pg_offset + pg_len; i++) {
+               s->s[i].nr_replicas = nr_ptrs;
+               s->s[i].state = state;
+       }
+
+       if (i == PAGE_SECTORS)
+               s->uptodate = true;
+
+       spin_unlock(&s->lock);
+}
+
+static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
+                              struct page **pages, unsigned nr_pages)
+{
+       struct btree_trans trans;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
+       unsigned pg_idx = 0;
+       u32 snapshot;
+       int ret;
+
+       bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
+
+       ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+       if (ret)
+               goto err;
+
+       for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+                          SPOS(inum.inum, offset, snapshot),
+                          BTREE_ITER_SLOTS, k, ret) {
+               unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
+               unsigned state = bkey_to_sector_state(k.k);
+
+               while (pg_idx < nr_pages) {
+                       struct page *page = pages[pg_idx];
+                       u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
+                       u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+                       unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
+                       unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;
+
+                       BUG_ON(k.k->p.offset < pg_start);
+                       BUG_ON(bkey_start_offset(k.k) > pg_end);
+
+                       if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
+                               __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);
+
+                       if (k.k->p.offset < pg_end)
+                               break;
+                       pg_idx++;
+               }
+
+               if (pg_idx == nr_pages)
+                       break;
+       }
+
+       offset = iter.pos.offset;
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               goto retry;
+       bch2_trans_exit(&trans);
+
+       return ret;
+}
+
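
bch2_page_state_set() walks BTREE_ID_extents once and fans each extent out over the pages it covers; the pg_offset/pg_len clipping arithmetic is the subtle part. A self-contained sketch of that clipping, assuming 4 KiB pages and 512-byte sectors:

#include <stdio.h>

#define PAGE_SECTORS 8u /* 4 KiB page / 512-byte sectors (assumption) */

typedef unsigned long long u64;

static u64 max_u64(u64 a, u64 b) { return a > b ? a : b; }
static u64 min_u64(u64 a, u64 b) { return a < b ? a : b; }

/* Clip extent [k_start, k_end) (in sectors) to the page starting at
 * pg_start, producing the in-page offset and length that
 * __bch2_page_state_set() is called with above. */
static void clip(u64 k_start, u64 k_end, u64 pg_start,
                 unsigned *pg_offset, unsigned *pg_len)
{
        u64 pg_end = pg_start + PAGE_SECTORS;

        *pg_offset = max_u64(k_start, pg_start) - pg_start;
        *pg_len    = min_u64(k_end, pg_end) - *pg_offset - pg_start;
}

int main(void)
{
        unsigned off, len;

        /* extent [10, 20) against the page covering sectors [8, 16) */
        clip(10, 20, 8, &off, &len);
        printf("offset %u len %u\n", off, len);         /* offset 2 len 6 */
        return 0;
}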
+static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
+{
+       struct bvec_iter iter;
+       struct bio_vec bv;
+       unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
+               ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
+       unsigned state = bkey_to_sector_state(k.k);
+
+       bio_for_each_segment(bv, bio, iter)
+               __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
+                                     bv.bv_len >> 9, nr_ptrs, state);
+}
+
+static void mark_pagecache_unallocated(struct bch_inode_info *inode,
+                                      u64 start, u64 end)
+{
+       pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+       pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+       struct folio_batch fbatch;
+       unsigned i, j;
+
+       if (end <= start)
+               return;
+
+       folio_batch_init(&fbatch);
+
+       while (filemap_get_folios(inode->v.i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+                       u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+                       u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
+                       unsigned pg_offset = max(start, pg_start) - pg_start;
+                       unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
+                       struct bch_page_state *s;
+
+                       BUG_ON(end <= pg_start);
+                       BUG_ON(pg_offset >= PAGE_SECTORS);
+                       BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+
+                       folio_lock(folio);
+                       s = bch2_page_state(&folio->page);
+
+                       if (s) {
+                               spin_lock(&s->lock);
+                               for (j = pg_offset; j < pg_offset + pg_len; j++)
+                                       s->s[j].nr_replicas = 0;
+                               spin_unlock(&s->lock);
+                       }
+
+                       folio_unlock(folio);
+               }
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
+}
+
+static void mark_pagecache_reserved(struct bch_inode_info *inode,
+                                   u64 start, u64 end)
+{
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+       pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+       pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+       struct folio_batch fbatch;
+       s64 i_sectors_delta = 0;
+       unsigned i, j;
+
+       if (end <= start)
+               return;
+
+       folio_batch_init(&fbatch);
+
+       while (filemap_get_folios(inode->v.i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+                       u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+                       u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
+                       unsigned pg_offset = max(start, pg_start) - pg_start;
+                       unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
+                       struct bch_page_state *s;
+
+                       BUG_ON(end <= pg_start);
+                       BUG_ON(pg_offset >= PAGE_SECTORS);
+                       BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+
+                       folio_lock(folio);
+                       s = bch2_page_state(&folio->page);
+
+                       if (s) {
+                               spin_lock(&s->lock);
+                               for (j = pg_offset; j < pg_offset + pg_len; j++)
+                                       switch (s->s[j].state) {
+                                       case SECTOR_UNALLOCATED:
+                                               s->s[j].state = SECTOR_RESERVED;
+                                               break;
+                                       case SECTOR_DIRTY:
+                                               s->s[j].state = SECTOR_DIRTY_RESERVED;
+                                               i_sectors_delta--;
+                                               break;
+                                       default:
+                                               break;
+                                       }
+                               spin_unlock(&s->lock);
+                       }
+
+                       folio_unlock(folio);
+               }
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
+
+       i_sectors_acct(c, inode, NULL, i_sectors_delta);
+}
+
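
mark_pagecache_reserved() above, together with bch2_set_page_dirty() and bch2_clear_page_bits() further down, forms a small per-sector state machine; the new SECTOR_DIRTY_RESERVED state means "dirty in the page cache, but an on-disk reservation already exists", which is why the DIRTY to DIRTY_RESERVED edge decrements i_sectors_delta. A compilable summary of the transitions as read from the three switch statements:

#include <stdio.h>

enum sector_state {
        SECTOR_UNALLOCATED,
        SECTOR_RESERVED,
        SECTOR_DIRTY,
        SECTOR_DIRTY_RESERVED,
        SECTOR_ALLOCATED,
};

/* fallocate over the pagecache (mark_pagecache_reserved) */
static enum sector_state on_reserve(enum sector_state s)
{
        switch (s) {
        case SECTOR_UNALLOCATED: return SECTOR_RESERVED;
        case SECTOR_DIRTY:       return SECTOR_DIRTY_RESERVED;
        default:                 return s;
        }
}

/* sector dirtied by a buffered write (bch2_set_page_dirty) */
static enum sector_state on_dirty(enum sector_state s)
{
        switch (s) {
        case SECTOR_UNALLOCATED: return SECTOR_DIRTY;
        case SECTOR_RESERVED:    return SECTOR_DIRTY_RESERVED;
        default:                 return s;
        }
}

/* page truncated/invalidated (bch2_clear_page_bits) */
static enum sector_state on_clear(enum sector_state s)
{
        switch (s) {
        case SECTOR_DIRTY:          return SECTOR_UNALLOCATED;
        case SECTOR_DIRTY_RESERVED: return SECTOR_RESERVED;
        default:                    return s;
        }
}

int main(void)
{
        enum sector_state s = SECTOR_UNALLOCATED;

        s = on_dirty(s);        /* UNALLOCATED -> DIRTY */
        s = on_reserve(s);      /* DIRTY -> DIRTY_RESERVED */
        s = on_clear(s);        /* DIRTY_RESERVED -> RESERVED: keeps reservation */
        printf("%d\n", s == SECTOR_RESERVED);
        return 0;
}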
 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
 {
        /* XXX: this should not be open coded */
@@ -395,6 +615,8 @@ static int bch2_page_reservation_get(struct bch_fs *c,
        if (!s)
                return -ENOMEM;
 
+       BUG_ON(!s->uptodate);
+
        for (i = round_down(offset, block_bytes(c)) >> 9;
             i < round_up(offset + len, block_bytes(c)) >> 9;
             i++) {
@@ -449,16 +671,22 @@ static void bch2_clear_page_bits(struct page *page)
                disk_res.sectors += s->s[i].replicas_reserved;
                s->s[i].replicas_reserved = 0;
 
-               if (s->s[i].state == SECTOR_DIRTY) {
-                       dirty_sectors++;
+               switch (s->s[i].state) {
+               case SECTOR_DIRTY:
                        s->s[i].state = SECTOR_UNALLOCATED;
+                       --dirty_sectors;
+                       break;
+               case SECTOR_DIRTY_RESERVED:
+                       s->s[i].state = SECTOR_RESERVED;
+                       break;
+               default:
+                       break;
                }
        }
 
        bch2_disk_reservation_put(c, &disk_res);
 
-       if (dirty_sectors)
-               i_sectors_acct(c, inode, NULL, -dirty_sectors);
+       i_sectors_acct(c, inode, NULL, dirty_sectors);
 
        bch2_page_state_release(page);
 }
@@ -491,16 +719,22 @@ static void bch2_set_page_dirty(struct bch_fs *c,
                s->s[i].replicas_reserved += sectors;
                res->disk.sectors -= sectors;
 
-               if (s->s[i].state == SECTOR_UNALLOCATED)
+               switch (s->s[i].state) {
+               case SECTOR_UNALLOCATED:
+                       s->s[i].state = SECTOR_DIRTY;
                        dirty_sectors++;
-
-               s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
+                       break;
+               case SECTOR_RESERVED:
+                       s->s[i].state = SECTOR_DIRTY_RESERVED;
+                       break;
+               default:
+                       break;
+               }
        }
 
        spin_unlock(&s->lock);
 
-       if (dirty_sectors)
-               i_sectors_acct(c, inode, &res->quota, dirty_sectors);
+       i_sectors_acct(c, inode, &res->quota, dirty_sectors);
 
        if (!PageDirty(page))
                __set_page_dirty_nobuffers(page);
@@ -554,7 +788,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
        struct bch2_page_reservation res;
        unsigned len;
        loff_t isize;
-       int ret = VM_FAULT_LOCKED;
+       int ret;
 
        bch2_page_reservation_init(c, inode, &res);
 
@@ -580,6 +814,14 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
 
        len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
 
+       if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
+               if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
+                       unlock_page(page);
+                       ret = VM_FAULT_SIGBUS;
+                       goto out;
+               }
+       }
+
        if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
                unlock_page(page);
                ret = VM_FAULT_SIGBUS;
@@ -590,6 +832,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
        bch2_page_reservation_put(c, inode, &res);
 
        wait_for_stable_page(page);
+       ret = VM_FAULT_LOCKED;
 out:
        bch2_pagecache_add_put(&inode->ei_pagecache_lock);
        sb_end_pagefault(inode->v.i_sb);
@@ -597,47 +840,22 @@ out:
        return ret;
 }
 
-void bch2_invalidatepage(struct page *page, unsigned int offset,
-                        unsigned int length)
+void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
-       if (offset || length < PAGE_SIZE)
+       if (offset || length < folio_size(folio))
                return;
 
-       bch2_clear_page_bits(page);
+       bch2_clear_page_bits(&folio->page);
 }
 
-int bch2_releasepage(struct page *page, gfp_t gfp_mask)
+bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
 {
-       if (PageDirty(page))
-               return 0;
-
-       bch2_clear_page_bits(page);
-       return 1;
-}
-
-#ifdef CONFIG_MIGRATION
-int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
-                     struct page *page, enum migrate_mode mode)
-{
-       int ret;
-
-       EBUG_ON(!PageLocked(page));
-       EBUG_ON(!PageLocked(newpage));
+       if (folio_test_dirty(folio) || folio_test_writeback(folio))
+               return false;
 
-       ret = migrate_page_move_mapping(mapping, newpage, page, 0);
-       if (ret != MIGRATEPAGE_SUCCESS)
-               return ret;
-
-       if (PagePrivate(page))
-               attach_page_private(newpage, detach_page_private(page));
-
-       if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
-       else
-               migrate_page_states(newpage, page);
-       return MIGRATEPAGE_SUCCESS;
+       bch2_clear_page_bits(&folio->page);
+       return true;
 }
-#endif
 
 /* readpage(s): */
 
@@ -703,29 +921,6 @@ static inline struct page *readpage_iter_next(struct readpages_iter *iter)
        return iter->pages[iter->idx];
 }
 
-static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
-{
-       struct bvec_iter iter;
-       struct bio_vec bv;
-       unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
-               ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
-       unsigned state = k.k->type == KEY_TYPE_reservation
-               ? SECTOR_RESERVED
-               : SECTOR_ALLOCATED;
-
-       bio_for_each_segment(bv, bio, iter) {
-               struct bch_page_state *s = bch2_page_state(bv.bv_page);
-               unsigned i;
-
-               for (i = bv.bv_offset >> 9;
-                    i < (bv.bv_offset + bv.bv_len) >> 9;
-                    i++) {
-                       s->s[i].nr_replicas = nr_ptrs;
-                       s->s[i].state = state;
-               }
-       }
-}
-
 static bool extent_partial_reads_expensive(struct bkey_s_c k)
 {
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
@@ -745,7 +940,7 @@ static void readpage_bio_extend(struct readpages_iter *iter,
 {
        while (bio_sectors(bio) < sectors_this_extent &&
               bio->bi_vcnt < bio->bi_max_vecs) {
-               pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
+               pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
                struct page *page = readpage_iter_next(iter);
                int ret;
 
@@ -786,40 +981,63 @@ static void readpage_bio_extend(struct readpages_iter *iter,
        }
 }
 
-static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
-                      struct bch_read_bio *rbio, u64 inum,
+static void bchfs_read(struct btree_trans *trans,
+                      struct bch_read_bio *rbio,
+                      subvol_inum inum,
                       struct readpages_iter *readpages_iter)
 {
        struct bch_fs *c = trans->c;
-       struct bkey_on_stack sk;
+       struct btree_iter iter;
+       struct bkey_buf sk;
        int flags = BCH_READ_RETRY_IF_STALE|
                BCH_READ_MAY_PROMOTE;
+       u32 snapshot;
        int ret = 0;
 
        rbio->c = c;
        rbio->start_time = local_clock();
+       rbio->subvol = inum.subvol;
 
-       bkey_on_stack_init(&sk);
+       bch2_bkey_buf_init(&sk);
 retry:
+       bch2_trans_begin(trans);
+       iter = (struct btree_iter) { NULL };
+
+       ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+       if (ret)
+               goto err;
+
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+                            SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
+                            BTREE_ITER_SLOTS);
        while (1) {
                struct bkey_s_c k;
                unsigned bytes, sectors, offset_into_extent;
+               enum btree_id data_btree = BTREE_ID_extents;
+
+               /*
+                * read_extent -> io_time_reset may cause a transaction restart
+                * without returning an error, we need to check for that here:
+                */
+               ret = bch2_trans_relock(trans);
+               if (ret)
+                       break;
 
-               bch2_btree_iter_set_pos(iter,
-                               POS(inum, rbio->bio.bi_iter.bi_sector));
+               bch2_btree_iter_set_pos(&iter,
+                               POS(inum.inum, rbio->bio.bi_iter.bi_sector));
 
-               k = bch2_btree_iter_peek_slot(iter);
+               k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        break;
 
-               offset_into_extent = iter->pos.offset -
+               offset_into_extent = iter.pos.offset -
                        bkey_start_offset(k.k);
                sectors = k.k->size - offset_into_extent;
 
-               bkey_on_stack_reassemble(&sk, c, k);
+               bch2_bkey_buf_reassemble(&sk, c, k);
 
-               ret = bch2_read_indirect_extent(trans,
+               ret = bch2_read_indirect_extent(trans, &data_btree,
                                        &offset_into_extent, &sk);
                if (ret)
                        break;
@@ -828,8 +1046,6 @@ retry:
 
                sectors = min(sectors, k.k->size - offset_into_extent);
 
-               bch2_trans_unlock(trans);
-
                if (readpages_iter)
                        readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
                                            extent_partial_reads_expensive(k));
@@ -840,27 +1056,35 @@ retry:
                if (rbio->bio.bi_iter.bi_size == bytes)
                        flags |= BCH_READ_LAST_FRAGMENT;
 
-               if (bkey_extent_is_allocation(k.k))
-                       bch2_add_page_sectors(&rbio->bio, k);
+               bch2_bio_page_state_set(&rbio->bio, k);
 
-               bch2_read_extent(trans, rbio, k, offset_into_extent, flags);
+               bch2_read_extent(trans, rbio, iter.pos,
+                                data_btree, k, offset_into_extent, flags);
 
                if (flags & BCH_READ_LAST_FRAGMENT)
                        break;
 
                swap(rbio->bio.bi_iter.bi_size, bytes);
                bio_advance(&rbio->bio, bytes);
+
+               ret = btree_trans_too_many_iters(trans);
+               if (ret)
+                       break;
        }
+err:
+       bch2_trans_iter_exit(trans, &iter);
 
-       if (ret == -EINTR)
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;
 
        if (ret) {
-               bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
+               bch_err_inum_ratelimited(c, inum.inum,
+                               "read error %i from btree lookup", ret);
+               rbio->bio.bi_status = BLK_STS_IOERR;
                bio_endio(&rbio->bio);
        }
 
-       bkey_on_stack_exit(&sk, c);
+       bch2_bkey_buf_exit(&sk, c);
 }
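
The rewritten bchfs_read() also shows the new transaction-restart discipline: restarts surface as BCH_ERR_transaction_restart (tested with bch2_err_matches()) rather than a bare -EINTR, and the whole transaction is retried from bch2_trans_begin(). A toy sketch of the retry idiom:

#include <stdio.h>

/* Hypothetical error code standing in for BCH_ERR_transaction_restart. */
#define ERR_RESTART 1

static int attempts;

/* Stand-in for a btree operation that restarts once before succeeding. */
static int do_lookup(void)
{
        return ++attempts < 2 ? -ERR_RESTART : 0;
}

int main(void)
{
        int ret;
retry:
        /* bch2_trans_begin() would go here: drop locks, reset iterators */
        ret = do_lookup();
        if (ret == -ERR_RESTART)
                goto retry;     /* restart the whole transaction from scratch */

        printf("done after %d attempt(s), ret=%d\n", attempts, ret);
        return 0;
}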
 
 void bch2_readahead(struct readahead_control *ractl)
@@ -869,7 +1093,6 @@ void bch2_readahead(struct readahead_control *ractl)
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
        struct btree_trans trans;
-       struct btree_iter *iter;
        struct page *page;
        struct readpages_iter readpages_iter;
        int ret;
@@ -879,9 +1102,6 @@ void bch2_readahead(struct readahead_control *ractl)
 
        bch2_trans_init(&trans, c, 0, 0);
 
-       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
-                                  BTREE_ITER_SLOTS);
-
        bch2_pagecache_add_get(&inode->ei_pagecache_lock);
 
        while ((page = readpage_iter_next(&readpages_iter))) {
@@ -889,19 +1109,19 @@ void bch2_readahead(struct readahead_control *ractl)
                unsigned n = min_t(unsigned,
                                   readpages_iter.nr_pages -
                                   readpages_iter.idx,
-                                  BIO_MAX_PAGES);
+                                  BIO_MAX_VECS);
                struct bch_read_bio *rbio =
-                       rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
+                       rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
+                                                  GFP_NOFS, &c->bio_read),
                                  opts);
 
                readpages_iter.idx++;
 
-               bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
-               rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
+               rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
                rbio->bio.bi_end_io = bch2_readpages_end_io;
                BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
 
-               bchfs_read(&trans, iter, rbio, inode->v.i_ino,
+               bchfs_read(&trans, rbio, inode_inum(inode),
                           &readpages_iter);
        }
 
@@ -912,41 +1132,22 @@ void bch2_readahead(struct readahead_control *ractl)
 }
 
 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
-                            u64 inum, struct page *page)
+                            subvol_inum inum, struct page *page)
 {
        struct btree_trans trans;
-       struct btree_iter *iter;
 
        bch2_page_state_create(page, __GFP_NOFAIL);
 
        bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
        rbio->bio.bi_iter.bi_sector =
-               (sector_t) page->index << PAGE_SECTOR_SHIFT;
+               (sector_t) page->index << PAGE_SECTORS_SHIFT;
        BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
 
        bch2_trans_init(&trans, c, 0, 0);
-       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
-                                  BTREE_ITER_SLOTS);
-
-       bchfs_read(&trans, iter, rbio, inum, NULL);
-
+       bchfs_read(&trans, rbio, inum, NULL);
        bch2_trans_exit(&trans);
 }
 
-int bch2_readpage(struct file *file, struct page *page)
-{
-       struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
-       struct bch_fs *c = inode->v.i_sb->s_fs_info;
-       struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
-       struct bch_read_bio *rbio;
-
-       rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
-       rbio->bio.bi_end_io = bch2_readpages_end_io;
-
-       __bchfs_readpage(c, rbio, inode->v.i_ino, page);
-       return 0;
-}
-
 static void bch2_read_single_page_end_io(struct bio *bio)
 {
        complete(bio->bi_private);
@@ -961,12 +1162,12 @@ static int bch2_read_single_page(struct page *page,
        int ret;
        DECLARE_COMPLETION_ONSTACK(done);
 
-       rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
+       rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
                         io_opts(c, &inode->ei_inode));
        rbio->bio.bi_private = &done;
        rbio->bio.bi_end_io = bch2_read_single_page_end_io;
 
-       __bchfs_readpage(c, rbio, inode->v.i_ino, page);
+       __bchfs_readpage(c, rbio, inode_inum(inode), page);
        wait_for_completion(&done);
 
        ret = blk_status_to_errno(rbio->bio.bi_status);
@@ -979,6 +1180,16 @@ static int bch2_read_single_page(struct page *page,
        return 0;
 }
 
+int bch2_read_folio(struct file *file, struct folio *folio)
+{
+       struct page *page = &folio->page;
+       int ret;
+
+       ret = bch2_read_single_page(page, page->mapping);
+       folio_unlock(folio);
+       return bch2_err_class(ret);
+}
+
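
bch2_read_folio() is the new VFS ->read_folio entry point; note the bch2_err_class() call on return. The sketch below is a hypothetical model of that idea, with PRIV_RESTART an invented stand-in: bcachefs-private error codes live outside the standard errno range and are collapsed to their standard-errno "class" before reaching the VFS.

#include <errno.h>
#include <stdio.h>

#define PRIV_BASE    2048               /* assumed start of a private range */
#define PRIV_RESTART (PRIV_BASE + 1)    /* invented code for illustration   */

static int err_class(int err)
{
        switch (-err) {
        case PRIV_RESTART: return -EINTR;       /* invented mapping          */
        default:           return err;          /* standard errnos unchanged */
        }
}

int main(void)
{
        printf("%d\n", err_class(-PRIV_RESTART));       /* -4 (EINTR) */
        printf("%d\n", err_class(-ENOMEM));             /* unchanged  */
        return 0;
}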
 /* writepages: */
 
 struct bch_writepage_state {
@@ -1013,6 +1224,8 @@ static void bch2_writepage_io_done(struct closure *cl)
        unsigned i;
 
        if (io->op.error) {
+               set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
+
                bio_for_each_segment_all(bvec, bio, iter) {
                        struct bch_page_state *s;
 
@@ -1043,7 +1256,7 @@ static void bch2_writepage_io_done(struct closure *cl)
         * racing with fallocate can cause us to add fewer sectors than
         * expected - but we shouldn't add more sectors than expected:
         */
-       BUG_ON(io->op.i_sectors_delta > 0);
+       WARN_ON_ONCE(io->op.i_sectors_delta > 0);
 
        /*
         * (error (due to going RO) halfway through a page can screw that up
@@ -1090,8 +1303,9 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
 {
        struct bch_write_op *op;
 
-       w->io = container_of(bio_alloc_bioset(GFP_NOFS,
-                                             BIO_MAX_PAGES,
+       w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
+                                             REQ_OP_WRITE,
+                                             GFP_NOFS,
                                              &c->writepage_bioset),
                             struct bch_writepage_io, op.wbio.bio);
 
@@ -1101,10 +1315,10 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
        op                      = &w->io->op;
        bch2_write_op_init(op, c, w->opts);
        op->target              = w->opts.foreground_target;
-       op_journal_seq_set(op, &inode->ei_journal_seq);
        op->nr_replicas         = nr_replicas;
        op->res.nr_replicas     = nr_replicas;
        op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
+       op->subvol              = inode->ei_subvol;
        op->pos                 = POS(inode->v.i_ino, sector);
        op->wbio.bio.bi_iter.bi_sector = sector;
        op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
@@ -1147,16 +1361,16 @@ static int __bch2_writepage(struct page *page,
 do_io:
        s = bch2_page_state_create(page, __GFP_NOFAIL);
 
-       ret = bch2_get_page_disk_reservation(c, inode, page, true);
-       if (ret) {
-               SetPageError(page);
-               mapping_set_error(page->mapping, ret);
-               unlock_page(page);
-               return 0;
-       }
+       /*
+        * Things get really hairy with errors during writeback:
+        */
+       ret = bch2_get_page_disk_reservation(c, inode, page, false);
+       BUG_ON(ret);
 
        /* Before unlocking the page, get copy of reservations: */
+       spin_lock(&s->lock);
        orig = *s;
+       spin_unlock(&s->lock);
 
        for (i = 0; i < PAGE_SECTORS; i++) {
                if (s->s[i].state < SECTOR_DIRTY)
@@ -1189,7 +1403,7 @@ do_io:
 
        offset = 0;
        while (1) {
-               unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0;
+               unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
                u64 sector;
 
                while (offset < PAGE_SECTORS &&
@@ -1199,22 +1413,21 @@ do_io:
                if (offset == PAGE_SECTORS)
                        break;
 
-               sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;
-
                while (offset + sectors < PAGE_SECTORS &&
-                      orig.s[offset + sectors].state >= SECTOR_DIRTY)
+                      orig.s[offset + sectors].state >= SECTOR_DIRTY) {
+                       reserved_sectors += orig.s[offset + sectors].replicas_reserved;
+                       dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
                        sectors++;
-
-               for (i = offset; i < offset + sectors; i++) {
-                       reserved_sectors += orig.s[i].replicas_reserved;
-                       dirty_sectors += orig.s[i].state == SECTOR_DIRTY;
                }
+               BUG_ON(!sectors);
+
+               sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;
 
                if (w->io &&
                    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
                     bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
                     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
-                    (BIO_MAX_PAGES * PAGE_SIZE) ||
+                    (BIO_MAX_VECS * PAGE_SIZE) ||
                     bio_end_sector(&w->io->op.wbio.bio) != sector))
                        bch2_writepage_do_io(w);
 
@@ -1229,8 +1442,8 @@ do_io:
                                     sectors << 9, offset << 9));
 
                /* Check for writing past i_size: */
-               WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
-                       round_up(i_size, block_bytes(c)));
+               WARN_ON_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
+                            round_up(i_size, block_bytes(c)));
 
                w->io->op.res.sectors += reserved_sectors;
                w->io->op.i_sectors_delta -= dirty_sectors;
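
The reworked loop in __bch2_writepage() folds the reserved/dirty tallies into the run-extension scan and starts sectors at 0 so the BUG_ON can assert a non-empty run. A compilable model of the run finding (tallying elided):

#include <stdio.h>

#define PAGE_SECTORS 8

enum { UNALLOCATED, RESERVED, DIRTY }; /* >= DIRTY means "needs writeback" */

/* Model of the run-finding loop in __bch2_writepage(): skip clean
 * sectors, then extend a run while sectors stay >= DIRTY; the real
 * code also tallies reserved/dirty counts inside this inner loop. */
int main(void)
{
        int state[PAGE_SECTORS] = { UNALLOCATED, DIRTY, DIRTY, UNALLOCATED,
                                    DIRTY, DIRTY, DIRTY, UNALLOCATED };
        unsigned offset = 0;

        while (1) {
                unsigned sectors = 0;

                while (offset < PAGE_SECTORS && state[offset] < DIRTY)
                        offset++;
                if (offset == PAGE_SECTORS)
                        break;

                while (offset + sectors < PAGE_SECTORS &&
                       state[offset + sectors] >= DIRTY)
                        sectors++;

                printf("write run: sectors [%u, %u)\n", offset, offset + sectors);
                offset += sectors;
        }
        return 0;
}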
@@ -1258,27 +1471,13 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc
        if (w.io)
                bch2_writepage_do_io(&w);
        blk_finish_plug(&plug);
-       return ret;
-}
-
-int bch2_writepage(struct page *page, struct writeback_control *wbc)
-{
-       struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
-       struct bch_writepage_state w =
-               bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
-       int ret;
-
-       ret = __bch2_writepage(page, wbc, &w);
-       if (w.io)
-               bch2_writepage_do_io(&w);
-
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* buffered writes: */
 
 int bch2_write_begin(struct file *file, struct address_space *mapping,
-                    loff_t pos, unsigned len, unsigned flags,
+                    loff_t pos, unsigned len,
                     struct page **pagep, void **fsdata)
 {
        struct bch_inode_info *inode = to_bch_ei(mapping->host);
@@ -1298,7 +1497,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
 
        bch2_pagecache_add_get(&inode->ei_pagecache_lock);
 
-       page = grab_cache_page_write_begin(mapping, index, flags);
+       page = grab_cache_page_write_begin(mapping, index);
        if (!page)
                goto err_unlock;
 
@@ -1325,6 +1524,12 @@ readpage:
        if (ret)
                goto err;
 out:
+       if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
+               ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
+               if (ret)
+                       goto err;
+       }
+
        ret = bch2_page_reservation_get(c, inode, page, res,
                                        offset, len, true);
        if (ret) {
@@ -1351,7 +1556,7 @@ err_unlock:
        bch2_pagecache_add_put(&inode->ei_pagecache_lock);
        kfree(res);
        *fsdata = NULL;
-       return ret;
+       return bch2_err_class(ret);
 }
 
 int bch2_write_end(struct file *file, struct address_space *mapping,
@@ -1423,7 +1628,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
        bch2_page_reservation_init(c, inode, &res);
 
        for (i = 0; i < nr_pages; i++) {
-               pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
+               pages[i] = grab_cache_page_write_begin(mapping, index + i);
                if (!pages[i]) {
                        nr_pages = i;
                        if (!i) {
@@ -1454,20 +1659,21 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
        }
 
        while (reserved < len) {
-               struct page *page = pages[(offset + reserved) >> PAGE_SHIFT];
+               unsigned i = (offset + reserved) >> PAGE_SHIFT;
+               struct page *page = pages[i];
                unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
                unsigned pg_len = min_t(unsigned, len - reserved,
                                        PAGE_SIZE - pg_offset);
-retry_reservation:
-               ret = bch2_page_reservation_get(c, inode, page, &res,
-                                               pg_offset, pg_len, true);
 
-               if (ret && !PageUptodate(page)) {
-                       ret = bch2_read_single_page(page, mapping);
-                       if (!ret)
-                               goto retry_reservation;
+               if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
+                       ret = bch2_page_state_set(c, inode_inum(inode),
+                                                 pages + i, nr_pages - i);
+                       if (ret)
+                               goto out;
                }
 
+               ret = bch2_page_reservation_get(c, inode, page, &res,
+                                               pg_offset, pg_len, true);
                if (ret)
                        goto out;
 
@@ -1483,8 +1689,8 @@ retry_reservation:
                unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
                unsigned pg_len = min_t(unsigned, len - copied,
                                        PAGE_SIZE - pg_offset);
-               unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
-                                               iter, pg_offset, pg_len);
+               unsigned pg_copied = copy_page_from_iter_atomic(page,
+                                               pg_offset, pg_len, iter);
 
                if (!pg_copied)
                        break;
@@ -1497,7 +1703,6 @@ retry_reservation:
                }
 
                flush_dcache_page(page);
-               iov_iter_advance(iter, pg_copied);
                copied += pg_copied;
 
                if (pg_copied != pg_len)
@@ -1567,11 +1772,11 @@ again:
                 * to check that the address is actually valid, when atomic
                 * usercopies are used, below.
                 */
-               if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
+               if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
                        bytes = min_t(unsigned long, iov_iter_count(iter),
                                      PAGE_SIZE - offset);
 
-                       if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
+                       if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
                                ret = -EFAULT;
                                break;
                        }
@@ -1615,12 +1820,22 @@ again:
 
 /* O_DIRECT reads */
 
+static void bio_check_or_release(struct bio *bio, bool check_dirty)
+{
+       if (check_dirty) {
+               bio_check_pages_dirty(bio);
+       } else {
+               bio_release_pages(bio, false);
+               bio_put(bio);
+       }
+}
+
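
bio_check_or_release() pairs with the new dio->should_dirty flag: pages are dirtied before I/O, and re-dirtied at completion, only for iterators that reference userspace memory; kernel-internal iterators (the loopback case described in a comment further down) skip both sides to avoid deadlocking on the page lock. A toy model of the completion-side rule:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the completion-side ownership rule: when the pages were
 * dirtied before I/O (should_dirty), completion must go through the
 * "check and re-dirty" path; otherwise we only drop the page refs. */
static void toy_check_or_release(bool should_dirty)
{
        if (should_dirty)
                puts("bio_check_pages_dirty(): re-dirty pages, owns the bio");
        else
                puts("bio_release_pages() + bio_put(): plain ref drop");
}

int main(void)
{
        toy_check_or_release(true);     /* normal O_DIRECT read from userspace */
        toy_check_or_release(false);    /* kernel-internal iter, e.g. loopback */
        return 0;
}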
 static void bch2_dio_read_complete(struct closure *cl)
 {
        struct dio_read *dio = container_of(cl, struct dio_read, cl);
 
-       dio->req->ki_complete(dio->req, dio->ret, 0);
-       bio_check_pages_dirty(&dio->rbio.bio);  /* transfers ownership */
+       dio->req->ki_complete(dio->req, dio->ret);
+       bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
 }
 
 static void bch2_direct_IO_read_endio(struct bio *bio)
@@ -1635,8 +1850,11 @@ static void bch2_direct_IO_read_endio(struct bio *bio)
 
 static void bch2_direct_IO_read_split_endio(struct bio *bio)
 {
+       struct dio_read *dio = bio->bi_private;
+       bool should_dirty = dio->should_dirty;
+
        bch2_direct_IO_read_endio(bio);
-       bio_check_pages_dirty(bio);     /* transfers ownership */
+       bio_check_or_release(bio, should_dirty);
 }
 
 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
@@ -1664,8 +1882,10 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
        shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
        iter->count -= shorten;
 
-       bio = bio_alloc_bioset(GFP_KERNEL,
-                              iov_iter_npages(iter, BIO_MAX_PAGES),
+       bio = bio_alloc_bioset(NULL,
+                              bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+                              REQ_OP_READ,
+                              GFP_KERNEL,
                               &c->dio_read_bioset);
 
        bio->bi_end_io = bch2_direct_IO_read_endio;
@@ -1690,11 +1910,19 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
 
        dio->req        = req;
        dio->ret        = ret;
+       /*
+        * This is one of the sketchier things I've encountered: we have to skip
+        * the dirtying of requests that are internal to the kernel (i.e. from
+        * loopback), because we'll deadlock on page_lock.
+        */
+       dio->should_dirty = iter_is_iovec(iter);
 
        goto start;
        while (iter->count) {
-               bio = bio_alloc_bioset(GFP_KERNEL,
-                                      iov_iter_npages(iter, BIO_MAX_PAGES),
+               bio = bio_alloc_bioset(NULL,
+                                      bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+                                      REQ_OP_READ,
+                                      GFP_KERNEL,
                                       &c->bio_read);
                bio->bi_end_io          = bch2_direct_IO_read_split_endio;
 start:
@@ -1711,12 +1939,14 @@ start:
                }
 
                offset += bio->bi_iter.bi_size;
-               bio_set_pages_dirty(bio);
+
+               if (dio->should_dirty)
+                       bio_set_pages_dirty(bio);
 
                if (iter->count)
                        closure_get(&dio->cl);
 
-               bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
+               bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
        }
 
        iter->count += shorten;
@@ -1725,7 +1955,7 @@ start:
                closure_sync(&dio->cl);
                closure_debug_destroy(&dio->cl);
                ret = dio->ret;
-               bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
+               bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
                return ret;
        } else {
                return -EIOCBQUEUED;
@@ -1750,7 +1980,7 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                                        iocb->ki_pos,
                                        iocb->ki_pos + count - 1);
                if (ret < 0)
-                       return ret;
+                       goto out;
 
                file_accessed(file);
 
@@ -1765,12 +1995,56 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                ret = generic_file_read_iter(iocb, iter);
                bch2_pagecache_add_put(&inode->ei_pagecache_lock);
        }
-
-       return ret;
+out:
+       return bch2_err_class(ret);
 }
 
 /* O_DIRECT writes */
 
+static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
+                                      u64 offset, u64 size,
+                                      unsigned nr_replicas, bool compressed)
+{
+       struct btree_trans trans;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       u64 end = offset + size;
+       u32 snapshot;
+       bool ret = true;
+       int err;
+
+       bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
+
+       err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+       if (err)
+               goto err;
+
+       for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+                          SPOS(inum.inum, offset, snapshot),
+                          BTREE_ITER_SLOTS, k, err) {
+               if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
+                       break;
+
+               if (k.k->p.snapshot != snapshot ||
+                   nr_replicas > bch2_bkey_replicas(c, k) ||
+                   (!compressed && bch2_bkey_sectors_compressed(k))) {
+                       ret = false;
+                       break;
+               }
+       }
+
+       offset = iter.pos.offset;
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(err, BCH_ERR_transaction_restart))
+               goto retry;
+       bch2_trans_exit(&trans);
+
+       return err ? false : ret;
+}
+
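
bch2_check_range_allocated() is the fallback when an O_DIRECT write cannot get a disk reservation: the overwrite is still safe if every extent in the range already has enough replicas and, unless the new write is itself compressed, no compressed data that could change size on rewrite. A user-space model of the predicate:

#include <stdbool.h>
#include <stdio.h>

/* Toy extent: how many replicas it has and whether it's compressed. */
struct toy_extent {
        unsigned nr_replicas;
        bool     compressed;
};

/* Model of the check above: every extent must already have enough
 * replicas, and compressed data only passes if the new write is
 * compressed too. */
static bool range_allocated(const struct toy_extent *e, unsigned n,
                            unsigned nr_replicas, bool compressed)
{
        for (unsigned i = 0; i < n; i++)
                if (nr_replicas > e[i].nr_replicas ||
                    (!compressed && e[i].compressed))
                        return false;
        return true;
}

int main(void)
{
        struct toy_extent range[] = {
                { .nr_replicas = 2, .compressed = false },
                { .nr_replicas = 2, .compressed = true  },
        };

        printf("%d\n", range_allocated(range, 2, 2, false));    /* 0 */
        printf("%d\n", range_allocated(range, 2, 2, true));     /* 1 */
        return 0;
}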
 static void bch2_dio_write_loop_async(struct bch_write_op *);
 
 static long bch2_dio_write_loop(struct dio_write *dio)
@@ -1793,7 +2067,7 @@ static long bch2_dio_write_loop(struct dio_write *dio)
        while (1) {
                iter_count = dio->iter.count;
 
-               if (kthread)
+               if (kthread && dio->mm)
                        kthread_use_mm(dio->mm);
                BUG_ON(current->faults_disabled_mapping);
                current->faults_disabled_mapping = mapping;
@@ -1803,7 +2077,7 @@ static long bch2_dio_write_loop(struct dio_write *dio)
                dropped_locks = fdm_dropped_locks();
 
                current->faults_disabled_mapping = NULL;
-               if (kthread)
+               if (kthread && dio->mm)
                        kthread_unuse_mm(dio->mm);
 
                /*
@@ -1837,8 +2111,6 @@ static long bch2_dio_write_loop(struct dio_write *dio)
                         * bio_iov_iter_get_pages was only able to get <
                         * blocksize worth of pages:
                         */
-                       bio_for_each_segment_all(bv, bio, iter)
-                               put_page(bv->bv_page);
                        ret = -EFAULT;
                        goto err;
                }
@@ -1846,20 +2118,23 @@ static long bch2_dio_write_loop(struct dio_write *dio)
                bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
                dio->op.end_io          = bch2_dio_write_loop_async;
                dio->op.target          = dio->op.opts.foreground_target;
-               op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
                dio->op.write_point     = writepoint_hashed((unsigned long) current);
                dio->op.nr_replicas     = dio->op.opts.data_replicas;
+               dio->op.subvol          = inode->ei_subvol;
                dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
 
                if ((req->ki_flags & IOCB_DSYNC) &&
                    !c->opts.journal_flush_disabled)
                        dio->op.flags |= BCH_WRITE_FLUSH;
+               dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
 
                ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
                                                dio->op.opts.data_replicas, 0);
                if (unlikely(ret) &&
-                   !bch2_check_range_allocated(c, dio->op.pos,
-                               bio_sectors(bio), dio->op.opts.data_replicas))
+                   !bch2_check_range_allocated(c, inode_inum(inode),
+                               dio->op.pos.offset, bio_sectors(bio),
+                               dio->op.opts.data_replicas,
+                               dio->op.opts.compression != 0))
                        goto err;
 
                task_io_account_write(bio->bi_iter.bi_size);
@@ -1900,12 +2175,20 @@ loop:
                        i_size_write(&inode->v, req->ki_pos);
                spin_unlock(&inode->v.i_lock);
 
-               bio_for_each_segment_all(bv, bio, iter)
-                       put_page(bv->bv_page);
-               if (!dio->iter.count || dio->op.error)
+               if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+                       bio_for_each_segment_all(bv, bio, iter)
+                               put_page(bv->bv_page);
+               bio->bi_vcnt = 0;
+
+               if (dio->op.error) {
+                       set_bit(EI_INODE_ERROR, &inode->ei_flags);
+                       break;
+               }
+
+               if (!dio->iter.count)
                        break;
 
-               bio_reset(bio);
+               bio_reset(bio, NULL, REQ_OP_WRITE);
                reinit_completion(&dio->done);
        }
 
@@ -1917,13 +2200,19 @@ err:
        if (dio->free_iov)
                kfree(dio->iter.iov);
 
+       if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+               bio_for_each_segment_all(bv, bio, iter)
+                       put_page(bv->bv_page);
        bio_put(bio);
 
        /* inode->i_dio_count is our ref on inode and thus bch_fs */
        inode_dio_end(&inode->v);
 
+       if (ret < 0)
+               ret = bch2_err_class(ret);
+
        if (!sync) {
-               req->ki_complete(req, ret, 0);
+               req->ki_complete(req, ret);
                ret = -EIOCBQUEUED;
        }
        return ret;
@@ -1982,8 +2271,10 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
                locked = false;
        }
 
-       bio = bio_alloc_bioset(GFP_KERNEL,
-                              iov_iter_npages(iter, BIO_MAX_PAGES),
+       bio = bio_alloc_bioset(NULL,
+                              bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+                              REQ_OP_WRITE,
+                              GFP_KERNEL,
                               &c->dio_write_bioset);
        dio = container_of(bio, struct dio_write, op.wbio.bio);
        init_completion(&dio->done);
@@ -2026,8 +2317,10 @@ ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct bch_inode_info *inode = file_bch_inode(file);
        ssize_t ret;
 
-       if (iocb->ki_flags & IOCB_DIRECT)
-               return bch2_direct_write(iocb, from);
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               ret = bch2_direct_write(iocb, from);
+               goto out;
+       }
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(&inode->v);
@@ -2054,51 +2347,64 @@ unlock:
 
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
-
-       return ret;
+out:
+       return bch2_err_class(ret);
 }
 
 /* fsync: */
 
-int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+/*
+ * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
+ * insert trigger: look up the btree inode instead
+ */
+static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
 {
-       struct bch_inode_info *inode = file_bch_inode(file);
-       struct bch_fs *c = inode->v.i_sb->s_fs_info;
-       int ret, ret2;
+       struct bch_inode_unpacked inode;
+       int ret;
 
-       ret = file_write_and_wait_range(file, start, end);
+       if (c->opts.journal_flush_disabled)
+               return 0;
+
+       ret = bch2_inode_find_by_inum(c, inum, &inode);
        if (ret)
                return ret;
 
-       if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
-               goto out;
+       return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
+}
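
bch2_fsync() below now runs its three steps unconditionally and returns the first nonzero status; the ret ?: ret2 ?: ret3 form uses GCC's conditional-with-omitted-operand extension, equivalent to this plain C:

#include <stdio.h>

/* The new bch2_fsync() returns the first nonzero status of three
 * independent steps; this helper spells out what ?: chains to. */
static int first_err(int a, int b, int c)
{
        return a ? a : b ? b : c;
}

int main(void)
{
        /* write+wait ok, metadata sync ok, journal flush failed */
        printf("%d\n", first_err(0, 0, -5));    /* -5 (EIO) */
        /* earlier errors take precedence */
        printf("%d\n", first_err(-4, 0, -5));   /* -4 */
        return 0;
}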
 
-       ret = sync_inode_metadata(&inode->v, 1);
-       if (ret)
-               return ret;
-out:
-       if (!c->opts.journal_flush_disabled)
-               ret = bch2_journal_flush_seq(&c->journal,
-                                            inode->ei_journal_seq);
-       ret2 = file_check_and_advance_wb_err(file);
+int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+       struct bch_inode_info *inode = file_bch_inode(file);
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+       int ret, ret2, ret3;
 
-       return ret ?: ret2;
+       ret = file_write_and_wait_range(file, start, end);
+       ret2 = sync_inode_metadata(&inode->v, 1);
+       ret3 = bch2_flush_inode(c, inode_inum(inode));
+
+       return bch2_err_class(ret ?: ret2 ?: ret3);
 }
 
 /* truncate: */
 
-static inline int range_has_data(struct bch_fs *c,
-                                 struct bpos start,
-                                 struct bpos end)
+static inline int range_has_data(struct bch_fs *c, u32 subvol,
+                                struct bpos start,
+                                struct bpos end)
 {
        struct btree_trans trans;
-       struct btree_iter *iter;
+       struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;
 
        bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
 
-       for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
+       ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
+       if (ret)
+               goto err;
+
+       for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
                if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
                        break;
 
@@ -2107,8 +2413,14 @@ static inline int range_has_data(struct bch_fs *c,
                        break;
                }
        }
+       start = iter.pos;
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               goto retry;
 
-       return bch2_trans_exit(&trans) ?: ret;
+       bch2_trans_exit(&trans);
+       return ret;
 }
 
 static int __bch2_truncate_page(struct bch_inode_info *inode,
@@ -2121,6 +2433,7 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
        unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
        unsigned i;
        struct page *page;
+       s64 i_sectors_delta = 0;
        int ret = 0;
 
        /* Page boundary? Nothing to do */
@@ -2138,9 +2451,9 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
                 * XXX: we're doing two index lookups when we end up reading the
                 * page
                 */
-               ret = range_has_data(c,
-                               POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
-                               POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
+               ret = range_has_data(c, inode->ei_subvol,
+                               POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
+                               POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
                if (ret <= 0)
                        return ret;
 
@@ -2172,9 +2485,21 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
             i < round_down(end_offset, block_bytes(c)) >> 9;
             i++) {
                s->s[i].nr_replicas     = 0;
+               if (s->s[i].state == SECTOR_DIRTY)
+                       i_sectors_delta--;
                s->s[i].state           = SECTOR_UNALLOCATED;
        }
 
+       i_sectors_acct(c, inode, NULL, i_sectors_delta);
+
+       /*
+        * Caller needs to know whether this page will be written out by
+        * writeback - doing an i_size update if necessary - or whether it will
+        * be responsible for the i_size update:
+        */
+       ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
+                         PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;
+
        zero_user_segment(page, start_offset, end_offset);
 
        /*
@@ -2183,8 +2508,7 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
         * XXX: because we aren't currently tracking whether the page has actual
         * data in it (vs. just 0s, or only partially written) this is wrong. ick.
         */
-       ret = bch2_get_page_disk_reservation(c, inode, page, false);
-       BUG_ON(ret);
+       BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));
 
        /*
         * This removes any writeable userspace mappings; we need to force
@@ -2206,11 +2530,25 @@ static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
                                    from, round_up(from, PAGE_SIZE));
 }
 
-static int bch2_extend(struct bch_inode_info *inode,
+static int bch2_truncate_pages(struct bch_inode_info *inode,
+                              loff_t start, loff_t end)
+{
+       int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
+                                      start, end);
+
+       if (ret >= 0 &&
+           start >> PAGE_SHIFT != end >> PAGE_SHIFT)
+               ret = __bch2_truncate_page(inode,
+                                          end >> PAGE_SHIFT,
+                                          start, end);
+       return ret;
+}
+
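
bch2_truncate_pages() zeroes the partial head page of a range and, when the range does not end in the same page, the partial tail page as well. A small model of which page indices get touched, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assuming 4 KiB pages */

/* Model of bch2_truncate_pages(): a hole [start, end) needs its partial
 * head page zeroed, and its partial tail page too when the range does
 * not end in the same page. */
static void hole_pages(long long start, long long end)
{
        long long first = start >> PAGE_SHIFT;
        long long last  = end >> PAGE_SHIFT;

        printf("zero page %lld", first);
        if (first != last)
                printf(" and page %lld", last);
        printf("\n");
}

int main(void)
{
        hole_pages(1024, 3072);         /* both offsets inside page 0  */
        hole_pages(1024, 10240);        /* spans pages 0 and 2         */
        return 0;
}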
+static int bch2_extend(struct user_namespace *mnt_userns,
+                      struct bch_inode_info *inode,
                       struct bch_inode_unpacked *inode_u,
                       struct iattr *iattr)
 {
-       struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        int ret;
 
@@ -2224,24 +2562,15 @@ static int bch2_extend(struct bch_inode_info *inode,
                return ret;
 
        truncate_setsize(&inode->v, iattr->ia_size);
-       setattr_copy(&inode->v, iattr);
-
-       mutex_lock(&inode->ei_update_lock);
-       ret = bch2_write_inode_size(c, inode, inode->v.i_size,
-                                   ATTR_MTIME|ATTR_CTIME);
-       mutex_unlock(&inode->ei_update_lock);
 
-       return ret;
+       return bch2_setattr_nonsize(mnt_userns, inode, iattr);
 }
 
 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
                                   struct bch_inode_unpacked *bi,
                                   void *p)
 {
-       struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
        bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
-       bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
        return 0;
 }
 
@@ -2255,29 +2584,33 @@ static int bch2_truncate_start_fn(struct bch_inode_info *inode,
        return 0;
 }
 
-int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
+int bch2_truncate(struct user_namespace *mnt_userns,
+                 struct bch_inode_info *inode, struct iattr *iattr)
 {
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        struct bch_inode_unpacked inode_u;
-       struct btree_trans trans;
-       struct btree_iter *iter;
        u64 new_i_size = iattr->ia_size;
        s64 i_sectors_delta = 0;
        int ret = 0;
 
-       inode_dio_wait(&inode->v);
-       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
        /*
-        * fetch current on disk i_size: inode is locked, i_size can only
-        * increase underneath us:
+        * If the truncate call will change the size of the file, the
+        * ctime and mtime should be updated. If the size will not change,
+        * we do not need to update them.
         */
-       bch2_trans_init(&trans, c, 0, 0);
-       iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
-       ret = PTR_ERR_OR_ZERO(iter);
-       bch2_trans_exit(&trans);
+       if (iattr->ia_size != inode->v.i_size) {
+               if (!(iattr->ia_valid & ATTR_MTIME))
+                       ktime_get_coarse_real_ts64(&iattr->ia_mtime);
+               if (!(iattr->ia_valid & ATTR_CTIME))
+                       ktime_get_coarse_real_ts64(&iattr->ia_ctime);
+               iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
+       }
+
+       inode_dio_wait(&inode->v);
+       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
 
+       ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
        if (ret)
                goto err;
 
@@ -2290,15 +2623,18 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
        if (ret)
                goto err;
 
-       BUG_ON(inode->v.i_size < inode_u.bi_size);
+       WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+               inode->v.i_size < inode_u.bi_size);
 
        if (iattr->ia_size > inode->v.i_size) {
-               ret = bch2_extend(inode, &inode_u, iattr);
+               ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
                goto err;
        }
 
+       iattr->ia_valid &= ~ATTR_SIZE;
+
        ret = bch2_truncate_page(inode, iattr->ia_size);
-       if (unlikely(ret))
+       if (unlikely(ret < 0))
                goto err;
 
        /*
@@ -2332,68 +2668,76 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
 
        truncate_setsize(&inode->v, iattr->ia_size);
 
-       ret = bch2_fpunch(c, inode->v.i_ino,
+       ret = bch2_fpunch(c, inode_inum(inode),
                        round_up(iattr->ia_size, block_bytes(c)) >> 9,
-                       U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
+                       U64_MAX, &i_sectors_delta);
        i_sectors_acct(c, inode, NULL, i_sectors_delta);
 
+       bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
+                               !bch2_journal_error(&c->journal), c,
+                               "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
+                               inode->v.i_ino, (u64) inode->v.i_blocks,
+                               inode->ei_inode.bi_sectors);
        if (unlikely(ret))
                goto err;
 
-       setattr_copy(&inode->v, iattr);
-
        mutex_lock(&inode->ei_update_lock);
-       ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
-                              ATTR_MTIME|ATTR_CTIME);
+       ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
        mutex_unlock(&inode->ei_update_lock);
+
+       ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
 err:
        bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       return ret;
+       return bch2_err_class(ret);
 }
 
 /* fallocate: */
 
+static int inode_update_times_fn(struct bch_inode_info *inode,
+                                struct bch_inode_unpacked *bi, void *p)
+{
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+
+       bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
+       return 0;
+}
+
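
inode_update_times_fn() is a bch2_write_inode() callback; the patch invokes it under ei_update_lock wherever only the timestamps (and, via the inode write, bi_journal_seq for fsync) need updating:

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
			       ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);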
 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
 {
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
-       u64 discard_start = round_up(offset, block_bytes(c)) >> 9;
-       u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9;
+       u64 end         = offset + len;
+       u64 block_start = round_up(offset, block_bytes(c));
+       u64 block_end   = round_down(end, block_bytes(c));
+       bool truncated_last_page;
        int ret = 0;
 
-       inode_lock(&inode->v);
-       inode_dio_wait(&inode->v);
-       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
-       ret = __bch2_truncate_page(inode,
-                                  offset >> PAGE_SHIFT,
-                                  offset, offset + len);
-       if (unlikely(ret))
+       ret = bch2_truncate_pages(inode, offset, end);
+       if (unlikely(ret < 0))
                goto err;
 
-       if (offset >> PAGE_SHIFT !=
-           (offset + len) >> PAGE_SHIFT) {
-               ret = __bch2_truncate_page(inode,
-                                          (offset + len) >> PAGE_SHIFT,
-                                          offset, offset + len);
-               if (unlikely(ret))
-                       goto err;
-       }
+       truncated_last_page = ret;
 
-       truncate_pagecache_range(&inode->v, offset, offset + len - 1);
+       truncate_pagecache_range(&inode->v, offset, end - 1);
 
-       if (discard_start < discard_end) {
+       if (block_start < block_end) {
                s64 i_sectors_delta = 0;
 
-               ret = bch2_fpunch(c, inode->v.i_ino,
-                                 discard_start, discard_end,
-                                 &inode->ei_journal_seq,
+               ret = bch2_fpunch(c, inode_inum(inode),
+                                 block_start >> 9, block_end >> 9,
                                  &i_sectors_delta);
                i_sectors_acct(c, inode, NULL, i_sectors_delta);
        }
-err:
-       bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       inode_unlock(&inode->v);
 
+       mutex_lock(&inode->ei_update_lock);
+       if (end >= inode->v.i_size && !truncated_last_page) {
+               ret = bch2_write_inode_size(c, inode, inode->v.i_size,
+                                           ATTR_MTIME|ATTR_CTIME);
+       } else {
+               ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+                                      ATTR_MTIME|ATTR_CTIME);
+       }
+       mutex_unlock(&inode->ei_update_lock);
+err:
        return ret;
 }
 
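A worked example of the alignment rule above, assuming 4096-byte blocks: punching offset 1000, len 10000 gives end = 11000, block_start = round_up(1000, 4096) = 4096 and block_end = round_down(11000, 4096) = 8192, so only sectors [8, 16) are punched from the extents btree; the unaligned head [1000, 4096) and tail [8192, 11000) are zeroed through the pagecache by bch2_truncate_pages().
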
@@ -2403,44 +2747,28 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 {
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
-       struct bkey_on_stack copy;
+       struct bkey_buf copy;
        struct btree_trans trans;
-       struct btree_iter *src, *dst;
+       struct btree_iter src, dst, del;
        loff_t shift, new_size;
        u64 src_start;
-       int ret;
+       int ret = 0;
 
        if ((offset | len) & (block_bytes(c) - 1))
                return -EINVAL;
 
-       bkey_on_stack_init(&copy);
-       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
-
-       /*
-        * We need i_mutex to keep the page cache consistent with the extents
-        * btree, and the btree consistent with i_size - we don't need outside
-        * locking for the extents btree itself, because we're using linked
-        * iterators
-        */
-       inode_lock(&inode->v);
-       inode_dio_wait(&inode->v);
-       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
        if (insert) {
-               ret = -EFBIG;
                if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
-                       goto err;
+                       return -EFBIG;
 
-               ret = -EINVAL;
                if (offset >= inode->v.i_size)
-                       goto err;
+                       return -EINVAL;
 
                src_start       = U64_MAX;
                shift           = len;
        } else {
-               ret = -EINVAL;
                if (offset + len >= inode->v.i_size)
-                       goto err;
+                       return -EINVAL;
 
                src_start       = offset + len;
                shift           = -len;
@@ -2450,7 +2778,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 
        ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
        if (ret)
-               goto err;
+               return ret;
 
        if (insert) {
                i_size_write(&inode->v, new_size);
@@ -2461,22 +2789,25 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
        } else {
                s64 i_sectors_delta = 0;
 
-               ret = bch2_fpunch(c, inode->v.i_ino,
+               ret = bch2_fpunch(c, inode_inum(inode),
                                  offset >> 9, (offset + len) >> 9,
-                                 &inode->ei_journal_seq,
                                  &i_sectors_delta);
                i_sectors_acct(c, inode, NULL, i_sectors_delta);
 
                if (ret)
-                       goto err;
+                       return ret;
        }
 
-       src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+       bch2_bkey_buf_init(&copy);
+       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+       bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
                        POS(inode->v.i_ino, src_start >> 9),
                        BTREE_ITER_INTENT);
-       dst = bch2_trans_copy_iter(&trans, src);
+       bch2_trans_copy_iter(&dst, &src);
+       bch2_trans_copy_iter(&del, &src);
 
-       while (1) {
+       while (ret == 0 ||
+              bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
                struct disk_reservation disk_res =
                        bch2_disk_reservation_init(c, 0);
                struct bkey_i delete;
@@ -2485,34 +2816,46 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
                struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
                struct bpos atomic_end;
                unsigned trigger_flags = 0;
+               u32 snapshot;
+
+               bch2_trans_begin(&trans);
+
+               ret = bch2_subvolume_get_snapshot(&trans,
+                                       inode->ei_subvol, &snapshot);
+               if (ret)
+                       continue;
+
+               bch2_btree_iter_set_snapshot(&src, snapshot);
+               bch2_btree_iter_set_snapshot(&dst, snapshot);
+               bch2_btree_iter_set_snapshot(&del, snapshot);
 
                k = insert
-                       ? bch2_btree_iter_peek_prev(src)
-                       : bch2_btree_iter_peek(src);
+                       ? bch2_btree_iter_peek_prev(&src)
+                       : bch2_btree_iter_peek(&src);
                if ((ret = bkey_err(k)))
-                       goto bkey_err;
+                       continue;
 
                if (!k.k || k.k->p.inode != inode->v.i_ino)
                        break;
 
-               BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));
-
                if (insert &&
                    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
                        break;
 reassemble:
-               bkey_on_stack_reassemble(&copy, c, k);
+               bch2_bkey_buf_reassemble(&copy, c, k);
 
                if (insert &&
                    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
                        bch2_cut_front(move_pos, copy.k);
 
                copy.k->k.p.offset += shift >> 9;
-               bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));
+               bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
 
-               ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
+               ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
                if (ret)
-                       goto bkey_err;
+                       continue;
 
                if (bkey_cmp(atomic_end, copy.k->k.p)) {
                        if (insert) {
@@ -2528,16 +2871,11 @@ reassemble:
                delete.k.p = copy.k->k.p;
                delete.k.size = copy.k->k.size;
                delete.k.p.offset -= shift >> 9;
+               bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
 
                next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
 
-               if (copy.k->k.size == k.k->size) {
-                       /*
-                        * If we're moving the entire extent, we can skip
-                        * running triggers:
-                        */
-                       trigger_flags |= BTREE_TRIGGER_NORUN;
-               } else {
+               if (copy.k->k.size != k.k->size) {
                        /* We might end up splitting compressed extents: */
                        unsigned nr_ptrs =
                                bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
@@ -2548,114 +2886,87 @@ reassemble:
                        BUG_ON(ret);
                }
 
-               bch2_btree_iter_set_pos(src, bkey_start_pos(&delete.k));
-
-               ret =   bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
-                       bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
-                       bch2_trans_commit(&trans, &disk_res,
-                                         &inode->ei_journal_seq,
+               ret =   bch2_btree_iter_traverse(&del) ?:
+                       bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
+                       bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
+                       bch2_trans_commit(&trans, &disk_res, NULL,
                                          BTREE_INSERT_NOFAIL);
                bch2_disk_reservation_put(c, &disk_res);
-bkey_err:
-               if (!ret)
-                       bch2_btree_iter_set_pos(src, next_pos);
-
-               if (ret == -EINTR)
-                       ret = 0;
-               if (ret)
-                       goto err;
 
-               bch2_trans_cond_resched(&trans);
+               if (!ret)
+                       bch2_btree_iter_set_pos(&src, next_pos);
        }
-       bch2_trans_unlock(&trans);
+       bch2_trans_iter_exit(&trans, &del);
+       bch2_trans_iter_exit(&trans, &dst);
+       bch2_trans_iter_exit(&trans, &src);
+       bch2_trans_exit(&trans);
+       bch2_bkey_buf_exit(&copy, c);
+
+       if (ret)
+               return ret;
 
+       mutex_lock(&inode->ei_update_lock);
        if (!insert) {
                i_size_write(&inode->v, new_size);
-               mutex_lock(&inode->ei_update_lock);
                ret = bch2_write_inode_size(c, inode, new_size,
                                            ATTR_MTIME|ATTR_CTIME);
-               mutex_unlock(&inode->ei_update_lock);
+       } else {
+               /* We need an inode update to update bi_journal_seq for fsync: */
+               ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+                                      ATTR_MTIME|ATTR_CTIME);
        }
-err:
-       bch2_trans_exit(&trans);
-       bkey_on_stack_exit(&copy, c);
-       bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       inode_unlock(&inode->v);
+       mutex_unlock(&inode->ei_update_lock);
        return ret;
 }
 
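The loop above replaces the old goto bkey_err / -EINTR pattern with the newer restart idiom: every iteration begins with bch2_trans_begin(), and a transaction restart simply falls through to the next iteration. A condensed sketch of the idiom, with do_one_step() and no_more_work as hypothetical placeholders for the peek/update/commit body:

	ret = 0;
	while (ret == 0 ||
	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		bch2_trans_begin(&trans);

		ret = do_one_step(&trans);	/* hypothetical unit of work */
		if (ret)
			continue;	/* restarts loop again; real errors exit */

		if (no_more_work)	/* hypothetical termination check */
			break;
	}
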
-static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
-                           loff_t offset, loff_t len)
+static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
+                            u64 start_sector, u64 end_sector)
 {
-       struct address_space *mapping = inode->v.i_mapping;
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans trans;
-       struct btree_iter *iter;
-       struct bpos end_pos;
-       loff_t end              = offset + len;
-       loff_t block_start      = round_down(offset,    block_bytes(c));
-       loff_t block_end        = round_up(end,         block_bytes(c));
-       unsigned sectors;
+       struct btree_iter iter;
+       struct bpos end_pos = POS(inode->v.i_ino, end_sector);
        unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
-       int ret;
-
-       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
-
-       inode_lock(&inode->v);
-       inode_dio_wait(&inode->v);
-       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
-       if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
-               ret = inode_newsize_ok(&inode->v, end);
-               if (ret)
-                       goto err;
-       }
-
-       if (mode & FALLOC_FL_ZERO_RANGE) {
-               ret = __bch2_truncate_page(inode,
-                                          offset >> PAGE_SHIFT,
-                                          offset, end);
-
-               if (!ret &&
-                   offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
-                       ret = __bch2_truncate_page(inode,
-                                                  end >> PAGE_SHIFT,
-                                                  offset, end);
-
-               if (unlikely(ret))
-                       goto err;
+       int ret = 0;
 
-               truncate_pagecache_range(&inode->v, offset, end - 1);
-       }
+       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
 
-       iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
-                       POS(inode->v.i_ino, block_start >> 9),
+       bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+                       POS(inode->v.i_ino, start_sector),
                        BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-       end_pos = POS(inode->v.i_ino, block_end >> 9);
 
-       while (bkey_cmp(iter->pos, end_pos) < 0) {
+       while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
                s64 i_sectors_delta = 0;
                struct disk_reservation disk_res = { 0 };
                struct quota_res quota_res = { 0 };
                struct bkey_i_reservation reservation;
                struct bkey_s_c k;
+               unsigned sectors;
+               u32 snapshot;
 
                bch2_trans_begin(&trans);
 
-               k = bch2_btree_iter_peek_slot(iter);
+               ret = bch2_subvolume_get_snapshot(&trans,
+                                       inode->ei_subvol, &snapshot);
+               if (ret)
+                       goto bkey_err;
+
+               bch2_btree_iter_set_snapshot(&iter, snapshot);
+
+               k = bch2_btree_iter_peek_slot(&iter);
                if ((ret = bkey_err(k)))
                        goto bkey_err;
 
                /* already reserved */
                if (k.k->type == KEY_TYPE_reservation &&
                    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
-                       bch2_btree_iter_next_slot(iter);
+                       bch2_btree_iter_advance(&iter);
                        continue;
                }
 
                if (bkey_extent_is_data(k.k) &&
                    !(mode & FALLOC_FL_ZERO_RANGE)) {
-                       bch2_btree_iter_next_slot(iter);
+                       bch2_btree_iter_advance(&iter);
                        continue;
                }
 
@@ -2664,7 +2975,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
                reservation.k.p         = k.k->p;
                reservation.k.size      = k.k->size;
 
-               bch2_cut_front(iter->pos,       &reservation.k_i);
+               bch2_cut_front(iter.pos,        &reservation.k_i);
                bch2_cut_back(end_pos,          &reservation.k_i);
 
                sectors = reservation.k.size;
@@ -2688,67 +2999,93 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
                        reservation.v.nr_replicas = disk_res.nr_replicas;
                }
 
-               ret = bch2_extent_update(&trans, iter, &reservation.k_i,
-                               &disk_res, &inode->ei_journal_seq,
-                               0, &i_sectors_delta);
+               ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
+                                        &reservation.k_i,
+                               &disk_res, NULL,
+                               0, &i_sectors_delta, true);
+               if (ret)
+                       goto bkey_err;
                i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
 bkey_err:
                bch2_quota_reservation_put(c, inode, &quota_res);
                bch2_disk_reservation_put(c, &disk_res);
-               if (ret == -EINTR)
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
-               if (ret)
-                       goto err;
        }
 
-       /*
-        * Do we need to extend the file?
-        *
-        * If we zeroed up to the end of the file, we dropped whatever writes
-        * were going to write out the current i_size, so we have to extend
-        * manually even if FL_KEEP_SIZE was set:
-        */
-       if (end >= inode->v.i_size &&
-           (!(mode & FALLOC_FL_KEEP_SIZE) ||
-            (mode & FALLOC_FL_ZERO_RANGE))) {
-               struct btree_iter *inode_iter;
-               struct bch_inode_unpacked inode_u;
+       bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
+       mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
 
-               do {
-                       bch2_trans_begin(&trans);
-                       inode_iter = bch2_inode_peek(&trans, &inode_u,
-                                                    inode->v.i_ino, 0);
-                       ret = PTR_ERR_OR_ZERO(inode_iter);
-               } while (ret == -EINTR);
+       if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
+               struct quota_res quota_res = { 0 };
+               s64 i_sectors_delta = 0;
 
-               bch2_trans_unlock(&trans);
+               bch2_fpunch_at(&trans, &iter, inode_inum(inode),
+                              end_sector, &i_sectors_delta);
+               i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
+               bch2_quota_reservation_put(c, inode, &quota_res);
+       }
 
-               if (ret)
-                       goto err;
+       bch2_trans_iter_exit(&trans, &iter);
+       bch2_trans_exit(&trans);
+       return ret;
+}
 
-               /*
-                * Sync existing appends before extending i_size,
-                * as in bch2_extend():
-                */
-               ret = filemap_write_and_wait_range(mapping,
-                                       inode_u.bi_size, S64_MAX);
+static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
+                           loff_t offset, loff_t len)
+{
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+       u64 end         = offset + len;
+       u64 block_start = round_down(offset,    block_bytes(c));
+       u64 block_end   = round_up(end,         block_bytes(c));
+       bool truncated_last_page = false;
+       int ret, ret2 = 0;
+
+       if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
+               ret = inode_newsize_ok(&inode->v, end);
                if (ret)
-                       goto err;
+                       return ret;
+       }
 
-               if (mode & FALLOC_FL_KEEP_SIZE)
-                       end = inode->v.i_size;
-               else
-                       i_size_write(&inode->v, end);
+       if (mode & FALLOC_FL_ZERO_RANGE) {
+               ret = bch2_truncate_pages(inode, offset, end);
+               if (unlikely(ret < 0))
+                       return ret;
+
+               truncated_last_page = ret;
+
+               truncate_pagecache_range(&inode->v, offset, end - 1);
+
+               block_start     = round_up(offset,      block_bytes(c));
+               block_end       = round_down(end,       block_bytes(c));
+       }
+
+       ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
+
+       /*
+        * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
+        * so that the VFS cache i_size is consistent with the btree i_size:
+        */
+       if (ret &&
+           !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
+               return ret;
+
+       if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
+               end = inode->v.i_size;
+
+       if (end >= inode->v.i_size &&
+           (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
+            !(mode & FALLOC_FL_KEEP_SIZE))) {
+               spin_lock(&inode->v.i_lock);
+               i_size_write(&inode->v, end);
+               spin_unlock(&inode->v.i_lock);
 
                mutex_lock(&inode->ei_update_lock);
-               ret = bch2_write_inode_size(c, inode, end, 0);
+               ret2 = bch2_write_inode_size(c, inode, end, 0);
                mutex_unlock(&inode->ei_update_lock);
        }
-err:
-       bch2_trans_exit(&trans);
-       bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       inode_unlock(&inode->v);
-       return ret;
+
+       return ret ?: ret2;
 }
 
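The i_size condition above reads as a small decision table; the in-place update is skipped only when writeback already owns it (ZERO_RANGE dirtied the last page) or when KEEP_SIZE means the file must not grow:

	end >= i_size | KEEP_SIZE | ZERO_RANGE && !truncated_last_page | write i_size?
	no            | -         | -                                  | no
	yes           | no        | -                                  | yes
	yes           | yes       | yes                                | yes
	yes           | yes       | no                                 | no
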
 long bch2_fallocate_dispatch(struct file *file, int mode,
@@ -2758,9 +3095,17 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        long ret;
 
-       if (!percpu_ref_tryget(&c->writes))
+       if (!percpu_ref_tryget_live(&c->writes))
                return -EROFS;
 
+       inode_lock(&inode->v);
+       inode_dio_wait(&inode->v);
+       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+
+       ret = file_modified(file);
+       if (ret)
+               goto err;
+
        if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
                ret = bchfs_fallocate(inode, mode, offset, len);
        else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
@@ -2771,47 +3116,61 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
                ret = bchfs_fcollapse_finsert(inode, offset, len, false);
        else
                ret = -EOPNOTSUPP;
-
+err:
+       bch2_pagecache_block_put(&inode->ei_pagecache_lock);
+       inode_unlock(&inode->v);
        percpu_ref_put(&c->writes);
 
-       return ret;
+       return bch2_err_class(ret);
 }
 
-static void mark_range_unallocated(struct bch_inode_info *inode,
-                                  loff_t start, loff_t end)
+static int quota_reserve_range(struct bch_inode_info *inode,
+                              struct quota_res *res,
+                              u64 start, u64 end)
 {
-       pgoff_t index = start >> PAGE_SHIFT;
-       pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
-       struct pagevec pvec;
-
-       pagevec_init(&pvec);
+       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+       struct btree_trans trans;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       u32 snapshot;
+       u64 sectors = end - start;
+       u64 pos = start;
+       int ret;
 
-       do {
-               unsigned nr_pages, i, j;
+       bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
 
-               nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
-                                               &index, end_index);
-               if (nr_pages == 0)
-                       break;
+       ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
+       if (ret)
+               goto err;
 
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-                       struct bch_page_state *s;
+       bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+                            SPOS(inode->v.i_ino, pos, snapshot), 0);
+
+       while (!(ret = btree_trans_too_many_iters(&trans)) &&
+              (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
+              !(ret = bkey_err(k))) {
+               if (bkey_extent_is_allocation(k.k)) {
+                       u64 s = min(end, k.k->p.offset) -
+                               max(start, bkey_start_offset(k.k));
+                       BUG_ON(s > sectors);
+                       sectors -= s;
+               }
+               bch2_btree_iter_advance(&iter);
+       }
+       pos = iter.pos.offset;
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               goto retry;
 
-                       lock_page(page);
-                       s = bch2_page_state(page);
+       bch2_trans_exit(&trans);
 
-                       if (s) {
-                               spin_lock(&s->lock);
-                               for (j = 0; j < PAGE_SECTORS; j++)
-                                       s->s[j].nr_replicas = 0;
-                               spin_unlock(&s->lock);
-                       }
+       if (ret)
+               return ret;
 
-                       unlock_page(page);
-               }
-               pagevec_release(&pvec);
-       } while (index <= end_index);
+       return bch2_quota_reservation_add(c, inode, res, sectors, true);
 }
 
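quota_reserve_range() reserves quota only for sectors that are not already allocated. Worked example: reserving for [100, 200) when an existing extent covers sectors [150, 300) subtracts min(200, 300) - max(100, 150) = 50 from the initial 100-sector request, leaving a 50-sector quota reservation for the still-unallocated half.
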
 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
@@ -2821,13 +3180,11 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
        struct bch_inode_info *src = file_bch_inode(file_src);
        struct bch_inode_info *dst = file_bch_inode(file_dst);
        struct bch_fs *c = src->v.i_sb->s_fs_info;
+       struct quota_res quota_res = { 0 };
        s64 i_sectors_delta = 0;
        u64 aligned_len;
        loff_t ret = 0;
 
-       if (!c->opts.reflink)
-               return -EOPNOTSUPP;
-
        if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
                return -EINVAL;
 
@@ -2844,8 +3201,6 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
 
        bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
 
-       file_update_time(file_dst);
-
        inode_dio_wait(&src->v);
        inode_dio_wait(&dst->v);
 
@@ -2862,13 +3217,20 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
        if (ret)
                goto err;
 
-       mark_range_unallocated(src, pos_src, pos_src + aligned_len);
+       ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
+                                 (pos_dst + aligned_len) >> 9);
+       if (ret)
+               goto err;
+
+       file_update_time(file_dst);
+
+       mark_pagecache_unallocated(src, pos_src >> 9,
+                                  (pos_src + aligned_len) >> 9);
 
        ret = bch2_remap_range(c,
-                              POS(dst->v.i_ino, pos_dst >> 9),
-                              POS(src->v.i_ino, pos_src >> 9),
+                              inode_inum(dst), pos_dst >> 9,
+                              inode_inum(src), pos_src >> 9,
                               aligned_len >> 9,
-                              &dst->ei_journal_seq,
                               pos_dst + len, &i_sectors_delta);
        if (ret < 0)
                goto err;
@@ -2878,17 +3240,21 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
         */
        ret = min((u64) ret << 9, (u64) len);
 
-       /* XXX get a quota reservation */
-       i_sectors_acct(c, dst, NULL, i_sectors_delta);
+       i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
 
        spin_lock(&dst->v.i_lock);
        if (pos_dst + ret > dst->v.i_size)
                i_size_write(&dst->v, pos_dst + ret);
        spin_unlock(&dst->v.i_lock);
+
+       if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
+           IS_SYNC(file_inode(file_dst)))
+               ret = bch2_flush_inode(c, inode_inum(dst));
 err:
+       bch2_quota_reservation_put(c, dst, &quota_res);
        bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
 
-       return ret;
+       return bch2_err_class(ret);
 }
 
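bch2_remap_range() operates in sectors on the block-aligned length, so the byte count reported to the caller has to be clamped: with 4096-byte blocks and len = 5000, aligned_len = 8192; a fully remapped range returns 16 sectors, and min((u64) ret << 9, (u64) len) trims the 8192-byte raw result back to the 5000 bytes the caller asked for.
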
 /* fseek: */
@@ -2910,36 +3276,40 @@ static loff_t bch2_seek_pagecache_data(struct inode *vinode,
                                       loff_t start_offset,
                                       loff_t end_offset)
 {
-       struct address_space *mapping = vinode->i_mapping;
-       struct page *page;
+       struct folio_batch fbatch;
        pgoff_t start_index     = start_offset >> PAGE_SHIFT;
        pgoff_t end_index       = end_offset >> PAGE_SHIFT;
        pgoff_t index           = start_index;
+       unsigned i;
        loff_t ret;
        int offset;
 
-       while (index <= end_index) {
-               if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
-                       lock_page(page);
+       folio_batch_init(&fbatch);
+
+       while (filemap_get_folios(vinode->i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+
+                       folio_lock(folio);
 
-                       offset = page_data_offset(page,
-                                       page->index == start_index
+                       offset = page_data_offset(&folio->page,
+                                       folio->index == start_index
                                        ? start_offset & (PAGE_SIZE - 1)
                                        : 0);
                        if (offset >= 0) {
-                               ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
+                               ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
                                            offset,
                                            start_offset, end_offset);
-                               unlock_page(page);
-                               put_page(page);
+                               folio_unlock(folio);
+                               folio_batch_release(&fbatch);
                                return ret;
                        }
 
-                       unlock_page(page);
-                       put_page(page);
-               } else {
-                       break;
+                       folio_unlock(folio);
                }
+               folio_batch_release(&fbatch);
+               cond_resched();
        }
 
        return end_offset;
@@ -2950,9 +3320,11 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans trans;
-       struct btree_iter *iter;
+       struct btree_iter iter;
        struct bkey_s_c k;
+       subvol_inum inum = inode_inum(inode);
        u64 isize, next_data = MAX_LFS_FILESIZE;
+       u32 snapshot;
        int ret;
 
        isize = i_size_read(&inode->v);
@@ -2960,9 +3332,15 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
                return -ENXIO;
 
        bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
+
+       ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+       if (ret)
+               goto err;
 
-       for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
-                          POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
+       for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+                          SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
                if (k.k->p.inode != inode->v.i_ino) {
                        break;
                } else if (bkey_extent_is_data(k.k)) {
@@ -2971,8 +3349,12 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
                } else if (k.k->p.offset >> 9 > isize)
                        break;
        }
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               goto retry;
 
-       ret = bch2_trans_exit(&trans) ?: ret;
+       bch2_trans_exit(&trans);
        if (ret)
                return ret;
 
@@ -3008,8 +3390,8 @@ static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
        int pg_offset;
        loff_t ret = -1;
 
-       page = find_lock_entry(mapping, index);
-       if (!page || xa_is_value(page))
+       page = find_lock_page(mapping, index);
+       if (!page)
                return offset;
 
        pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
@@ -3045,9 +3427,11 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans trans;
-       struct btree_iter *iter;
+       struct btree_iter iter;
        struct bkey_s_c k;
+       subvol_inum inum = inode_inum(inode);
        u64 isize, next_hole = MAX_LFS_FILESIZE;
+       u32 snapshot;
        int ret;
 
        isize = i_size_read(&inode->v);
@@ -3055,9 +3439,15 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
                return -ENXIO;
 
        bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
 
-       for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
-                          POS(inode->v.i_ino, offset >> 9),
+       ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+       if (ret)
+               goto err;
+
+       for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+                          SPOS(inode->v.i_ino, offset >> 9, snapshot),
                           BTREE_ITER_SLOTS, k, ret) {
                if (k.k->p.inode != inode->v.i_ino) {
                        next_hole = bch2_seek_pagecache_hole(&inode->v,
@@ -3074,8 +3464,12 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
                        offset = max(offset, bkey_start_offset(k.k) << 9);
                }
        }
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               goto retry;
 
-       ret = bch2_trans_exit(&trans) ?: ret;
+       bch2_trans_exit(&trans);
        if (ret)
                return ret;
 
@@ -3087,18 +3481,26 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
 
 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
 {
+       loff_t ret;
+
        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
-               return generic_file_llseek(file, offset, whence);
+               ret = generic_file_llseek(file, offset, whence);
+               break;
        case SEEK_DATA:
-               return bch2_seek_data(file, offset);
+               ret = bch2_seek_data(file, offset);
+               break;
        case SEEK_HOLE:
-               return bch2_seek_hole(file, offset);
+               ret = bch2_seek_hole(file, offset);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
        }
 
-       return -EINVAL;
+       return bch2_err_class(ret);
 }
 
 void bch2_fs_fsio_exit(struct bch_fs *c)