Update bcachefs sources to ee560a3929 bcachefs: Print version, options earlier in...

diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index 43c39c62740307d5b2987fe5efe5f87e9af661f6..6b691b2b52afeb70703dd5387ee670e2c2931580 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -19,6 +19,7 @@
 #include "keylist.h"
 #include "quota.h"
 #include "reflink.h"
+#include "trace.h"
 
 #include <linux/aio.h>
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
 #include <linux/writeback.h>
 
-#include <trace/events/bcachefs.h>
 #include <trace/events/writeback.h>
 
+static void bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned);
+
+struct folio_vec {
+       struct folio    *fv_folio;
+       size_t          fv_offset;
+       size_t          fv_len;
+};
+
+static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
+{
+       struct folio *folio     = page_folio(bv.bv_page);
+       size_t offset           = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
+               bv.bv_offset;
+       size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
+
+       return (struct folio_vec) {
+               .fv_folio       = folio,
+               .fv_offset      = offset,
+               .fv_len         = len,
+       };
+}
+
+static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
+                                                   struct bvec_iter iter)
+{
+       return biovec_to_foliovec(bio_iter_iovec(bio, iter));
+}
+
+#define __bio_for_each_folio(bvl, bio, iter, start)                    \
+       for (iter = (start);                                            \
+            (iter).bi_size &&                                          \
+               ((bvl = bio_iter_iovec_folio((bio), (iter))), 1);       \
+            bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
+
+/**
+ * bio_for_each_folio - iterate over folios within a bio
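+ * @bvl:       struct folio_vec used as the iteration cursor
+ * @bio:       bio to iterate over
+ * @iter:      struct bvec_iter tracking the current position in @bio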
+ *
+ * Like other non-_all versions, this iterates over what bio->bi_iter currently
+ * points to. This version is for drivers, where the bio may have previously
+ * been split or cloned.
+ */
+#define bio_for_each_folio(bvl, bio, iter)                             \
+       __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
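
For reference, a sketch of how a consumer drives the new macro. walk_bio_folios() is a hypothetical example, not part of this patch; it assumes only the definitions above plus <linux/bio.h> and <linux/printk.h>:

	static void walk_bio_folios(struct bio *bio)
	{
		struct folio_vec fv;
		struct bvec_iter iter;

		/* Visits bio->bi_iter onward, one folio fragment per step. */
		bio_for_each_folio(fv, bio, iter)
			pr_debug("folio offset %zu, len %zu\n",
				 fv.fv_offset, fv.fv_len);
	}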
+
 /*
  * Use u64 for the end pos and sector helpers because if the folio covers the
  * max supported range of the mapping, the start offset of the next folio
@@ -81,7 +126,7 @@ static int filemap_get_contig_folios_d(struct address_space *mapping,
                        break;
 
                f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
-               if (!f)
+               if (IS_ERR_OR_NULL(f))
                        break;
 
                BUG_ON(folios->nr && folio_pos(f) != pos);
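
The IS_ERR_OR_NULL() checks track the filemap API change where __filemap_get_folio() reports failure as an ERR_PTR rather than NULL; testing both keeps the code correct on either side of that transition. A sketch of the pattern (get_locked_folio() is hypothetical, assuming <linux/pagemap.h>):

	static struct folio *get_locked_folio(struct address_space *mapping,
					      pgoff_t index)
	{
		struct folio *f = __filemap_get_folio(mapping, index,
						      FGP_LOCK|FGP_CREAT,
						      mapping_gfp_mask(mapping));

		/* NULL on older kernels, ERR_PTR() on newer ones: */
		if (IS_ERR_OR_NULL(f))
			return NULL;
		return f;
	}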
@@ -290,6 +335,9 @@ static int bch2_quota_reservation_add(struct bch_fs *c,
 {
        int ret;
 
+       if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
+               return 0;
+
        mutex_lock(&inode->ei_quota_lock);
        ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
                              check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
@@ -371,7 +419,9 @@ static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
        inode->v.i_blocks += sectors;
 
 #ifdef CONFIG_BCACHEFS_QUOTA
-       if (quota_res && sectors > 0) {
+       if (quota_res &&
+           !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
+           sectors > 0) {
                BUG_ON(sectors > quota_res->sectors);
                BUG_ON(sectors > inode->ei_quota_reserved);
 
@@ -410,7 +460,7 @@ enum bch_folio_sector_state {
 #undef x
 };
 
-const char * const bch2_folio_sector_states[] = {
+static const char * const bch2_folio_sector_states[] = {
 #define x(n)   #n,
        BCH_FOLIO_SECTOR_STATE()
 #undef x
@@ -526,7 +576,7 @@ static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
 
        s = kzalloc(sizeof(*s) +
                    sizeof(struct bch_folio_sector) *
-                   folio_sectors(folio), GFP_NOFS|gfp);
+                   folio_sectors(folio), gfp);
        if (!s)
                return NULL;
 
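
Dropping the unconditional GFP_NOFS here leaves reclaim behaviour entirely to the caller's gfp argument. Where NOFS semantics are still needed, the kernel's scoped helpers from <linux/sched/mm.h> are the usual alternative; a sketch of that general pattern (not code from this patch — this series instead drops btree locks around blocking allocations, as in readpage_bio_extend() below):

	unsigned int flags = memalloc_nofs_save();

	/* Allocations in this window implicitly behave as GFP_NOFS: */
	s = __bch2_folio_create(folio, GFP_KERNEL);

	memalloc_nofs_restore(flags);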
@@ -553,7 +603,7 @@ static void __bch2_folio_set(struct folio *folio,
                             unsigned pg_offset, unsigned pg_len,
                             unsigned nr_ptrs, unsigned state)
 {
-       struct bch_folio *s = bch2_folio_create(folio, __GFP_NOFAIL);
+       struct bch_folio *s = bch2_folio(folio);
        unsigned i, sectors = folio_sectors(folio);
 
        BUG_ON(pg_offset >= sectors);
@@ -582,11 +632,25 @@ static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
+       struct bch_folio *s;
        u64 offset = folio_sector(folios[0]);
-       unsigned folio_idx = 0;
+       unsigned folio_idx;
        u32 snapshot;
+       bool need_set = false;
        int ret;
 
+       for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
+               s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
+               if (!s)
+                       return -ENOMEM;
+
+               need_set |= !s->uptodate;
+       }
+
+       if (!need_set)
+               return 0;
+
+       folio_idx = 0;
        bch2_trans_init(&trans, c, 0, 0);
 retry:
        bch2_trans_begin(&trans);
@@ -611,7 +675,7 @@ retry:
                        BUG_ON(k.k->p.offset < folio_start);
                        BUG_ON(bkey_start_offset(k.k) > folio_end);
 
-                       if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate)
+                       if (!bch2_folio(folio)->uptodate)
                                __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
 
                        if (k.k->p.offset < folio_end)
@@ -935,7 +999,7 @@ vm_fault_t bch2_page_fault(struct vm_fault *vmf)
        struct address_space *mapping = file->f_mapping;
        struct address_space *fdm = faults_disabled_mapping();
        struct bch_inode_info *inode = file_bch_inode(file);
-       int ret;
+       vm_fault_t ret;
 
        if (fdm == mapping)
                return VM_FAULT_SIGBUS;
@@ -977,7 +1041,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
        struct bch2_folio_reservation res;
        unsigned len;
        loff_t isize;
-       int ret;
+       vm_fault_t ret;
 
        bch2_folio_reservation_init(c, inode, &res);
 
@@ -1003,15 +1067,8 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
 
        len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
 
-       if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
-               if (bch2_folio_set(c, inode_inum(inode), &folio, 1)) {
-                       folio_unlock(folio);
-                       ret = VM_FAULT_SIGBUS;
-                       goto out;
-               }
-       }
-
-       if (bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
+       if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
+           bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
                folio_unlock(folio);
                ret = VM_FAULT_SIGBUS;
                goto out;
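
The two error paths collapse into one test via GNU C's binary "?:": a ?: b evaluates to a when a is nonzero and only evaluates b otherwise, so bch2_folio_reservation_get() runs only if bch2_folio_set() succeeded, and the first nonzero error wins. In isolation (step_one/step_two are hypothetical stand-ins):

	static int step_one(void) { return 0; }
	static int step_two(void) { return -ENOSPC; }

	static int chained(void)
	{
		/* step_two() runs only because step_one() returned 0. */
		return step_one() ?: step_two();
	}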
@@ -1050,17 +1107,16 @@ bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
 
 static void bch2_readpages_end_io(struct bio *bio)
 {
-       struct bvec_iter_all iter;
-       struct folio_vec fv;
+       struct folio_iter fi;
 
-       bio_for_each_folio_all(fv, bio, iter) {
+       bio_for_each_folio_all(fi, bio) {
                if (!bio->bi_status) {
-                       folio_mark_uptodate(fv.fv_folio);
+                       folio_mark_uptodate(fi.folio);
                } else {
-                       folio_clear_uptodate(fv.fv_folio);
-                       folio_set_error(fv.fv_folio);
+                       folio_clear_uptodate(fi.folio);
+                       folio_set_error(fi.folio);
                }
-               folio_unlock(fv.fv_folio);
+               folio_unlock(fi.folio);
        }
 
        bio_put(bio);
@@ -1092,7 +1148,7 @@ static int readpages_iter_init(struct readpages_iter *iter,
 
        darray_for_each(iter->folios, fi) {
                ractl->_nr_pages -= 1U << folio_order(*fi);
-               __bch2_folio_create(*fi, __GFP_NOFAIL);
+               __bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
                folio_put(*fi);
                folio_put(*fi);
        }
@@ -1124,11 +1180,15 @@ static bool extent_partial_reads_expensive(struct bkey_s_c k)
        return false;
 }
 
-static void readpage_bio_extend(struct readpages_iter *iter,
-                               struct bio *bio,
-                               unsigned sectors_this_extent,
-                               bool get_more)
+static int readpage_bio_extend(struct btree_trans *trans,
+                              struct readpages_iter *iter,
+                              struct bio *bio,
+                              unsigned sectors_this_extent,
+                              bool get_more)
 {
+       /* Don't hold btree locks while allocating memory: */
+       bch2_trans_unlock(trans);
+
        while (bio_sectors(bio) < sectors_this_extent &&
               bio->bi_vcnt < bio->bi_max_vecs) {
                struct folio *folio = readpage_iter_peek(iter);
@@ -1150,12 +1210,12 @@ static void readpage_bio_extend(struct readpages_iter *iter,
                        if (!folio)
                                break;
 
-                       if (!__bch2_folio_create(folio, 0)) {
+                       if (!__bch2_folio_create(folio, GFP_KERNEL)) {
                                folio_put(folio);
                                break;
                        }
 
-                       ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_NOFS);
+                       ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
                        if (ret) {
                                __bch2_folio_release(folio);
                                folio_put(folio);
@@ -1169,6 +1229,8 @@ static void readpage_bio_extend(struct readpages_iter *iter,
 
                BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
        }
+
+       return bch2_trans_relock(trans);
 }
 
 static void bchfs_read(struct btree_trans *trans,
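
readpage_bio_extend() now brackets its allocations with an unlock/relock pair: GFP_KERNEL allocations may block in reclaim, which must not happen while btree locks are held, and a failed bch2_trans_relock() surfaces as a transaction-restart error for the caller to handle. The general shape as a sketch (do_blocking_work() is a hypothetical stand-in):

	static int with_btree_locks_dropped(struct btree_trans *trans)
	{
		bch2_trans_unlock(trans);

		do_blocking_work();	/* safe to block here */

		/* Returns a transaction-restart error if relocking fails: */
		return bch2_trans_relock(trans);
	}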
@@ -1236,9 +1298,12 @@ retry:
 
                sectors = min(sectors, k.k->size - offset_into_extent);
 
-               if (readpages_iter)
-                       readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
-                                           extent_partial_reads_expensive(k));
+               if (readpages_iter) {
+                       ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
+                                                 extent_partial_reads_expensive(k));
+                       if (ret)
+                               break;
+               }
 
                bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
                swap(rbio->bio.bi_iter.bi_size, bytes);
@@ -1305,7 +1370,7 @@ void bch2_readahead(struct readahead_control *ractl)
                                   BIO_MAX_VECS);
                struct bch_read_bio *rbio =
                        rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
-                                                  GFP_NOFS, &c->bio_read),
+                                                  GFP_KERNEL, &c->bio_read),
                                  opts);
 
                readpage_iter_advance(&readpages_iter);
@@ -1316,6 +1381,7 @@ void bch2_readahead(struct readahead_control *ractl)
 
                bchfs_read(&trans, rbio, inode_inum(inode),
                           &readpages_iter);
+               bch2_trans_unlock(&trans);
        }
 
        bch2_pagecache_add_put(inode);
@@ -1357,7 +1423,7 @@ static int bch2_read_single_folio(struct folio *folio,
 
        bch2_inode_opts_get(&opts, c, &inode->ei_inode);
 
-       rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
+       rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
                         opts);
        rbio->bio.bi_private = &done;
        rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
@@ -1408,34 +1474,33 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
                container_of(op, struct bch_writepage_io, op);
        struct bch_fs *c = io->op.c;
        struct bio *bio = &io->op.wbio.bio;
-       struct bvec_iter_all iter;
-       struct folio_vec fv;
+       struct folio_iter fi;
        unsigned i;
 
        if (io->op.error) {
                set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
 
-               bio_for_each_folio_all(fv, bio, iter) {
+               bio_for_each_folio_all(fi, bio) {
                        struct bch_folio *s;
 
-                       folio_set_error(fv.fv_folio);
-                       mapping_set_error(fv.fv_folio->mapping, -EIO);
+                       folio_set_error(fi.folio);
+                       mapping_set_error(fi.folio->mapping, -EIO);
 
-                       s = __bch2_folio(fv.fv_folio);
+                       s = __bch2_folio(fi.folio);
                        spin_lock(&s->lock);
-                       for (i = 0; i < folio_sectors(fv.fv_folio); i++)
+                       for (i = 0; i < folio_sectors(fi.folio); i++)
                                s->s[i].nr_replicas = 0;
                        spin_unlock(&s->lock);
                }
        }
 
        if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
-               bio_for_each_folio_all(fv, bio, iter) {
+               bio_for_each_folio_all(fi, bio) {
                        struct bch_folio *s;
 
-                       s = __bch2_folio(fv.fv_folio);
+                       s = __bch2_folio(fi.folio);
                        spin_lock(&s->lock);
-                       for (i = 0; i < folio_sectors(fv.fv_folio); i++)
+                       for (i = 0; i < folio_sectors(fi.folio); i++)
                                s->s[i].nr_replicas = 0;
                        spin_unlock(&s->lock);
                }
@@ -1460,11 +1525,11 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
         */
        i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
 
-       bio_for_each_folio_all(fv, bio, iter) {
-               struct bch_folio *s = __bch2_folio(fv.fv_folio);
+       bio_for_each_folio_all(fi, bio) {
+               struct bch_folio *s = __bch2_folio(fi.folio);
 
                if (atomic_dec_and_test(&s->write_count))
-                       folio_end_writeback(fv.fv_folio);
+                       folio_end_writeback(fi.folio);
        }
 
        bio_put(&io->op.wbio.bio);
@@ -1493,7 +1558,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
 
        w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
                                              REQ_OP_WRITE,
-                                             GFP_NOFS,
+                                             GFP_KERNEL,
                                              &c->writepage_bioset),
                             struct bch_writepage_io, op.wbio.bio);
 
@@ -1512,11 +1577,10 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
        op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
 }
 
-static int __bch2_writepage(struct page *_page,
+static int __bch2_writepage(struct folio *folio,
                            struct writeback_control *wbc,
                            void *data)
 {
-       struct folio *folio = page_folio(_page);
        struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bch_writepage_state *w = data;
@@ -1549,7 +1613,7 @@ static int __bch2_writepage(struct page *_page,
                           folio_size(folio));
 do_io:
        f_sectors = folio_sectors(folio);
-       s = bch2_folio_create(folio, __GFP_NOFAIL);
+       s = bch2_folio(folio);
 
        if (f_sectors > w->tmp_sectors) {
                kfree(w->tmp);
@@ -1702,7 +1766,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
        folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
                                FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
                                mapping_gfp_mask(mapping));
-       if (!folio)
+       if (IS_ERR_OR_NULL(folio))
                goto err_unlock;
 
        if (folio_test_uptodate(folio))
@@ -1731,11 +1795,9 @@ readpage:
        if (ret)
                goto err;
 out:
-       if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
-               ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
-               if (ret)
-                       goto err;
-       }
+       ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+       if (ret)
+               goto err;
 
        ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
        if (ret) {
@@ -1871,19 +1933,16 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
                }
        }
 
+       ret = bch2_folio_set(c, inode_inum(inode), folios.data, folios.nr);
+       if (ret)
+               goto out;
+
        f_pos = pos;
        f_offset = pos - folio_pos(darray_first(folios));
        darray_for_each(folios, fi) {
                struct folio *f = *fi;
                u64 f_len = min(end, folio_end_pos(f)) - f_pos;
 
-               if (!bch2_folio_create(f, __GFP_NOFAIL)->uptodate) {
-                       ret = bch2_folio_set(c, inode_inum(inode), fi,
-                                            folios.data + folios.nr - fi);
-                       if (ret)
-                               goto out;
-               }
-
                /*
                 * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
                 * supposed to write as much as we have disk space for.
@@ -1915,7 +1974,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
        darray_for_each(folios, fi) {
                struct folio *f = *fi;
                u64 f_len = min(end, folio_end_pos(f)) - f_pos;
-               unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
+               unsigned f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
 
                if (!f_copied) {
                        folios_trunc(&folios, fi);
@@ -2305,10 +2364,29 @@ static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
 static void bch2_dio_write_loop_async(struct bch_write_op *);
 static __always_inline long bch2_dio_write_done(struct dio_write *dio);
 
+/*
+ * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
+ * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
+ * caller's stack, we're not guaranteed that it will live for the duration of
+ * the IO:
+ */
 static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
 {
        struct iovec *iov = dio->inline_vecs;
 
+       /*
+        * iov_iter has a single embedded iovec - nothing to do:
+        */
+       if (iter_is_ubuf(&dio->iter))
+               return 0;
+
+       /*
+        * We don't currently handle non-iovec iov_iters here - return an error,
+        * and we'll fall back to doing the IO synchronously:
+        */
+       if (!iter_is_iovec(&dio->iter))
+               return -1;
+
        if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
                iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
                                    GFP_KERNEL);
@@ -2318,8 +2396,8 @@ static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
                dio->free_iov = true;
        }
 
-       memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
-       dio->iter.iov = iov;
+       memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
+       dio->iter.__iov = iov;
        return 0;
 }
 
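
Two cases are newly filtered out before the copy: ITER_UBUF iterators keep their single user pointer inline in the iov_iter, so there is nothing external to stash, and non-iovec iterators (bvec, kvec, ...) return -1 so the write falls back to synchronous IO. The predicate in isolation (dio_iter_needs_stash() is hypothetical, assuming <linux/uio.h>):

	static bool dio_iter_needs_stash(const struct iov_iter *iter)
	{
		/* ITER_UBUF: single buffer stored inline, nothing to copy. */
		if (iter_is_ubuf(iter))
			return false;

		/* Only ITER_IOVEC carries an external, possibly on-stack array. */
		return iter_is_iovec(iter);
	}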
@@ -2379,7 +2457,7 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio)
        bch2_pagecache_block_put(inode);
 
        if (dio->free_iov)
-               kfree(dio->iter.iov);
+               kfree(dio->iter.__iov);
 
        ret = dio->op.error ?: ((long) dio->written << 9);
        bio_put(&dio->op.wbio.bio);
@@ -2421,13 +2499,7 @@ static __always_inline void bch2_dio_write_end(struct dio_write *dio)
                mutex_unlock(&inode->ei_quota_lock);
        }
 
-       if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
-               struct bvec_iter_all iter;
-               struct folio_vec fv;
-
-               bio_for_each_folio_all(fv, bio, iter)
-                       folio_put(fv.fv_folio);
-       }
+       bio_release_pages(bio, false);
 
        if (unlikely(dio->op.error))
                set_bit(EI_INODE_ERROR, &inode->ei_flags);
@@ -2546,13 +2618,7 @@ out:
 err:
        dio->op.error = ret;
 
-       if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
-               struct bvec_iter_all iter;
-               struct folio_vec fv;
-
-               bio_for_each_folio_all(fv, bio, iter)
-                       folio_put(fv.fv_folio);
-       }
+       bio_release_pages(bio, false);
 
        bch2_quota_reservation_put(c, inode, &dio->quota_res);
        goto out;
@@ -2791,7 +2857,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
        u64 end_pos;
 
        folio = filemap_lock_folio(mapping, index);
-       if (!folio) {
+       if (IS_ERR_OR_NULL(folio)) {
                /*
                 * XXX: we're doing two index lookups when we end up reading the
                 * folio
@@ -2804,7 +2870,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 
                folio = __filemap_get_folio(mapping, index,
                                            FGP_LOCK|FGP_CREAT, GFP_KERNEL);
-               if (unlikely(!folio)) {
+               if (unlikely(IS_ERR_OR_NULL(folio))) {
                        ret = -ENOMEM;
                        goto out;
                }
@@ -2835,11 +2901,9 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
                        goto unlock;
        }
 
-       if (!s->uptodate) {
-               ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
-               if (ret)
-                       goto unlock;
-       }
+       ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+       if (ret)
+               goto unlock;
 
        for (i = round_up(start_offset, block_bytes(c)) >> 9;
             i < round_down(end_offset, block_bytes(c)) >> 9;
@@ -2912,7 +2976,7 @@ static int bch2_truncate_folios(struct bch_inode_info *inode,
        return ret;
 }
 
-static int bch2_extend(struct user_namespace *mnt_userns,
+static int bch2_extend(struct mnt_idmap *idmap,
                       struct bch_inode_info *inode,
                       struct bch_inode_unpacked *inode_u,
                       struct iattr *iattr)
@@ -2931,7 +2995,7 @@ static int bch2_extend(struct user_namespace *mnt_userns,
 
        truncate_setsize(&inode->v, iattr->ia_size);
 
-       return bch2_setattr_nonsize(mnt_userns, inode, iattr);
+       return bch2_setattr_nonsize(idmap, inode, iattr);
 }
 
 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
@@ -2952,7 +3016,7 @@ static int bch2_truncate_start_fn(struct bch_inode_info *inode,
        return 0;
 }
 
-int bch2_truncate(struct user_namespace *mnt_userns,
+int bch2_truncate(struct mnt_idmap *idmap,
                  struct bch_inode_info *inode, struct iattr *iattr)
 {
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -2997,7 +3061,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
                  (u64) inode->v.i_size, inode_u.bi_size);
 
        if (iattr->ia_size > inode->v.i_size) {
-               ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
+               ret = bch2_extend(idmap, inode, &inode_u, iattr);
                goto err;
        }
 
@@ -3055,7 +3119,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
        ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
        mutex_unlock(&inode->ei_update_lock);
 
-       ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
+       ret = bch2_setattr_nonsize(idmap, inode, iattr);
 err:
        bch2_pagecache_block_put(inode);
        return bch2_err_class(ret);
@@ -3311,6 +3375,8 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                struct quota_res quota_res = { 0 };
                struct bkey_s_c k;
                unsigned sectors;
+               bool is_allocation;
+               u64 hole_start, hole_end;
                u32 snapshot;
 
                bch2_trans_begin(&trans);
@@ -3326,6 +3392,10 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                if ((ret = bkey_err(k)))
                        goto bkey_err;
 
+               hole_start      = iter.pos.offset;
+               hole_end        = bpos_min(k.k->p, end_pos).offset;
+               is_allocation   = bkey_extent_is_allocation(k.k);
+
                /* already reserved */
                if (bkey_extent_is_reservation(k) &&
                    bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
@@ -3339,17 +3409,26 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                        continue;
                }
 
-               /*
-                * XXX: for nocow mode, we should promote shared extents to
-                * unshared here
-                */
+               if (!(mode & FALLOC_FL_ZERO_RANGE)) {
+                       ret = drop_locks_do(&trans,
+                               (bch2_clamp_data_hole(&inode->v,
+                                                     &hole_start,
+                                                     &hole_end,
+                                                     opts.data_replicas), 0));
+                       bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
+
+                       if (ret)
+                               goto bkey_err;
+
+                       if (hole_start == hole_end)
+                               continue;
+               }
 
-               sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
+               sectors = hole_end - hole_start;
 
-               if (!bkey_extent_is_allocation(k.k)) {
+               if (!is_allocation) {
                        ret = bch2_quota_reservation_add(c, inode,
-                                       &quota_res,
-                                       sectors, true);
+                                       &quota_res, sectors, true);
                        if (unlikely(ret))
                                goto bkey_err;
                }
@@ -3361,15 +3440,15 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                        goto bkey_err;
 
                i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
+
+               drop_locks_do(&trans,
+                       (mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
 bkey_err:
                bch2_quota_reservation_put(c, inode, &quota_res);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
        }
 
-       bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
-       mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
-
        if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
                struct quota_res quota_res = { 0 };
                s64 i_sectors_delta = 0;
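
drop_locks_do() is the bcachefs idiom for evaluating an expression with btree locks dropped and then relocking. It expects an int-valued expression, hence the (expr, 0) comma wrappers around the void calls above, which yield 0 so the relock always runs. Its approximate shape (a sketch; see btree_iter.h for the authoritative definition):

	#define drop_locks_do(_trans, _do)				\
	({								\
		bch2_trans_unlock(_trans);				\
		(_do) ?: bch2_trans_relock(_trans);			\
	})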
@@ -3617,14 +3696,16 @@ err:
 
 /* fseek: */
 
-static int folio_data_offset(struct folio *folio, loff_t pos)
+static int folio_data_offset(struct folio *folio, loff_t pos,
+                            unsigned min_replicas)
 {
        struct bch_folio *s = bch2_folio(folio);
        unsigned i, sectors = folio_sectors(folio);
 
        if (s)
                for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
-                       if (s->s[i].state >= SECTOR_dirty)
+                       if (s->s[i].state >= SECTOR_dirty &&
+                           s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
                                return i << SECTOR_SHIFT;
 
        return -1;
@@ -3632,7 +3713,8 @@ static int folio_data_offset(struct folio *folio, loff_t pos)
 
 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
                                       loff_t start_offset,
-                                      loff_t end_offset)
+                                      loff_t end_offset,
+                                      unsigned min_replicas)
 {
        struct folio_batch fbatch;
        pgoff_t start_index     = start_offset >> PAGE_SHIFT;
@@ -3651,7 +3733,8 @@ static loff_t bch2_seek_pagecache_data(struct inode *vinode,
 
                        folio_lock(folio);
                        offset = folio_data_offset(folio,
-                                       max(folio_pos(folio), start_offset));
+                                       max(folio_pos(folio), start_offset),
+                                       min_replicas);
                        if (offset >= 0) {
                                ret = clamp(folio_pos(folio) + offset,
                                            start_offset, end_offset);
@@ -3713,7 +3796,7 @@ err:
 
        if (next_data > offset)
                next_data = bch2_seek_pagecache_data(&inode->v,
-                                                    offset, next_data);
+                                                    offset, next_data, 0);
 
        if (next_data >= isize)
                return -ENXIO;
@@ -3721,7 +3804,8 @@ err:
        return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
 }
 
-static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
+static bool folio_hole_offset(struct address_space *mapping, loff_t *offset,
+                             unsigned min_replicas)
 {
        struct folio *folio;
        struct bch_folio *s;
@@ -3729,7 +3813,7 @@ static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
        bool ret = true;
 
        folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
-       if (!folio)
+       if (IS_ERR_OR_NULL(folio))
                return true;
 
        s = bch2_folio(folio);
@@ -3738,7 +3822,8 @@ static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
 
        sectors = folio_sectors(folio);
        for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
-               if (s->s[i].state < SECTOR_dirty) {
+               if (s->s[i].state < SECTOR_dirty ||
+                   s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
                        *offset = max(*offset,
                                      folio_pos(folio) + (i << SECTOR_SHIFT));
                        goto unlock;
@@ -3753,18 +3838,34 @@ unlock:
 
 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
                                       loff_t start_offset,
-                                      loff_t end_offset)
+                                      loff_t end_offset,
+                                      unsigned min_replicas)
 {
        struct address_space *mapping = vinode->i_mapping;
        loff_t offset = start_offset;
 
        while (offset < end_offset &&
-              !folio_hole_offset(mapping, &offset))
+              !folio_hole_offset(mapping, &offset, min_replicas))
                ;
 
        return min(offset, end_offset);
 }
 
+static void bch2_clamp_data_hole(struct inode *inode,
+                                u64 *hole_start,
+                                u64 *hole_end,
+                                unsigned min_replicas)
+{
+       *hole_start = bch2_seek_pagecache_hole(inode,
+               *hole_start << 9, *hole_end << 9, min_replicas) >> 9;
+
+       if (*hole_start == *hole_end)
+               return;
+
+       *hole_end = bch2_seek_pagecache_data(inode,
+               *hole_start << 9, *hole_end << 9, min_replicas) >> 9;
+}
+
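
bch2_clamp_data_hole() narrows a candidate hole, expressed in 512-byte sectors (hence the << 9 / >> 9 conversions), so that fallocate only reserves ranges that are also holes in the page cache; dirty pagecache in the range is left to writeback, which carries its own reservation. A worked example with hypothetical numbers:

	/*
	 * Suppose dirty pagecache covers sectors [8, 16) and the candidate
	 * hole from the extents btree is [0, 32):
	 *
	 *   hole_start: first pagecache hole in [0, 32)  -> stays 0
	 *   hole_end:   first pagecache data in [0, 32)  -> clamped to 8
	 *
	 * __bchfs_fallocate() then reserves [0, 8), re-positions its
	 * iterator, and picks up [16, 32) on a later pass.
	 */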
 static loff_t bch2_seek_hole(struct file *file, u64 offset)
 {
        struct bch_inode_info *inode = file_bch_inode(file);
@@ -3794,12 +3895,12 @@ retry:
                           BTREE_ITER_SLOTS, k, ret) {
                if (k.k->p.inode != inode->v.i_ino) {
                        next_hole = bch2_seek_pagecache_hole(&inode->v,
-                                       offset, MAX_LFS_FILESIZE);
+                                       offset, MAX_LFS_FILESIZE, 0);
                        break;
                } else if (!bkey_extent_is_data(k.k)) {
                        next_hole = bch2_seek_pagecache_hole(&inode->v,
                                        max(offset, bkey_start_offset(k.k) << 9),
-                                       k.k->p.offset << 9);
+                                       k.k->p.offset << 9, 0);
 
                        if (next_hole < k.k->p.offset << 9)
                                break;
@@ -3856,10 +3957,6 @@ void bch2_fs_fsio_exit(struct bch_fs *c)
 
 int bch2_fs_fsio_init(struct bch_fs *c)
 {
-       int ret = 0;
-
-       pr_verbose_init(c->opts, "");
-
        if (bioset_init(&c->writepage_bioset,
                        4, offsetof(struct bch_writepage_io, op.wbio.bio),
                        BIOSET_NEED_BVECS))
@@ -3879,8 +3976,7 @@ int bch2_fs_fsio_init(struct bch_fs *c)
                        1, offsetof(struct nocow_flush, bio), 0))
                return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
 
-       pr_verbose_init(c->opts, "ret %i", ret);
-       return ret;
+       return 0;
 }
 
 #endif /* NO_BCACHEFS_FS */