#include "keylist.h"
#include "quota.h"
#include "reflink.h"
+#include "trace.h"
#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/writeback.h>
-#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
+static void bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned);
+
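+/* A (folio, offset, len) triple - like a bio_vec, but folio-based: */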
+struct folio_vec {
+ struct folio *fv_folio;
+ size_t fv_offset;
+ size_t fv_len;
+};
+
+static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
+{
+ struct folio *folio = page_folio(bv.bv_page);
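+	/*
+	 * Find the bvec's offset within its (possibly multi-page) folio, and
+	 * clamp the length so we never cross a folio boundary:
+	 */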
+ size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
+ bv.bv_offset;
+ size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
+
+ return (struct folio_vec) {
+ .fv_folio = folio,
+ .fv_offset = offset,
+ .fv_len = len,
+ };
+}
+
+static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
+ struct bvec_iter iter)
+{
+ return biovec_to_foliovec(bio_iter_iovec(bio, iter));
+}
+
+#define __bio_for_each_folio(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
+
+/**
+ * bio_for_each_folio - iterate over folios within a bio
+ * @bvl: struct folio_vec, updated for each folio
+ * @bio: bio to iterate over
+ * @iter: struct bvec_iter, used as iteration state
+ *
+ * Like other non-_all versions, this iterates over what bio->bi_iter currently
+ * points to. This version is for drivers, where the bio may have previously
+ * been split or cloned.
+ */
+#define bio_for_each_folio(bvl, bio, iter) \
+ __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
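+/*
+ * Illustrative use only (not part of this patch): walk the folios the bio's
+ * iterator currently points at, without modifying bio->bi_iter:
+ *
+ *	struct folio_vec fv;
+ *	struct bvec_iter iter;
+ *
+ *	bio_for_each_folio(fv, bio, iter)
+ *		flush_dcache_folio(fv.fv_folio);
+ */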
+
/*
* Use u64 for the end pos and sector helpers because if the folio covers the
* max supported range of the mapping, the start offset of the next folio
break;
f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
- if (!f)
+ if (IS_ERR_OR_NULL(f))
break;
BUG_ON(folios->nr && folio_pos(f) != pos);
{
int ret;
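+	/* No quota accounting for snapshot inodes: */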
+ if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
+ return 0;
+
mutex_lock(&inode->ei_quota_lock);
ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
inode->v.i_blocks += sectors;
#ifdef CONFIG_BCACHEFS_QUOTA
- if (quota_res && sectors > 0) {
+ if (quota_res &&
+ !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
+ sectors > 0) {
BUG_ON(sectors > quota_res->sectors);
BUG_ON(sectors > inode->ei_quota_reserved);
#undef x
};
-const char * const bch2_folio_sector_states[] = {
+static const char * const bch2_folio_sector_states[] = {
#define x(n) #n,
BCH_FOLIO_SECTOR_STATE()
#undef x
s = kzalloc(sizeof(*s) +
sizeof(struct bch_folio_sector) *
- folio_sectors(folio), GFP_NOFS|gfp);
+ folio_sectors(folio), gfp);
if (!s)
return NULL;
unsigned pg_offset, unsigned pg_len,
unsigned nr_ptrs, unsigned state)
{
- struct bch_folio *s = bch2_folio_create(folio, __GFP_NOFAIL);
+ struct bch_folio *s = bch2_folio(folio);
unsigned i, sectors = folio_sectors(folio);
BUG_ON(pg_offset >= sectors);
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
+ struct bch_folio *s;
u64 offset = folio_sector(folios[0]);
- unsigned folio_idx = 0;
+ unsigned folio_idx;
u32 snapshot;
+ bool need_set = false;
int ret;
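+	/*
+	 * Allocate folio private state up front, and check whether any folios
+	 * still need their sector state initialized:
+	 */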
+ for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
+ s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ need_set |= !s->uptodate;
+ }
+
+ if (!need_set)
+ return 0;
+
+ folio_idx = 0;
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
BUG_ON(k.k->p.offset < folio_start);
BUG_ON(bkey_start_offset(k.k) > folio_end);
- if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate)
+ if (!bch2_folio(folio)->uptodate)
__bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
if (k.k->p.offset < folio_end)
struct address_space *mapping = file->f_mapping;
struct address_space *fdm = faults_disabled_mapping();
struct bch_inode_info *inode = file_bch_inode(file);
- int ret;
+ vm_fault_t ret;
if (fdm == mapping)
return VM_FAULT_SIGBUS;
struct bch2_folio_reservation res;
unsigned len;
loff_t isize;
- int ret;
+ vm_fault_t ret;
bch2_folio_reservation_init(c, inode, &res);
len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
- if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
- if (bch2_folio_set(c, inode_inum(inode), &folio, 1)) {
- folio_unlock(folio);
- ret = VM_FAULT_SIGBUS;
- goto out;
- }
- }
-
- if (bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
+ if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
+ bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
folio_unlock(folio);
ret = VM_FAULT_SIGBUS;
goto out;
static void bch2_readpages_end_io(struct bio *bio)
{
- struct bvec_iter_all iter;
- struct folio_vec fv;
+ struct folio_iter fi;
- bio_for_each_folio_all(fv, bio, iter) {
+ bio_for_each_folio_all(fi, bio) {
if (!bio->bi_status) {
- folio_mark_uptodate(fv.fv_folio);
+ folio_mark_uptodate(fi.folio);
} else {
- folio_clear_uptodate(fv.fv_folio);
- folio_set_error(fv.fv_folio);
+ folio_clear_uptodate(fi.folio);
+ folio_set_error(fi.folio);
}
- folio_unlock(fv.fv_folio);
+ folio_unlock(fi.folio);
}
bio_put(bio);
darray_for_each(iter->folios, fi) {
ractl->_nr_pages -= 1U << folio_order(*fi);
- __bch2_folio_create(*fi, __GFP_NOFAIL);
+ __bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
folio_put(*fi);
folio_put(*fi);
}
return false;
}
-static void readpage_bio_extend(struct readpages_iter *iter,
- struct bio *bio,
- unsigned sectors_this_extent,
- bool get_more)
+static int readpage_bio_extend(struct btree_trans *trans,
+ struct readpages_iter *iter,
+ struct bio *bio,
+ unsigned sectors_this_extent,
+ bool get_more)
{
+ /* Don't hold btree locks while allocating memory: */
+ bch2_trans_unlock(trans);
+
while (bio_sectors(bio) < sectors_this_extent &&
bio->bi_vcnt < bio->bi_max_vecs) {
struct folio *folio = readpage_iter_peek(iter);
if (!folio)
break;
- if (!__bch2_folio_create(folio, 0)) {
+ if (!__bch2_folio_create(folio, GFP_KERNEL)) {
folio_put(folio);
break;
}
- ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_NOFS);
+ ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
if (ret) {
__bch2_folio_release(folio);
folio_put(folio);
BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
}
+
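+	/* We unlocked above to allocate; the caller expects btree locks held: */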
+ return bch2_trans_relock(trans);
}
static void bchfs_read(struct btree_trans *trans,
sectors = min(sectors, k.k->size - offset_into_extent);
- if (readpages_iter)
- readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
- extent_partial_reads_expensive(k));
+ if (readpages_iter) {
+ ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
+ extent_partial_reads_expensive(k));
+ if (ret)
+ break;
+ }
bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
swap(rbio->bio.bi_iter.bi_size, bytes);
BIO_MAX_VECS);
struct bch_read_bio *rbio =
rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
- GFP_NOFS, &c->bio_read),
+ GFP_KERNEL, &c->bio_read),
opts);
readpage_iter_advance(&readpages_iter);
bchfs_read(&trans, rbio, inode_inum(inode),
&readpages_iter);
+ bch2_trans_unlock(&trans);
}
bch2_pagecache_add_put(inode);
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
- rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
+ rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
opts);
rbio->bio.bi_private = &done;
rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
container_of(op, struct bch_writepage_io, op);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
- struct bvec_iter_all iter;
- struct folio_vec fv;
+ struct folio_iter fi;
unsigned i;
if (io->op.error) {
set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
- bio_for_each_folio_all(fv, bio, iter) {
+ bio_for_each_folio_all(fi, bio) {
struct bch_folio *s;
- folio_set_error(fv.fv_folio);
- mapping_set_error(fv.fv_folio->mapping, -EIO);
+ folio_set_error(fi.folio);
+ mapping_set_error(fi.folio->mapping, -EIO);
- s = __bch2_folio(fv.fv_folio);
+ s = __bch2_folio(fi.folio);
spin_lock(&s->lock);
- for (i = 0; i < folio_sectors(fv.fv_folio); i++)
+ for (i = 0; i < folio_sectors(fi.folio); i++)
s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
- bio_for_each_folio_all(fv, bio, iter) {
+ bio_for_each_folio_all(fi, bio) {
struct bch_folio *s;
- s = __bch2_folio(fv.fv_folio);
+ s = __bch2_folio(fi.folio);
spin_lock(&s->lock);
- for (i = 0; i < folio_sectors(fv.fv_folio); i++)
+ for (i = 0; i < folio_sectors(fi.folio); i++)
s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
*/
i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
- bio_for_each_folio_all(fv, bio, iter) {
- struct bch_folio *s = __bch2_folio(fv.fv_folio);
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s = __bch2_folio(fi.folio);
if (atomic_dec_and_test(&s->write_count))
- folio_end_writeback(fv.fv_folio);
+ folio_end_writeback(fi.folio);
}
bio_put(&io->op.wbio.bio);
w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
REQ_OP_WRITE,
- GFP_NOFS,
+ GFP_KERNEL,
&c->writepage_bioset),
struct bch_writepage_io, op.wbio.bio);
op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
}
-static int __bch2_writepage(struct page *_page,
+static int __bch2_writepage(struct folio *folio,
struct writeback_control *wbc,
void *data)
{
- struct folio *folio = page_folio(_page);
struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_writepage_state *w = data;
folio_size(folio));
do_io:
f_sectors = folio_sectors(folio);
- s = bch2_folio_create(folio, __GFP_NOFAIL);
+ s = bch2_folio(folio);
if (f_sectors > w->tmp_sectors) {
kfree(w->tmp);
folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
mapping_gfp_mask(mapping));
- if (!folio)
+ if (IS_ERR_OR_NULL(folio))
goto err_unlock;
if (folio_test_uptodate(folio))
if (ret)
goto err;
out:
- if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
- ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
- if (ret)
- goto err;
- }
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto err;
ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
if (ret) {
}
}
+ ret = bch2_folio_set(c, inode_inum(inode), folios.data, folios.nr);
+ if (ret)
+ goto out;
+
f_pos = pos;
f_offset = pos - folio_pos(darray_first(folios));
darray_for_each(folios, fi) {
struct folio *f = *fi;
u64 f_len = min(end, folio_end_pos(f)) - f_pos;
- if (!bch2_folio_create(f, __GFP_NOFAIL)->uptodate) {
- ret = bch2_folio_set(c, inode_inum(inode), fi,
- folios.data + folios.nr - fi);
- if (ret)
- goto out;
- }
-
/*
* XXX: per POSIX and fstests generic/275, on -ENOSPC we're
* supposed to write as much as we have disk space for.
darray_for_each(folios, fi) {
struct folio *f = *fi;
u64 f_len = min(end, folio_end_pos(f)) - f_pos;
- unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
+ unsigned f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
if (!f_copied) {
folios_trunc(&folios, fi);
static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);
+/*
+ * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
+ * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
+ * caller's stack, and we're not guaranteed that it will live for the duration
+ * of the IO:
+ */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
struct iovec *iov = dio->inline_vecs;
+	/*
+	 * ITER_UBUF: the user buffer is embedded in the iov_iter itself, so
+	 * there's no iovec array to stash:
+	 */
+ if (iter_is_ubuf(&dio->iter))
+ return 0;
+
+ /*
+ * We don't currently handle non-iovec iov_iters here - return an error,
+ * and we'll fall back to doing the IO synchronously:
+ */
+ if (!iter_is_iovec(&dio->iter))
+ return -1;
+
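+	/* iovec too big for the dio's inline storage - allocate on the heap: */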
if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
GFP_KERNEL);
dio->free_iov = true;
}
- memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
- dio->iter.iov = iov;
+ memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
+ dio->iter.__iov = iov;
return 0;
}
bch2_pagecache_block_put(inode);
if (dio->free_iov)
- kfree(dio->iter.iov);
+ kfree(dio->iter.__iov);
ret = dio->op.error ?: ((long) dio->written << 9);
bio_put(&dio->op.wbio.bio);
mutex_unlock(&inode->ei_quota_lock);
}
- if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
- struct bvec_iter_all iter;
- struct folio_vec fv;
-
- bio_for_each_folio_all(fv, bio, iter)
- folio_put(fv.fv_folio);
- }
+ bio_release_pages(bio, false);
if (unlikely(dio->op.error))
set_bit(EI_INODE_ERROR, &inode->ei_flags);
err:
dio->op.error = ret;
- if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
- struct bvec_iter_all iter;
- struct folio_vec fv;
-
- bio_for_each_folio_all(fv, bio, iter)
- folio_put(fv.fv_folio);
- }
+ bio_release_pages(bio, false);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
goto out;
u64 end_pos;
folio = filemap_lock_folio(mapping, index);
- if (!folio) {
+ if (IS_ERR_OR_NULL(folio)) {
/*
* XXX: we're doing two index lookups when we end up reading the
* folio
folio = __filemap_get_folio(mapping, index,
FGP_LOCK|FGP_CREAT, GFP_KERNEL);
- if (unlikely(!folio)) {
+ if (unlikely(IS_ERR_OR_NULL(folio))) {
ret = -ENOMEM;
goto out;
}
goto unlock;
}
- if (!s->uptodate) {
- ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
- if (ret)
- goto unlock;
- }
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto unlock;
for (i = round_up(start_offset, block_bytes(c)) >> 9;
i < round_down(end_offset, block_bytes(c)) >> 9;
return ret;
}
-static int bch2_extend(struct user_namespace *mnt_userns,
+static int bch2_extend(struct mnt_idmap *idmap,
struct bch_inode_info *inode,
struct bch_inode_unpacked *inode_u,
struct iattr *iattr)
truncate_setsize(&inode->v, iattr->ia_size);
- return bch2_setattr_nonsize(mnt_userns, inode, iattr);
+ return bch2_setattr_nonsize(idmap, inode, iattr);
}
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
return 0;
}
-int bch2_truncate(struct user_namespace *mnt_userns,
+int bch2_truncate(struct mnt_idmap *idmap,
struct bch_inode_info *inode, struct iattr *iattr)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
(u64) inode->v.i_size, inode_u.bi_size);
if (iattr->ia_size > inode->v.i_size) {
- ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
+ ret = bch2_extend(idmap, inode, &inode_u, iattr);
goto err;
}
ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
mutex_unlock(&inode->ei_update_lock);
- ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
+ ret = bch2_setattr_nonsize(idmap, inode, iattr);
err:
bch2_pagecache_block_put(inode);
return bch2_err_class(ret);
struct quota_res quota_res = { 0 };
struct bkey_s_c k;
unsigned sectors;
+ bool is_allocation;
+ u64 hole_start, hole_end;
u32 snapshot;
bch2_trans_begin(&trans);
if ((ret = bkey_err(k)))
goto bkey_err;
+ hole_start = iter.pos.offset;
+ hole_end = bpos_min(k.k->p, end_pos).offset;
+ is_allocation = bkey_extent_is_allocation(k.k);
+
/* already reserved */
if (bkey_extent_is_reservation(k) &&
bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
continue;
}
- /*
- * XXX: for nocow mode, we should promote shared extents to
- * unshared here
- */
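+		/*
+		 * Shrink the hole to exclude any part that has data in the
+		 * pagecache:
+		 */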
+ if (!(mode & FALLOC_FL_ZERO_RANGE)) {
+ ret = drop_locks_do(&trans,
+ (bch2_clamp_data_hole(&inode->v,
+ &hole_start,
+ &hole_end,
+ opts.data_replicas), 0));
+ bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
+
+ if (ret)
+ goto bkey_err;
+
+ if (hole_start == hole_end)
+ continue;
+ }
- sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
+ sectors = hole_end - hole_start;
- if (!bkey_extent_is_allocation(k.k)) {
+ if (!is_allocation) {
ret = bch2_quota_reservation_add(c, inode,
- "a_res,
- sectors, true);
+ "a_res, sectors, true);
if (unlikely(ret))
goto bkey_err;
}
goto bkey_err;
i_sectors_acct(c, inode, "a_res, i_sectors_delta);
+
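+		/*
+		 * Lock ordering: btree locks are dropped before taking
+		 * pagecache locks:
+		 */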
+ drop_locks_do(&trans,
+ (mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
bkey_err:
bch2_quota_reservation_put(c, inode, "a_res);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = 0;
}
- bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
- mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
-
if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
struct quota_res quota_res = { 0 };
s64 i_sectors_delta = 0;
/* fseek: */
-static int folio_data_offset(struct folio *folio, loff_t pos)
+static int folio_data_offset(struct folio *folio, loff_t pos,
+ unsigned min_replicas)
{
struct bch_folio *s = bch2_folio(folio);
unsigned i, sectors = folio_sectors(folio);
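+	/*
+	 * A sector only counts as data if it's dirty and has at least
+	 * min_replicas replicas written or reserved:
+	 */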
if (s)
for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
- if (s->s[i].state >= SECTOR_dirty)
+ if (s->s[i].state >= SECTOR_dirty &&
+ s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
return i << SECTOR_SHIFT;
return -1;
static loff_t bch2_seek_pagecache_data(struct inode *vinode,
loff_t start_offset,
- loff_t end_offset)
+ loff_t end_offset,
+ unsigned min_replicas)
{
struct folio_batch fbatch;
pgoff_t start_index = start_offset >> PAGE_SHIFT;
folio_lock(folio);
offset = folio_data_offset(folio,
- max(folio_pos(folio), start_offset));
+ max(folio_pos(folio), start_offset),
+ min_replicas);
if (offset >= 0) {
ret = clamp(folio_pos(folio) + offset,
start_offset, end_offset);
if (next_data > offset)
next_data = bch2_seek_pagecache_data(&inode->v,
- offset, next_data);
+ offset, next_data, 0);
if (next_data >= isize)
return -ENXIO;
return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
-static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
+static bool folio_hole_offset(struct address_space *mapping, loff_t *offset,
+ unsigned min_replicas)
{
struct folio *folio;
struct bch_folio *s;
bool ret = true;
folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
- if (!folio)
+ if (IS_ERR_OR_NULL(folio))
return true;
s = bch2_folio(folio);
sectors = folio_sectors(folio);
for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
- if (s->s[i].state < SECTOR_dirty) {
+ if (s->s[i].state < SECTOR_dirty ||
+ s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
*offset = max(*offset,
folio_pos(folio) + (i << SECTOR_SHIFT));
goto unlock;
static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
loff_t start_offset,
- loff_t end_offset)
+ loff_t end_offset,
+ unsigned min_replicas)
{
struct address_space *mapping = vinode->i_mapping;
loff_t offset = start_offset;
while (offset < end_offset &&
- !folio_hole_offset(mapping, &offset))
+ !folio_hole_offset(mapping, &offset, min_replicas))
;
return min(offset, end_offset);
}
+static void bch2_clamp_data_hole(struct inode *inode,
+ u64 *hole_start,
+ u64 *hole_end,
+ unsigned min_replicas)
+{
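+	/*
+	 * hole_start/hole_end are in 512-byte sectors; the pagecache helpers
+	 * take byte offsets:
+	 */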
+ *hole_start = bch2_seek_pagecache_hole(inode,
+ *hole_start << 9, *hole_end << 9, min_replicas) >> 9;
+
+ if (*hole_start == *hole_end)
+ return;
+
+ *hole_end = bch2_seek_pagecache_data(inode,
+ *hole_start << 9, *hole_end << 9, min_replicas) >> 9;
+}
+
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
BTREE_ITER_SLOTS, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
next_hole = bch2_seek_pagecache_hole(&inode->v,
- offset, MAX_LFS_FILESIZE);
+ offset, MAX_LFS_FILESIZE, 0);
break;
} else if (!bkey_extent_is_data(k.k)) {
next_hole = bch2_seek_pagecache_hole(&inode->v,
max(offset, bkey_start_offset(k.k) << 9),
- k.k->p.offset << 9);
+ k.k->p.offset << 9, 0);
if (next_hole < k.k->p.offset << 9)
break;
int bch2_fs_fsio_init(struct bch_fs *c)
{
- int ret = 0;
-
- pr_verbose_init(c->opts, "");
-
if (bioset_init(&c->writepage_bioset,
4, offsetof(struct bch_writepage_io, op.wbio.bio),
BIOSET_NEED_BVECS))
1, offsetof(struct nocow_flush, bio), 0))
return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
- pr_verbose_init(c->opts, "ret %i", ret);
- return ret;
+ return 0;
}
#endif /* NO_BCACHEFS_FS */