int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
loff_t start, u64 end,
- int fgp_flags, gfp_t gfp,
- folios *folios)
+ fgf_t fgp_flags, gfp_t gfp,
+ folios *fs)
{
	struct folio *f;
	u64 pos = start;
	int ret = 0;

	while (pos < end) {
if ((u64) pos >= (u64) start + (1ULL << 20))
fgp_flags &= ~FGP_CREAT;
- ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
+ ret = darray_make_room_gfp(fs, 1, gfp & GFP_KERNEL);
if (ret)
break;

		f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
if (IS_ERR_OR_NULL(f))
break;
- BUG_ON(folios->nr && folio_pos(f) != pos);
+ BUG_ON(fs->nr && folio_pos(f) != pos);
pos = folio_end_pos(f);
- darray_push(folios, f);
+ darray_push(fs, f);
}
- if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
+ if (!fs->nr && !ret && (fgp_flags & FGP_CREAT))
ret = -ENOMEM;
- return folios->nr ? 0 : ret;
+ return fs->nr ? 0 : ret;
}
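/*
 * For context: "folios" is bcachefs's darray of folio pointers
 * (typedef DARRAY(struct folio *) folios), so renaming the parameter to "fs"
 * avoids shadowing the type name. A minimal sketch of the calling convention,
 * loosely modelled on the buffered-write path; the function name and flag
 * combination below are illustrative, not part of this patch:
 */
#if 0
static int example_get_contig_folios(struct address_space *mapping,
				     loff_t pos, loff_t end)
{
	folios fs;
	int ret;

	darray_init(&fs);

	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
				FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
				mapping_gfp_mask(mapping), &fs);
	if (ret)
		return ret;

	/* operate on the locked, contiguous folios, then release them: */
	for (unsigned i = 0; i < fs.nr; i++) {
		folio_unlock(fs.data[i]);
		folio_put(fs.data[i]);
	}

	darray_exit(&fs);
	return 0;
}
#endif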
/* pagecache_block must be held */
return ret;
}
+#if 0
+/* Useful for debug tracing: */
static const char * const bch2_folio_sector_states[] = {
#define x(n) #n,
BCH_FOLIO_SECTOR_STATE()
#undef x
NULL
};
+#endif
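/*
 * The state-name table above comes from the usual x-macro pattern:
 * BCH_FOLIO_SECTOR_STATE() expands x(name) once per state, so the string
 * table always stays in sync with the corresponding enum. A self-contained
 * sketch of the pattern, with hypothetical state names:
 */
#if 0
#define EXAMPLE_STATES()	\
	x(unallocated)		\
	x(reserved)		\
	x(dirty)

enum example_state {
#define x(n)	EX_##n,
	EXAMPLE_STATES()
#undef x
};

static const char * const example_state_names[] = {
#define x(n)	#n,
	EXAMPLE_STATES()
#undef x
	NULL
};
#endif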
static inline enum bch_folio_sector_state
folio_sector_dirty(enum bch_folio_sector_state state)
* extents btree:
*/
int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
- struct folio **folios, unsigned nr_folios)
+ struct folio **fs, unsigned nr_folios)
{
- struct btree_trans trans;
+ struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bch_folio *s;
- u64 offset = folio_sector(folios[0]);
+ u64 offset = folio_sector(fs[0]);
unsigned folio_idx;
u32 snapshot;
bool need_set = false;
int ret;
for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
- s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
+ s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
if (!s)
return -ENOMEM;
		need_set |= !s->uptodate;
	}

	if (!need_set)
		return 0;

	folio_idx = 0;
- bch2_trans_init(&trans, c, 0, 0);
+ trans = bch2_trans_get(c);
retry:
- bch2_trans_begin(&trans);
+ bch2_trans_begin(trans);
- ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
if (ret)
goto err;
- for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, ret) {
unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = bkey_to_sector_state(k);
while (folio_idx < nr_folios) {
- struct folio *folio = folios[folio_idx];
+ struct folio *folio = fs[folio_idx];
u64 folio_start = folio_sector(folio);
u64 folio_end = folio_end_sector(folio);
			unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
				folio_start;
			unsigned folio_len = min(k.k->p.offset, folio_end) -
				folio_offset - folio_start;

			if (!bch2_folio(folio)->uptodate)
				__bch2_folio_set(folio, folio_offset, folio_len,
						 nr_ptrs, state);

			if (k.k->p.offset < folio_end)
				break;

			folio_idx++;
		}

		if (folio_idx == nr_folios)
			break;
	}
offset = iter.pos.offset;
- bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_iter_exit(trans, &iter);
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
- bch2_trans_exit(&trans);
+ bch2_trans_put(trans);
return ret;
}
}
}
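/*
 * The transaction-lifetime change in bch2_folio_set() above reflects btree
 * transactions moving off the stack: instead of a struct btree_trans
 * initialized with bch2_trans_init() and torn down with bch2_trans_exit(),
 * callers now take a pointer from bch2_trans_get() and return it with
 * bch2_trans_put(). A minimal sketch of the new pattern; the function name is
 * hypothetical, and a real user would do btree work where the comment sits:
 */
#if 0
static int example_trans_user(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	int ret;
retry:
	bch2_trans_begin(trans);

	ret = 0;	/* btree iterator/update calls against trans go here */

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);
	return ret;
}
#endif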
-void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
- u64 start, u64 end)
+int bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
+ u64 *start, u64 end,
+ bool nonblocking)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t index = *start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
struct folio_batch fbatch;
s64 i_sectors_delta = 0;
- unsigned i, j;
+ int ret = 0;
- if (end <= start)
- return;
+ if (end <= *start)
+ return 0;
folio_batch_init(&fbatch);
while (filemap_get_folios(inode->v.i_mapping,
&index, end_index, &fbatch)) {
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
+
+ if (!nonblocking)
+ folio_lock(folio);
+ else if (!folio_trylock(folio)) {
+ folio_batch_release(&fbatch);
+ ret = -EAGAIN;
+ break;
+ }
+
u64 folio_start = folio_sector(folio);
u64 folio_end = folio_end_sector(folio);
- unsigned folio_offset = max(start, folio_start) - folio_start;
- unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
- struct bch_folio *s;
BUG_ON(end <= folio_start);
- folio_lock(folio);
- s = bch2_folio(folio);
+ *start = min(end, folio_end);
+ struct bch_folio *s = bch2_folio(folio);
if (s) {
+ unsigned folio_offset = max(*start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+
spin_lock(&s->lock);
- for (j = folio_offset; j < folio_offset + folio_len; j++) {
+ for (unsigned j = folio_offset; j < folio_offset + folio_len; j++) {
i_sectors_delta -= s->s[j].state == SECTOR_dirty;
bch2_folio_sector_set(folio, s, j,
folio_sector_reserve(s->s[j].state));
}
				spin_unlock(&s->lock);
			}

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
+ return ret;
}
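/*
 * The reworked bch2_mark_pagecache_reserved() takes start by reference and
 * can now fail: with nonblocking == true, a folio that cannot be trylocked
 * produces -EAGAIN, and *start has already been advanced past the folios that
 * were marked. A caller can therefore retry from where it left off once
 * blocking is allowed; a sketch of that convention (hypothetical caller):
 */
#if 0
static int example_mark_reserved(struct bch_inode_info *inode,
				 u64 start, u64 end)
{
	int ret = bch2_mark_pagecache_reserved(inode, &start, end, true);

	if (ret == -EAGAIN) {
		/* drop whatever forbade blocking here, then finish the range: */
		ret = bch2_mark_pagecache_reserved(inode, &start, end, false);
	}
	return ret;
}
#endif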
static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
return end_offset;
}
+/*
+ * Search for a hole in a folio.
+ *
+ * The filemap layer returns -ENOENT if no folio exists, so reuse the same error
+ * code to indicate a pagecache hole exists at the returned offset. Otherwise
+ * return 0 if the folio is filled with data, or an error code. This function
+ * can return -EAGAIN if nonblock is specified.
+ */
static int folio_hole_offset(struct address_space *mapping, loff_t *offset,
unsigned min_replicas, bool nonblock)
{
struct folio *folio;
struct bch_folio *s;
unsigned i, sectors;
- bool ret = true;
+ int ret = -ENOENT;
folio = __filemap_get_folio(mapping, *offset >> PAGE_SHIFT,
FGP_LOCK|(nonblock ? FGP_NOWAIT : 0), 0);
- if (folio == ERR_PTR(-EAGAIN))
- return -EAGAIN;
- if (IS_ERR_OR_NULL(folio))
- return true;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
s = bch2_folio(folio);
	if (!s)
		goto unlock;
}
*offset = folio_end_pos(folio);
- ret = false;
+ ret = 0;
unlock:
folio_unlock(folio);
folio_put(folio);

	return ret;
}

loff_t bch2_seek_pagecache_hole(struct inode *vinode,
				loff_t start_offset,
				loff_t end_offset,
				unsigned min_replicas,
				bool nonblock)
{
struct address_space *mapping = vinode->i_mapping;
loff_t offset = start_offset;
+ loff_t ret = 0;
- while (offset < end_offset &&
- !folio_hole_offset(mapping, &offset, min_replicas, nonblock))
- ;
+ while (!ret && offset < end_offset)
+ ret = folio_hole_offset(mapping, &offset, min_replicas, nonblock);
+ if (ret && ret != -ENOENT)
+ return ret;
return min(offset, end_offset);
}
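/*
 * The rewritten loop in bch2_seek_pagecache_hole() distinguishes three
 * outcomes from folio_hole_offset(): 0 advances offset past a fully written
 * folio and keeps scanning, -ENOENT means a hole was found at offset (the
 * final min() clamps it to the search range), and anything else, e.g. -EAGAIN
 * in nonblocking mode, is propagated. A toy model of the same control flow,
 * with a stub standing in for folio_hole_offset():
 */
#if 0
static int probe_for_hole(loff_t *offset);	/* returns 0, -ENOENT, or another error */

static loff_t seek_hole_model(loff_t start, loff_t end)
{
	loff_t offset = start;
	loff_t ret = 0;

	while (!ret && offset < end)
		ret = probe_for_hole(&offset);	/* advances offset on success */

	if (ret && ret != -ENOENT)
		return ret;

	/* hole found, or implicit hole at the end of the range: */
	return min(offset, end);
}
#endif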