#include "keylist.h"
#include "quota.h"
#include "reflink.h"
+#include "trace.h"
#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/writeback.h>
-#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
+struct folio_vec {
+ struct folio *fv_folio;
+ size_t fv_offset;
+ size_t fv_len;
+};
+
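+/*
+ * Like a bio_vec, but in terms of the folio containing bv_page: fv_offset and
+ * fv_len are relative to the folio, and fv_len is clamped so that a single
+ * folio_vec never crosses a folio boundary:
+ */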
+static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
+{
+ struct folio *folio = page_folio(bv.bv_page);
+ size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
+ bv.bv_offset;
+ size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
+
+ return (struct folio_vec) {
+ .fv_folio = folio,
+ .fv_offset = offset,
+ .fv_len = len,
+ };
+}
+
+static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
+ struct bvec_iter iter)
+{
+ return biovec_to_foliovec(bio_iter_iovec(bio, iter));
+}
+
+#define __bio_for_each_folio(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
+
+/**
+ * bio_for_each_folio - iterate over folios within a bio
+ * @bvl: struct folio_vec loop variable
+ * @bio: bio to iterate over
+ * @iter: struct bvec_iter, which tracks our position in @bio
+ *
+ * Like other non-_all versions, this iterates over what bio->bi_iter currently
+ * points to. This version is for drivers, where the bio may have previously
+ * been split or cloned.
+ */
+#define bio_for_each_folio(bvl, bio, iter) \
+ __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
+
+/*
+ * Use u64 for the end pos and sector helpers because if the folio covers the
+ * max supported range of the mapping, the start offset of the next folio
+ * overflows loff_t. This breaks much of the range based processing in the
+ * buffered write path.
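+ *
+ * (E.g. a 4k folio at the end of the max range: folio_pos() == LLONG_MAX -
+ * 4095, so folio_pos() + folio_size() == LLONG_MAX + 1, which is negative as
+ * an loff_t but well defined as a u64.)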
+ */
+static inline u64 folio_end_pos(struct folio *folio)
+{
+ return folio_pos(folio) + folio_size(folio);
+}
+
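+/* Folio position and size in units of 512 byte sectors: */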
+static inline size_t folio_sectors(struct folio *folio)
+{
+ return PAGE_SECTORS << folio_order(folio);
+}
+
+static inline loff_t folio_sector(struct folio *folio)
+{
+ return folio_pos(folio) >> 9;
+}
+
+static inline u64 folio_end_sector(struct folio *folio)
+{
+ return folio_end_pos(folio) >> 9;
+}
+
+typedef DARRAY(struct folio *) folios;
+
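+/*
+ * Get a contiguous run of folios starting at @start, stopping at the first
+ * gap or allocation failure - so the caller may get back fewer folios than
+ * asked for. Returns 0 if at least one folio was grabbed:
+ */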
+static int filemap_get_contig_folios_d(struct address_space *mapping,
+ loff_t start, u64 end,
+ int fgp_flags, gfp_t gfp,
+ folios *folios)
+{
+ struct folio *f;
+ u64 pos = start;
+ int ret = 0;
+
+ while (pos < end) {
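+ /*
+ * Cap how much new pagecache we create in one batch: past 1MB, only
+ * grab folios that are already present:
+ */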
+ if ((u64) pos >= (u64) start + (1ULL << 20))
+ fgp_flags &= ~FGP_CREAT;
+
+ ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
+ if (ret)
+ break;
+
+ f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
+ if (IS_ERR_OR_NULL(f))
+ break;
+
+ BUG_ON(folios->nr && folio_pos(f) != pos);
+
+ pos = folio_end_pos(f);
+ darray_push(folios, f);
+ }
+
+ if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
+ ret = -ENOMEM;
+
+ return folios->nr ? 0 : ret;
+}
+
struct nocow_flush {
struct closure *cl;
struct bch_dev *ca;
{
int ret;
+ if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
+ return 0;
+
mutex_lock(&inode->ei_quota_lock);
ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
inode->v.i_blocks += sectors;
#ifdef CONFIG_BCACHEFS_QUOTA
- if (quota_res && sectors > 0) {
+ if (quota_res &&
+ !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
+ sectors > 0) {
BUG_ON(sectors > quota_res->sectors);
BUG_ON(sectors > inode->ei_quota_reserved);
-/* stored in page->private: */
+/* stored in folio->private: */
-struct bch_page_sector {
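+/*
+ * Per-sector allocation state, roughly:
+ * - unallocated: no extent and no reservation on disk
+ * - reserved: covered by an on disk reservation (e.g. fallocate), clean
+ * - dirty: dirty in pagecache, needs an allocation at writeback time
+ * - dirty_reserved: dirty, with a disk reservation backing the write
+ * - allocated: backed by an allocated extent on disk
+ */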
+#define BCH_FOLIO_SECTOR_STATE() \
+ x(unallocated) \
+ x(reserved) \
+ x(dirty) \
+ x(dirty_reserved) \
+ x(allocated)
+
+enum bch_folio_sector_state {
+#define x(n) SECTOR_##n,
+ BCH_FOLIO_SECTOR_STATE()
+#undef x
+};
+
+static const char * const bch2_folio_sector_states[] = {
+#define x(n) #n,
+ BCH_FOLIO_SECTOR_STATE()
+#undef x
+ NULL
+};
+
+static inline enum bch_folio_sector_state
+folio_sector_dirty(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_unallocated:
+ return SECTOR_dirty;
+ case SECTOR_reserved:
+ return SECTOR_dirty_reserved;
+ default:
+ return state;
+ }
+}
+
+static inline enum bch_folio_sector_state
+folio_sector_undirty(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_dirty:
+ return SECTOR_unallocated;
+ case SECTOR_dirty_reserved:
+ return SECTOR_reserved;
+ default:
+ return state;
+ }
+}
+
+static inline enum bch_folio_sector_state
+folio_sector_reserve(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_unallocated:
+ return SECTOR_reserved;
+ case SECTOR_dirty:
+ return SECTOR_dirty_reserved;
+ default:
+ return state;
+ }
+}
+
+struct bch_folio_sector {
/* Uncompressed, fully allocated replicas (or on disk reservation): */
unsigned nr_replicas:4;
unsigned replicas_reserved:4;
/* i_sectors: */
- enum {
- SECTOR_UNALLOCATED,
- SECTOR_RESERVED,
- SECTOR_DIRTY,
- SECTOR_DIRTY_RESERVED,
- SECTOR_ALLOCATED,
- } state:8;
+ enum bch_folio_sector_state state:8;
};
-struct bch_page_state {
+struct bch_folio {
spinlock_t lock;
atomic_t write_count;
+ /*
+ * Is the sector state up to date with the btree?
+ * (Not the data itself)
+ */
bool uptodate;
- struct bch_page_sector s[PAGE_SECTORS];
+ struct bch_folio_sector s[];
};
-static inline struct bch_page_state *__bch2_page_state(struct page *page)
+static inline void folio_sector_set(struct folio *folio,
+ struct bch_folio *s,
+ unsigned i, unsigned n)
+{
+ s->s[i].state = n;
+}
+
+/* file offset (to folio offset) to bch_folio_sector index */
+static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
{
- return page_has_private(page)
- ? (struct bch_page_state *) page_private(page)
+ u64 f_offset = pos - folio_pos(folio);
+ BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
+ return f_offset >> SECTOR_SHIFT;
+}
+
+static inline struct bch_folio *__bch2_folio(struct folio *folio)
+{
+ return folio_has_private(folio)
+ ? (struct bch_folio *) folio_get_private(folio)
: NULL;
}
-static inline struct bch_page_state *bch2_page_state(struct page *page)
+static inline struct bch_folio *bch2_folio(struct folio *folio)
{
- EBUG_ON(!PageLocked(page));
+ EBUG_ON(!folio_test_locked(folio));
- return __bch2_page_state(page);
+ return __bch2_folio(folio);
}
-/* for newly allocated pages: */
-static void __bch2_page_state_release(struct page *page)
+/* for newly allocated folios: */
+static void __bch2_folio_release(struct folio *folio)
{
- kfree(detach_page_private(page));
+ kfree(folio_detach_private(folio));
}
-static void bch2_page_state_release(struct page *page)
+static void bch2_folio_release(struct folio *folio)
{
- EBUG_ON(!PageLocked(page));
- __bch2_page_state_release(page);
+ EBUG_ON(!folio_test_locked(folio));
+ __bch2_folio_release(folio);
}
-/* for newly allocated pages: */
-static struct bch_page_state *__bch2_page_state_create(struct page *page,
- gfp_t gfp)
+/* for newly allocated folios: */
+static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
{
- struct bch_page_state *s;
+ struct bch_folio *s;
- s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
+ s = kzalloc(sizeof(*s) +
+ sizeof(struct bch_folio_sector) *
+ folio_sectors(folio), gfp);
if (!s)
return NULL;
spin_lock_init(&s->lock);
- attach_page_private(page, s);
+ folio_attach_private(folio, s);
return s;
}
-static struct bch_page_state *bch2_page_state_create(struct page *page,
- gfp_t gfp)
+static struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
{
- return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
+ return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
}
static unsigned bkey_to_sector_state(struct bkey_s_c k)
{
if (bkey_extent_is_reservation(k))
- return SECTOR_RESERVED;
+ return SECTOR_reserved;
if (bkey_extent_is_allocation(k.k))
- return SECTOR_ALLOCATED;
- return SECTOR_UNALLOCATED;
+ return SECTOR_allocated;
+ return SECTOR_unallocated;
}
-static void __bch2_page_state_set(struct page *page,
- unsigned pg_offset, unsigned pg_len,
- unsigned nr_ptrs, unsigned state)
+static void __bch2_folio_set(struct folio *folio,
+ unsigned pg_offset, unsigned pg_len,
+ unsigned nr_ptrs, unsigned state)
{
- struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
- unsigned i;
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, sectors = folio_sectors(folio);
- BUG_ON(pg_offset >= PAGE_SECTORS);
- BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+ BUG_ON(pg_offset >= sectors);
+ BUG_ON(pg_offset + pg_len > sectors);
spin_lock(&s->lock);
for (i = pg_offset; i < pg_offset + pg_len; i++) {
- s->s[i].nr_replicas = nr_ptrs;
- s->s[i].state = state;
+ s->s[i].nr_replicas = nr_ptrs;
+ folio_sector_set(folio, s, i, state);
}
- if (i == PAGE_SECTORS)
+ if (i == sectors)
s->uptodate = true;
spin_unlock(&s->lock);
}
-static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
- struct page **pages, unsigned nr_pages)
+/*
+ * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
+ * extents btree:
+ */
+static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
+ struct folio **folios, unsigned nr_folios)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
- u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
- unsigned pg_idx = 0;
+ struct bch_folio *s;
+ u64 offset = folio_sector(folios[0]);
+ unsigned folio_idx;
u32 snapshot;
+ bool need_set = false;
int ret;
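+
+ /*
+ * First create bch_folio state for each folio, and check whether any
+ * of them still need their sector state initialized from the btree:
+ */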
+ for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
+ s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ need_set |= !s->uptodate;
+ }
+
+ if (!need_set)
+ return 0;
+
+ folio_idx = 0;
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = bkey_to_sector_state(k);
- while (pg_idx < nr_pages) {
- struct page *page = pages[pg_idx];
- u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
- u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
- unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
- unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;
+ while (folio_idx < nr_folios) {
+ struct folio *folio = folios[folio_idx];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
+ unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
- BUG_ON(k.k->p.offset < pg_start);
- BUG_ON(bkey_start_offset(k.k) > pg_end);
+ BUG_ON(k.k->p.offset < folio_start);
+ BUG_ON(bkey_start_offset(k.k) > folio_end);
- if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
- __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);
+ if (!bch2_folio(folio)->uptodate)
+ __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
- if (k.k->p.offset < pg_end)
+ if (k.k->p.offset < folio_end)
break;
- pg_idx++;
+ folio_idx++;
}
- if (pg_idx == nr_pages)
+ if (folio_idx == nr_folios)
break;
}
static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
{
struct bvec_iter iter;
- struct bio_vec bv;
+ struct folio_vec fv;
unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = bkey_to_sector_state(k);
- bio_for_each_segment(bv, bio, iter)
- __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
- bv.bv_len >> 9, nr_ptrs, state);
+ bio_for_each_folio(fv, bio, iter)
+ __bch2_folio_set(fv.fv_folio,
+ fv.fv_offset >> 9,
+ fv.fv_len >> 9,
+ nr_ptrs, state);
}
static void mark_pagecache_unallocated(struct bch_inode_info *inode,
&index, end_index, &fbatch)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
- u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
- u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
- unsigned pg_offset = max(start, pg_start) - pg_start;
- unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
- struct bch_page_state *s;
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+ struct bch_folio *s;
- BUG_ON(end <= pg_start);
- BUG_ON(pg_offset >= PAGE_SECTORS);
- BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+ BUG_ON(end <= folio_start);
folio_lock(folio);
- s = bch2_page_state(&folio->page);
+ s = bch2_folio(folio);
if (s) {
spin_lock(&s->lock);
- for (j = pg_offset; j < pg_offset + pg_len; j++)
+ for (j = folio_offset; j < folio_offset + folio_len; j++)
s->s[j].nr_replicas = 0;
spin_unlock(&s->lock);
}
&index, end_index, &fbatch)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
- u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
- u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
- unsigned pg_offset = max(start, pg_start) - pg_start;
- unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
- struct bch_page_state *s;
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+ struct bch_folio *s;
- BUG_ON(end <= pg_start);
- BUG_ON(pg_offset >= PAGE_SECTORS);
- BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+ BUG_ON(end <= folio_start);
folio_lock(folio);
- s = bch2_page_state(&folio->page);
+ s = bch2_folio(folio);
if (s) {
spin_lock(&s->lock);
- for (j = pg_offset; j < pg_offset + pg_len; j++)
- switch (s->s[j].state) {
- case SECTOR_UNALLOCATED:
- s->s[j].state = SECTOR_RESERVED;
- break;
- case SECTOR_DIRTY:
- s->s[j].state = SECTOR_DIRTY_RESERVED;
- i_sectors_delta--;
- break;
- default:
- break;
- }
+ for (j = folio_offset; j < folio_offset + folio_len; j++) {
+ i_sectors_delta -= s->s[j].state == SECTOR_dirty;
+ folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
+ }
spin_unlock(&s->lock);
}
: c->opts.data_replicas;
}
-static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
- unsigned nr_replicas)
+static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
+ unsigned nr_replicas)
{
return max(0, (int) nr_replicas -
s->nr_replicas -
s->replicas_reserved);
}
-static int bch2_get_page_disk_reservation(struct bch_fs *c,
+static int bch2_get_folio_disk_reservation(struct bch_fs *c,
struct bch_inode_info *inode,
- struct page *page, bool check_enospc)
+ struct folio *folio, bool check_enospc)
{
- struct bch_page_state *s = bch2_page_state_create(page, 0);
+ struct bch_folio *s = bch2_folio_create(folio, 0);
unsigned nr_replicas = inode_nr_replicas(c, inode);
struct disk_reservation disk_res = { 0 };
- unsigned i, disk_res_sectors = 0;
+ unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
int ret;
if (!s)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(s->s); i++)
+ for (i = 0; i < sectors; i++)
disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
if (!disk_res_sectors)
if (unlikely(ret))
return ret;
- for (i = 0; i < ARRAY_SIZE(s->s); i++)
+ for (i = 0; i < sectors; i++)
s->s[i].replicas_reserved +=
sectors_to_reserve(&s->s[i], nr_replicas);
return 0;
}
-struct bch2_page_reservation {
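+/* Disk + quota reservations, taken before dirtying folio sectors: */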
+struct bch2_folio_reservation {
struct disk_reservation disk;
struct quota_res quota;
};
-static void bch2_page_reservation_init(struct bch_fs *c,
+static void bch2_folio_reservation_init(struct bch_fs *c,
struct bch_inode_info *inode,
- struct bch2_page_reservation *res)
+ struct bch2_folio_reservation *res)
{
memset(res, 0, sizeof(*res));
res->disk.nr_replicas = inode_nr_replicas(c, inode);
}
-static void bch2_page_reservation_put(struct bch_fs *c,
+static void bch2_folio_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
- struct bch2_page_reservation *res)
+ struct bch2_folio_reservation *res)
{
bch2_disk_reservation_put(c, &res->disk);
bch2_quota_reservation_put(c, inode, &res->quota);
}
-static int bch2_page_reservation_get(struct bch_fs *c,
- struct bch_inode_info *inode, struct page *page,
- struct bch2_page_reservation *res,
+static int bch2_folio_reservation_get(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio,
+ struct bch2_folio_reservation *res,
unsigned offset, unsigned len)
{
- struct bch_page_state *s = bch2_page_state_create(page, 0);
+ struct bch_folio *s = bch2_folio_create(folio, 0);
unsigned i, disk_sectors = 0, quota_sectors = 0;
int ret;
i++) {
disk_sectors += sectors_to_reserve(&s->s[i],
res->disk.nr_replicas);
- quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
+ quota_sectors += s->s[i].state == SECTOR_unallocated;
}
if (disk_sectors) {
return 0;
}
-static void bch2_clear_page_bits(struct page *page)
+static void bch2_clear_folio_bits(struct folio *folio)
{
- struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
+ struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_page_state *s = bch2_page_state(page);
+ struct bch_folio *s = bch2_folio(folio);
struct disk_reservation disk_res = { 0 };
- int i, dirty_sectors = 0;
+ int i, sectors = folio_sectors(folio), dirty_sectors = 0;
if (!s)
return;
- EBUG_ON(!PageLocked(page));
- EBUG_ON(PageWriteback(page));
+ EBUG_ON(!folio_test_locked(folio));
+ EBUG_ON(folio_test_writeback(folio));
- for (i = 0; i < ARRAY_SIZE(s->s); i++) {
+ for (i = 0; i < sectors; i++) {
disk_res.sectors += s->s[i].replicas_reserved;
s->s[i].replicas_reserved = 0;
- switch (s->s[i].state) {
- case SECTOR_DIRTY:
- s->s[i].state = SECTOR_UNALLOCATED;
- --dirty_sectors;
- break;
- case SECTOR_DIRTY_RESERVED:
- s->s[i].state = SECTOR_RESERVED;
- break;
- default:
- break;
- }
+ dirty_sectors -= s->s[i].state == SECTOR_dirty;
+ folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
}
bch2_disk_reservation_put(c, &disk_res);
i_sectors_acct(c, inode, NULL, dirty_sectors);
- bch2_page_state_release(page);
+ bch2_folio_release(folio);
}
-static void bch2_set_page_dirty(struct bch_fs *c,
- struct bch_inode_info *inode, struct page *page,
- struct bch2_page_reservation *res,
+static void bch2_set_folio_dirty(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio,
+ struct bch2_folio_reservation *res,
unsigned offset, unsigned len)
{
- struct bch_page_state *s = bch2_page_state(page);
+ struct bch_folio *s = bch2_folio(folio);
unsigned i, dirty_sectors = 0;
- WARN_ON((u64) page_offset(page) + offset + len >
+ WARN_ON((u64) folio_pos(folio) + offset + len >
round_up((u64) i_size_read(&inode->v), block_bytes(c)));
+ BUG_ON(!s->uptodate);
+
spin_lock(&s->lock);
for (i = round_down(offset, block_bytes(c)) >> 9;
s->s[i].replicas_reserved += sectors;
res->disk.sectors -= sectors;
- switch (s->s[i].state) {
- case SECTOR_UNALLOCATED:
- s->s[i].state = SECTOR_DIRTY;
- dirty_sectors++;
- break;
- case SECTOR_RESERVED:
- s->s[i].state = SECTOR_DIRTY_RESERVED;
- break;
- default:
- break;
- }
+ dirty_sectors += s->s[i].state == SECTOR_unallocated;
+
+ folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
}
spin_unlock(&s->lock);
i_sectors_acct(c, inode, &res->quota, dirty_sectors);
- if (!PageDirty(page))
- __set_page_dirty_nobuffers(page);
+ if (!folio_test_dirty(folio))
+ filemap_dirty_folio(inode->v.i_mapping, folio);
}
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
struct address_space *mapping = file->f_mapping;
struct address_space *fdm = faults_disabled_mapping();
struct bch_inode_info *inode = file_bch_inode(file);
- int ret;
+ vm_fault_t ret;
if (fdm == mapping)
return VM_FAULT_SIGBUS;
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
struct bch_inode_info *inode = file_bch_inode(file);
struct address_space *mapping = file->f_mapping;
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_page_reservation res;
+ struct bch2_folio_reservation res;
unsigned len;
loff_t isize;
- int ret;
+ vm_fault_t ret;
- bch2_page_reservation_init(c, inode, &res);
+ bch2_folio_reservation_init(c, inode, &res);
sb_start_pagefault(inode->v.i_sb);
file_update_time(file);
*/
bch2_pagecache_add_get(inode);
- lock_page(page);
+ folio_lock(folio);
isize = i_size_read(&inode->v);
- if (page->mapping != mapping || page_offset(page) >= isize) {
- unlock_page(page);
+ if (folio->mapping != mapping || folio_pos(folio) >= isize) {
+ folio_unlock(folio);
ret = VM_FAULT_NOPAGE;
goto out;
}
- len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
+ len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
- if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
- if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
- unlock_page(page);
- ret = VM_FAULT_SIGBUS;
- goto out;
- }
- }
-
- if (bch2_page_reservation_get(c, inode, page, &res, 0, len)) {
- unlock_page(page);
+ if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
+ bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
+ folio_unlock(folio);
ret = VM_FAULT_SIGBUS;
goto out;
}
- bch2_set_page_dirty(c, inode, page, &res, 0, len);
- bch2_page_reservation_put(c, inode, &res);
+ bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
+ bch2_folio_reservation_put(c, inode, &res);
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
ret = VM_FAULT_LOCKED;
out:
bch2_pagecache_add_put(inode);
if (offset || length < folio_size(folio))
return;
- bch2_clear_page_bits(&folio->page);
+ bch2_clear_folio_bits(folio);
}
bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return false;
- bch2_clear_page_bits(&folio->page);
+ bch2_clear_folio_bits(folio);
return true;
}
static void bch2_readpages_end_io(struct bio *bio)
{
- struct bvec_iter_all iter;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, bio, iter) {
- struct page *page = bv->bv_page;
+ struct folio_iter fi;
+ bio_for_each_folio_all(fi, bio) {
if (!bio->bi_status) {
- SetPageUptodate(page);
+ folio_mark_uptodate(fi.folio);
} else {
- ClearPageUptodate(page);
- SetPageError(page);
+ folio_clear_uptodate(fi.folio);
+ folio_set_error(fi.folio);
}
- unlock_page(page);
+ folio_unlock(fi.folio);
}
bio_put(bio);
struct readpages_iter {
struct address_space *mapping;
- struct page **pages;
- unsigned nr_pages;
unsigned idx;
- pgoff_t offset;
+ folios folios;
};
static int readpages_iter_init(struct readpages_iter *iter,
struct readahead_control *ractl)
{
- unsigned i, nr_pages = readahead_count(ractl);
+ struct folio **fi;
+ int ret;
memset(iter, 0, sizeof(*iter));
- iter->mapping = ractl->mapping;
- iter->offset = readahead_index(ractl);
- iter->nr_pages = nr_pages;
+ iter->mapping = ractl->mapping;
- iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
- if (!iter->pages)
- return -ENOMEM;
+ ret = filemap_get_contig_folios_d(iter->mapping,
+ ractl->_index << PAGE_SHIFT,
+ (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
+ 0, mapping_gfp_mask(iter->mapping),
+ &iter->folios);
+ if (ret)
+ return ret;
- nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
- for (i = 0; i < nr_pages; i++) {
- __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
- put_page(iter->pages[i]);
+ darray_for_each(iter->folios, fi) {
+ ractl->_nr_pages -= 1U << folio_order(*fi);
+ __bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
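+ /*
+ * Drop both our ref from filemap_get_contig_folios_d() and the ref
+ * the readahead code is holding (normally dropped via
+ * readahead_folio(), which we don't use):
+ */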
+ folio_put(*fi);
+ folio_put(*fi);
}
return 0;
}
-static inline struct page *readpage_iter_next(struct readpages_iter *iter)
+static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
- if (iter->idx >= iter->nr_pages)
+ if (iter->idx >= iter->folios.nr)
return NULL;
+ return iter->folios.data[iter->idx];
+}
- EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
-
- return iter->pages[iter->idx];
+static inline void readpage_iter_advance(struct readpages_iter *iter)
+{
+ iter->idx++;
}
static bool extent_partial_reads_expensive(struct bkey_s_c k)
return false;
}
-static void readpage_bio_extend(struct readpages_iter *iter,
- struct bio *bio,
- unsigned sectors_this_extent,
- bool get_more)
+static int readpage_bio_extend(struct btree_trans *trans,
+ struct readpages_iter *iter,
+ struct bio *bio,
+ unsigned sectors_this_extent,
+ bool get_more)
{
+ /* Don't hold btree locks while allocating memory: */
+ bch2_trans_unlock(trans);
+
while (bio_sectors(bio) < sectors_this_extent &&
bio->bi_vcnt < bio->bi_max_vecs) {
- pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
- struct page *page = readpage_iter_next(iter);
+ struct folio *folio = readpage_iter_peek(iter);
int ret;
- if (page) {
- if (iter->offset + iter->idx != page_offset)
- break;
-
- iter->idx++;
+ if (folio) {
+ readpage_iter_advance(iter);
} else {
+ pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
+
if (!get_more)
break;
- page = xa_load(&iter->mapping->i_pages, page_offset);
- if (page && !xa_is_value(page))
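+ /*
+ * Stop extending if there's already a folio in the pagecache
+ * at this index; xa_is_value() means a shadow entry, which
+ * filemap_add_folio() can replace:
+ */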
+ folio = xa_load(&iter->mapping->i_pages, folio_offset);
+ if (folio && !xa_is_value(folio))
break;
- page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
- if (!page)
+ folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
+ if (!folio)
break;
- if (!__bch2_page_state_create(page, 0)) {
- put_page(page);
+ if (!__bch2_folio_create(folio, GFP_KERNEL)) {
+ folio_put(folio);
break;
}
- ret = add_to_page_cache_lru(page, iter->mapping,
- page_offset, GFP_NOFS);
+ ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
if (ret) {
- __bch2_page_state_release(page);
- put_page(page);
+ __bch2_folio_release(folio);
+ folio_put(folio);
break;
}
- put_page(page);
+ folio_put(folio);
}
- BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
+ BUG_ON(folio_sector(folio) != bio_end_sector(bio));
+
+ BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
}
+
+ return bch2_trans_relock(trans);
}
static void bchfs_read(struct btree_trans *trans,
sectors = min(sectors, k.k->size - offset_into_extent);
- if (readpages_iter)
- readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
- extent_partial_reads_expensive(k));
+ if (readpages_iter) {
+ ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
+ extent_partial_reads_expensive(k));
+ if (ret)
+ break;
+ }
bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
swap(rbio->bio.bi_iter.bi_size, bytes);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_io_opts opts;
struct btree_trans trans;
- struct page *page;
+ struct folio *folio;
struct readpages_iter readpages_iter;
int ret;
bch2_pagecache_add_get(inode);
- while ((page = readpage_iter_next(&readpages_iter))) {
- pgoff_t index = readpages_iter.offset + readpages_iter.idx;
+ while ((folio = readpage_iter_peek(&readpages_iter))) {
unsigned n = min_t(unsigned,
- readpages_iter.nr_pages -
+ readpages_iter.folios.nr -
readpages_iter.idx,
BIO_MAX_VECS);
struct bch_read_bio *rbio =
rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
- GFP_NOFS, &c->bio_read),
+ GFP_KERNEL, &c->bio_read),
opts);
- readpages_iter.idx++;
+ readpage_iter_advance(&readpages_iter);
- rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
rbio->bio.bi_end_io = bch2_readpages_end_io;
- BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
bchfs_read(&trans, rbio, inode_inum(inode),
&readpages_iter);
+ bch2_trans_unlock(&trans);
}
bch2_pagecache_add_put(inode);
bch2_trans_exit(&trans);
- kfree(readpages_iter.pages);
+ darray_exit(&readpages_iter.folios);
}
-static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
- subvol_inum inum, struct page *page)
+static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
+ subvol_inum inum, struct folio *folio)
{
struct btree_trans trans;
- bch2_page_state_create(page, __GFP_NOFAIL);
+ bch2_folio_create(folio, GFP_KERNEL|__GFP_NOFAIL);
- bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
- rbio->bio.bi_iter.bi_sector =
- (sector_t) page->index << PAGE_SECTORS_SHIFT;
- BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
+ rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
bch2_trans_init(&trans, c, 0, 0);
bchfs_read(&trans, rbio, inum, NULL);
bch2_trans_exit(&trans);
}
-static void bch2_read_single_page_end_io(struct bio *bio)
+static void bch2_read_single_folio_end_io(struct bio *bio)
{
complete(bio->bi_private);
}
-static int bch2_read_single_page(struct page *page,
- struct address_space *mapping)
+static int bch2_read_single_folio(struct folio *folio,
+ struct address_space *mapping)
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
- rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
+ rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
opts);
rbio->bio.bi_private = &done;
- rbio->bio.bi_end_io = bch2_read_single_page_end_io;
+ rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
- __bchfs_readpage(c, rbio, inode_inum(inode), page);
+ __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
wait_for_completion(&done);
ret = blk_status_to_errno(rbio->bio.bi_status);
if (ret < 0)
return ret;
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
return 0;
}
int bch2_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
int ret;
- ret = bch2_read_single_page(page, page->mapping);
+ ret = bch2_read_single_folio(folio, folio->mapping);
folio_unlock(folio);
return bch2_err_class(ret);
}
struct bch_writepage_state {
struct bch_writepage_io *io;
struct bch_io_opts opts;
+ struct bch_folio_sector *tmp;
+ unsigned tmp_sectors;
};
static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
container_of(op, struct bch_writepage_io, op);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
- struct bvec_iter_all iter;
- struct bio_vec *bvec;
+ struct folio_iter fi;
unsigned i;
if (io->op.error) {
set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
- bio_for_each_segment_all(bvec, bio, iter) {
- struct bch_page_state *s;
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s;
- SetPageError(bvec->bv_page);
- mapping_set_error(bvec->bv_page->mapping, -EIO);
+ folio_set_error(fi.folio);
+ mapping_set_error(fi.folio->mapping, -EIO);
- s = __bch2_page_state(bvec->bv_page);
+ s = __bch2_folio(fi.folio);
spin_lock(&s->lock);
- for (i = 0; i < PAGE_SECTORS; i++)
+ for (i = 0; i < folio_sectors(fi.folio); i++)
s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
- bio_for_each_segment_all(bvec, bio, iter) {
- struct bch_page_state *s;
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s;
- s = __bch2_page_state(bvec->bv_page);
+ s = __bch2_folio(fi.folio);
spin_lock(&s->lock);
- for (i = 0; i < PAGE_SECTORS; i++)
+ for (i = 0; i < folio_sectors(fi.folio); i++)
s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
*/
i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
- bio_for_each_segment_all(bvec, bio, iter) {
- struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s = __bch2_folio(fi.folio);
if (atomic_dec_and_test(&s->write_count))
- end_page_writeback(bvec->bv_page);
+ folio_end_writeback(fi.folio);
}
bio_put(&io->op.wbio.bio);
w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
REQ_OP_WRITE,
- GFP_NOFS,
+ GFP_KERNEL,
&c->writepage_bioset),
struct bch_writepage_io, op.wbio.bio);
op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
}
-static int __bch2_writepage(struct page *page,
+static int __bch2_writepage(struct folio *folio,
struct writeback_control *wbc,
void *data)
{
- struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
+ struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_writepage_state *w = data;
- struct bch_page_state *s, orig;
- unsigned i, offset, nr_replicas_this_write = U32_MAX;
+ struct bch_folio *s;
+ unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
loff_t i_size = i_size_read(&inode->v);
- pgoff_t end_index = i_size >> PAGE_SHIFT;
int ret;
- EBUG_ON(!PageUptodate(page));
+ EBUG_ON(!folio_test_uptodate(folio));
- /* Is the page fully inside i_size? */
- if (page->index < end_index)
+ /* Is the folio fully inside i_size? */
+ if (folio_end_pos(folio) <= i_size)
goto do_io;
- /* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_SIZE - 1);
- if (page->index > end_index || !offset) {
- unlock_page(page);
+ /* Is the folio fully outside i_size? (truncate in progress) */
+ if (folio_pos(folio) >= i_size) {
+ folio_unlock(folio);
return 0;
}
/*
- * The page straddles i_size. It must be zeroed out on each and every
+ * The folio straddles i_size. It must be zeroed out on each and every
* writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio,
+ i_size - folio_pos(folio),
+ folio_size(folio));
do_io:
- s = bch2_page_state_create(page, __GFP_NOFAIL);
+ f_sectors = folio_sectors(folio);
+ s = bch2_folio(folio);
+
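+ /*
+ * Folios are variable size now, so make sure our scratch copy of the
+ * per-sector state is big enough for this one:
+ */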
+ if (f_sectors > w->tmp_sectors) {
+ kfree(w->tmp);
+ w->tmp = kzalloc(sizeof(struct bch_folio_sector) *
+ f_sectors, GFP_NOFS|__GFP_NOFAIL);
+ w->tmp_sectors = f_sectors;
+ }
/*
* Things get really hairy with errors during writeback:
*/
- ret = bch2_get_page_disk_reservation(c, inode, page, false);
+ ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
BUG_ON(ret);
/* Before unlocking the page, get copy of reservations: */
spin_lock(&s->lock);
- orig = *s;
- spin_unlock(&s->lock);
+ memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
- for (i = 0; i < PAGE_SECTORS; i++) {
- if (s->s[i].state < SECTOR_DIRTY)
+ for (i = 0; i < f_sectors; i++) {
+ if (s->s[i].state < SECTOR_dirty)
continue;
nr_replicas_this_write =
s->s[i].replicas_reserved);
}
- for (i = 0; i < PAGE_SECTORS; i++) {
- if (s->s[i].state < SECTOR_DIRTY)
+ for (i = 0; i < f_sectors; i++) {
+ if (s->s[i].state < SECTOR_dirty)
continue;
s->s[i].nr_replicas = w->opts.compression
? 0 : nr_replicas_this_write;
s->s[i].replicas_reserved = 0;
- s->s[i].state = SECTOR_ALLOCATED;
+ folio_sector_set(folio, s, i, SECTOR_allocated);
}
+ spin_unlock(&s->lock);
BUG_ON(atomic_read(&s->write_count));
atomic_set(&s->write_count, 1);
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
- unlock_page(page);
+ folio_unlock(folio);
offset = 0;
while (1) {
unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
u64 sector;
- while (offset < PAGE_SECTORS &&
- orig.s[offset].state < SECTOR_DIRTY)
+ while (offset < f_sectors &&
+ w->tmp[offset].state < SECTOR_dirty)
offset++;
- if (offset == PAGE_SECTORS)
+ if (offset == f_sectors)
break;
- while (offset + sectors < PAGE_SECTORS &&
- orig.s[offset + sectors].state >= SECTOR_DIRTY) {
- reserved_sectors += orig.s[offset + sectors].replicas_reserved;
- dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
+ while (offset + sectors < f_sectors &&
+ w->tmp[offset + sectors].state >= SECTOR_dirty) {
+ reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
+ dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
sectors++;
}
BUG_ON(!sectors);
- sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;
+ sector = folio_sector(folio) + offset;
if (w->io &&
(w->io->op.res.nr_replicas != nr_replicas_this_write ||
- bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
+ bio_full(&w->io->op.wbio.bio, sectors << 9) ||
w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
(BIO_MAX_VECS * PAGE_SIZE) ||
bio_end_sector(&w->io->op.wbio.bio) != sector))
atomic_inc(&s->write_count);
BUG_ON(inode != w->io->inode);
- BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
+ BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
sectors << 9, offset << 9));
/* Check for writing past i_size: */
}
if (atomic_dec_and_test(&s->write_count))
- end_page_writeback(page);
+ folio_end_writeback(folio);
return 0;
}
if (w.io)
bch2_writepage_do_io(&w);
blk_finish_plug(&plug);
+ kfree(w.tmp);
return bch2_err_class(ret);
}
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_page_reservation *res;
- pgoff_t index = pos >> PAGE_SHIFT;
- unsigned offset = pos & (PAGE_SIZE - 1);
- struct page *page;
+ struct bch2_folio_reservation *res;
+ struct folio *folio;
+ unsigned offset;
int ret = -ENOMEM;
res = kmalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
- bch2_page_reservation_init(c, inode, res);
+ bch2_folio_reservation_init(c, inode, res);
*fsdata = res;
bch2_pagecache_add_get(inode);
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
+ folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
+ FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR_OR_NULL(folio))
goto err_unlock;
+ offset = pos - folio_pos(folio);
+ len = min_t(size_t, len, folio_end_pos(folio) - pos);
+
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto out;
- /* If we're writing entire page, don't need to read it in first: */
- if (len == PAGE_SIZE)
+ /* If we're writing entire folio, don't need to read it in first: */
+ if (!offset && len == folio_size(folio))
goto out;
if (!offset && pos + len >= inode->v.i_size) {
- zero_user_segment(page, len, PAGE_SIZE);
- flush_dcache_page(page);
+ folio_zero_segment(folio, len, folio_size(folio));
+ flush_dcache_folio(folio);
goto out;
}
- if (index > inode->v.i_size >> PAGE_SHIFT) {
- zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
- flush_dcache_page(page);
+ if (folio_pos(folio) >= inode->v.i_size) {
+ folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
+ flush_dcache_folio(folio);
goto out;
}
readpage:
- ret = bch2_read_single_page(page, mapping);
+ ret = bch2_read_single_folio(folio, mapping);
if (ret)
goto err;
out:
- if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
- ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
- if (ret)
- goto err;
- }
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto err;
- ret = bch2_page_reservation_get(c, inode, page, res, offset, len);
+ ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
if (ret) {
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
/*
- * If the page hasn't been read in, we won't know if we
+ * If the folio hasn't been read in, we won't know if we
* actually need a reservation - we don't actually need
- * to read here, we just need to check if the page is
+ * to read here, we just need to check if the folio is
* fully backed by uncompressed data:
*/
goto readpage;
goto err;
}
- *pagep = page;
+ *pagep = &folio->page;
return 0;
err:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
*pagep = NULL;
err_unlock:
bch2_pagecache_add_put(inode);
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_page_reservation *res = fsdata;
- unsigned offset = pos & (PAGE_SIZE - 1);
+ struct bch2_folio_reservation *res = fsdata;
+ struct folio *folio = page_folio(page);
+ unsigned offset = pos - folio_pos(folio);
lockdep_assert_held(&inode->v.i_rwsem);
+ BUG_ON(offset + copied > folio_size(folio));
- if (unlikely(copied < len && !PageUptodate(page))) {
+ if (unlikely(copied < len && !folio_test_uptodate(folio))) {
/*
- * The page needs to be read in, but that would destroy
+ * The folio needs to be read in, but that would destroy
* our partial write - simplest thing is to just force
* userspace to redo the write:
*/
- zero_user(page, 0, PAGE_SIZE);
- flush_dcache_page(page);
+ folio_zero_range(folio, 0, folio_size(folio));
+ flush_dcache_folio(folio);
copied = 0;
}
spin_unlock(&inode->v.i_lock);
if (copied) {
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
- bch2_set_page_dirty(c, inode, page, res, offset, copied);
+ bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
inode->ei_last_dirtied = (unsigned long) current;
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
bch2_pagecache_add_put(inode);
- bch2_page_reservation_put(c, inode, res);
+ bch2_folio_reservation_put(c, inode, res);
kfree(res);
return copied;
}
-#define WRITE_BATCH_PAGES 32
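+/* Unlock and put every folio from *fi to the end of the darray: */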
+static noinline void folios_trunc(folios *folios, struct folio **fi)
+{
+ while (folios->data + folios->nr > fi) {
+ struct folio *f = darray_pop(folios);
+
+ folio_unlock(f);
+ folio_put(f);
+ }
+}
static int __bch2_buffered_write(struct bch_inode_info *inode,
struct address_space *mapping,
loff_t pos, unsigned len)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct page *pages[WRITE_BATCH_PAGES];
- struct bch2_page_reservation res;
- unsigned long index = pos >> PAGE_SHIFT;
- unsigned offset = pos & (PAGE_SIZE - 1);
- unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
- unsigned i, reserved = 0, set_dirty = 0;
- unsigned copied = 0, nr_pages_copied = 0;
+ struct bch2_folio_reservation res;
+ folios folios;
+ struct folio **fi, *f;
+ unsigned copied = 0, f_offset;
+ u64 end = pos + len, f_pos;
+ loff_t last_folio_pos = inode->v.i_size;
int ret = 0;
BUG_ON(!len);
- BUG_ON(nr_pages > ARRAY_SIZE(pages));
- bch2_page_reservation_init(c, inode, &res);
+ bch2_folio_reservation_init(c, inode, &res);
+ darray_init(&folios);
- for (i = 0; i < nr_pages; i++) {
- pages[i] = grab_cache_page_write_begin(mapping, index + i);
- if (!pages[i]) {
- nr_pages = i;
- if (!i) {
- ret = -ENOMEM;
- goto out;
- }
- len = min_t(unsigned, len,
- nr_pages * PAGE_SIZE - offset);
- break;
- }
- }
+ ret = filemap_get_contig_folios_d(mapping, pos, end,
+ FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
+ mapping_gfp_mask(mapping),
+ &folios);
+ if (ret)
+ goto out;
+
+ BUG_ON(!folios.nr);
- if (offset && !PageUptodate(pages[0])) {
- ret = bch2_read_single_page(pages[0], mapping);
+ f = darray_first(folios);
+ if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
+ ret = bch2_read_single_folio(f, mapping);
if (ret)
goto out;
}
- if ((pos + len) & (PAGE_SIZE - 1) &&
- !PageUptodate(pages[nr_pages - 1])) {
- if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
- zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
+ f = darray_last(folios);
+ end = min(end, folio_end_pos(f));
+ last_folio_pos = folio_pos(f);
+ if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
+ if (end >= inode->v.i_size) {
+ folio_zero_range(f, 0, folio_size(f));
} else {
- ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
+ ret = bch2_read_single_folio(f, mapping);
if (ret)
goto out;
}
}
- while (reserved < len) {
- unsigned i = (offset + reserved) >> PAGE_SHIFT;
- struct page *page = pages[i];
- unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
- unsigned pg_len = min_t(unsigned, len - reserved,
- PAGE_SIZE - pg_offset);
+ ret = bch2_folio_set(c, inode_inum(inode), folios.data, folios.nr);
+ if (ret)
+ goto out;
- if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
- ret = bch2_page_state_set(c, inode_inum(inode),
- pages + i, nr_pages - i);
- if (ret)
- goto out;
- }
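+ /*
+ * f_pos tracks our position in the file; f_offset is the
+ * corresponding offset in the current folio (nonzero only for the
+ * first folio):
+ */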
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ u64 f_len = min(end, folio_end_pos(f)) - f_pos;
/*
* XXX: per POSIX and fstests generic/275, on -ENOSPC we're
* supposed to write as much as we have disk space for.
*
* On failure here we should still write out a partial page if
* we aren't completely out of disk space - we don't do that
* yet:
*/
- ret = bch2_page_reservation_get(c, inode, page, &res,
- pg_offset, pg_len);
+ ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
if (unlikely(ret)) {
- if (!reserved)
+ folios_trunc(&folios, fi);
+ if (!folios.nr)
goto out;
+
+ end = min(end, folio_end_pos(darray_last(folios)));
break;
}
- reserved += pg_len;
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
}
if (mapping_writably_mapped(mapping))
- for (i = 0; i < nr_pages; i++)
- flush_dcache_page(pages[i]);
-
- while (copied < reserved) {
- struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
- unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
- unsigned pg_len = min_t(unsigned, reserved - copied,
- PAGE_SIZE - pg_offset);
- unsigned pg_copied = copy_page_from_iter_atomic(page,
- pg_offset, pg_len, iter);
-
- if (!pg_copied)
+ darray_for_each(folios, fi)
+ flush_dcache_folio(*fi);
+
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ u64 f_len = min(end, folio_end_pos(f)) - f_pos;
+ unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
+
+ if (!f_copied) {
+ folios_trunc(&folios, fi);
break;
+ }
- if (!PageUptodate(page) &&
- pg_copied != PAGE_SIZE &&
- pos + copied + pg_copied < inode->v.i_size) {
- zero_user(page, 0, PAGE_SIZE);
+ if (!folio_test_uptodate(f) &&
+ f_copied != folio_size(f) &&
+ pos + copied + f_copied < inode->v.i_size) {
+ folio_zero_range(f, 0, folio_size(f));
+ folios_trunc(&folios, fi);
break;
}
- flush_dcache_page(page);
- copied += pg_copied;
+ flush_dcache_folio(f);
+ copied += f_copied;
- if (pg_copied != pg_len)
+ if (f_copied != f_len) {
+ folios_trunc(&folios, fi + 1);
break;
+ }
+
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
}
if (!copied)
goto out;
+ end = pos + copied;
+
spin_lock(&inode->v.i_lock);
- if (pos + copied > inode->v.i_size)
- i_size_write(&inode->v, pos + copied);
+ if (end > inode->v.i_size)
+ i_size_write(&inode->v, end);
spin_unlock(&inode->v.i_lock);
- while (set_dirty < copied) {
- struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
- unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
- unsigned pg_len = min_t(unsigned, copied - set_dirty,
- PAGE_SIZE - pg_offset);
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ u64 f_len = min(end, folio_end_pos(f)) - f_pos;
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ if (!folio_test_uptodate(f))
+ folio_mark_uptodate(f);
- bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
- unlock_page(page);
- put_page(page);
+ bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
- set_dirty += pg_len;
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
}
- nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
inode->ei_last_dirtied = (unsigned long) current;
out:
- for (i = nr_pages_copied; i < nr_pages; i++) {
- unlock_page(pages[i]);
- put_page(pages[i]);
+ darray_for_each(folios, fi) {
+ folio_unlock(*fi);
+ folio_put(*fi);
}
- bch2_page_reservation_put(c, inode, &res);
+ /*
+ * If the last folio added to the mapping starts beyond current EOF, we
+ * performed a short write but left around at least one post-EOF folio.
+ * Clean up the mapping before we return.
+ */
+ if (last_folio_pos >= inode->v.i_size)
+ truncate_pagecache(&inode->v, inode->v.i_size);
+
+ darray_exit(&folios);
+ bch2_folio_reservation_put(c, inode, &res);
return copied ?: ret;
}
do {
- unsigned offset = pos & (PAGE_SIZE - 1);
- unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
- PAGE_SIZE * WRITE_BATCH_PAGES - offset);
+ unsigned bytes = iov_iter_count(iter);
again:
/*
* Bring in the user page that we will copy from _first_.
&c->bio_read);
bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
+ bio->bi_opf = REQ_OP_READ|REQ_SYNC;
bio->bi_iter.bi_sector = offset >> 9;
bio->bi_private = dio;
static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);
+/*
+ * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
+ * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
+ * caller's stack, we're not guaranteed that it will live for the duration of
+ * the IO:
+ */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
struct iovec *iov = dio->inline_vecs;
+ /*
+ * iov_iter has a single embedded iovec - nothing to do:
+ */
+ if (iter_is_ubuf(&dio->iter))
+ return 0;
+
+ /*
+ * We don't currently handle non-iovec iov_iters here - return an error,
+ * and we'll fall back to doing the IO synchronously:
+ */
+ if (!iter_is_iovec(&dio->iter))
+ return -1;
+
if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
GFP_KERNEL);
dio->free_iov = true;
}
- memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
- dio->iter.iov = iov;
+ memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
+ dio->iter.__iov = iov;
return 0;
}
bch2_pagecache_block_put(inode);
if (dio->free_iov)
- kfree(dio->iter.iov);
+ kfree(dio->iter.__iov);
ret = dio->op.error ?: ((long) dio->written << 9);
bio_put(&dio->op.wbio.bio);
struct kiocb *req = dio->req;
struct bch_inode_info *inode = dio->inode;
struct bio *bio = &dio->op.wbio.bio;
- struct bvec_iter_all iter;
- struct bio_vec *bv;
req->ki_pos += (u64) dio->op.written << 9;
dio->written += dio->op.written;
mutex_unlock(&inode->ei_quota_lock);
}
- if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
- bio_for_each_segment_all(bv, bio, iter)
- put_page(bv->bv_page);
+ bio_release_pages(bio, false);
if (unlikely(dio->op.error))
set_bit(EI_INODE_ERROR, &inode->ei_flags);
err:
dio->op.error = ret;
- if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
- struct bvec_iter_all iter;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, bio, iter)
- put_page(bv->bv_page);
- }
+ bio_release_pages(bio, false);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
goto out;
goto err;
for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
- if (bkey_extent_is_data(k.k)) {
+ if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
ret = 1;
break;
}
return ret;
}
-static int __bch2_truncate_page(struct bch_inode_info *inode,
- pgoff_t index, loff_t start, loff_t end)
+static int __bch2_truncate_folio(struct bch_inode_info *inode,
+ pgoff_t index, loff_t start, loff_t end)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct address_space *mapping = inode->v.i_mapping;
- struct bch_page_state *s;
+ struct bch_folio *s;
unsigned start_offset = start & (PAGE_SIZE - 1);
unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
unsigned i;
- struct page *page;
+ struct folio *folio;
s64 i_sectors_delta = 0;
int ret = 0;
+ u64 end_pos;
- /* Page boundary? Nothing to do */
- if (!((index == start >> PAGE_SHIFT && start_offset) ||
- (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
- return 0;
-
- /* Above i_size? */
- if (index << PAGE_SHIFT >= inode->v.i_size)
- return 0;
-
- page = find_lock_page(mapping, index);
- if (!page) {
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR_OR_NULL(folio)) {
/*
* XXX: we're doing two index lookups when we end up reading the
- * page
+ * folio
*/
ret = range_has_data(c, inode->ei_subvol,
POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
if (ret <= 0)
return ret;
- page = find_or_create_page(mapping, index, GFP_KERNEL);
- if (unlikely(!page)) {
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK|FGP_CREAT, GFP_KERNEL);
+ if (unlikely(IS_ERR_OR_NULL(folio))) {
ret = -ENOMEM;
goto out;
}
}
- s = bch2_page_state_create(page, 0);
+ BUG_ON(start >= folio_end_pos(folio));
+ BUG_ON(end <= folio_pos(folio));
+
+ start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
+ end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
+
+ /* Folio boundary? Nothing to do */
+ if (start_offset == 0 &&
+ end_offset == folio_size(folio)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ s = bch2_folio_create(folio, 0);
if (!s) {
ret = -ENOMEM;
goto unlock;
}
- if (!PageUptodate(page)) {
- ret = bch2_read_single_page(page, mapping);
+ if (!folio_test_uptodate(folio)) {
+ ret = bch2_read_single_folio(folio, mapping);
if (ret)
goto unlock;
}
- if (index != start >> PAGE_SHIFT)
- start_offset = 0;
- if (index != end >> PAGE_SHIFT)
- end_offset = PAGE_SIZE;
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto unlock;
for (i = round_up(start_offset, block_bytes(c)) >> 9;
i < round_down(end_offset, block_bytes(c)) >> 9;
i++) {
s->s[i].nr_replicas = 0;
- if (s->s[i].state == SECTOR_DIRTY)
- i_sectors_delta--;
- s->s[i].state = SECTOR_UNALLOCATED;
+
+ i_sectors_delta -= s->s[i].state == SECTOR_dirty;
+ folio_sector_set(folio, s, i, SECTOR_unallocated);
}
i_sectors_acct(c, inode, NULL, i_sectors_delta);
/*
- * Caller needs to know whether this page will be written out by
+ * Caller needs to know whether this folio will be written out by
* writeback - doing an i_size update if necessary - or whether it will
- * be responsible for the i_size update:
+ * be responsible for the i_size update.
+ *
+ * Note that we shouldn't ever see a folio beyond EOF, but check and
+ * warn if so. This has been observed by failure to clean up folios
+ * after a short write and there's still a chance reclaim will fix
+ * things up.
*/
- ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
- PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;
+ WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
+ end_pos = folio_end_pos(folio);
+ if (inode->v.i_size > folio_pos(folio))
+ end_pos = min_t(u64, inode->v.i_size, end_pos);
+ ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;
- zero_user_segment(page, start_offset, end_offset);
+ folio_zero_segment(folio, start_offset, end_offset);
/*
* Bit of a hack - we don't want truncate to fail due to -ENOSPC.
*
- * XXX: because we aren't currently tracking whether the page has actual
+ * XXX: because we aren't currently tracking whether the folio has actual
- * data in it (vs. just 0s, or only partially written) this wrong. ick.
+ * data in it (vs. just 0s, or only partially written) this is wrong. ick.
*/
- BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));
+ BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
/*
* This removes any writeable userspace mappings; we need to force
* .page_mkwrite to be called again before any mmapped writes, to
* redirty the full page:
*/
- page_mkclean(page);
- __set_page_dirty_nobuffers(page);
+ folio_mkclean(folio);
+ filemap_dirty_folio(mapping, folio);
unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
return ret;
}
-static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
+static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
{
- return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
- from, round_up(from, PAGE_SIZE));
+ return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
+ from, ANYSINT_MAX(loff_t));
}
-static int bch2_truncate_pages(struct bch_inode_info *inode,
- loff_t start, loff_t end)
+static int bch2_truncate_folios(struct bch_inode_info *inode,
+ loff_t start, loff_t end)
{
- int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
- start, end);
+ int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
+ start, end);
if (ret >= 0 &&
start >> PAGE_SHIFT != end >> PAGE_SHIFT)
- ret = __bch2_truncate_page(inode,
- end >> PAGE_SHIFT,
- start, end);
+ ret = __bch2_truncate_folio(inode,
+ (end - 1) >> PAGE_SHIFT,
+ start, end);
return ret;
}
-static int bch2_extend(struct user_namespace *mnt_userns,
+static int bch2_extend(struct mnt_idmap *idmap,
struct bch_inode_info *inode,
struct bch_inode_unpacked *inode_u,
struct iattr *iattr)
truncate_setsize(&inode->v, iattr->ia_size);
- return bch2_setattr_nonsize(mnt_userns, inode, iattr);
+ return bch2_setattr_nonsize(idmap, inode, iattr);
}
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
return 0;
}
-int bch2_truncate(struct user_namespace *mnt_userns,
+int bch2_truncate(struct mnt_idmap *idmap,
struct bch_inode_info *inode, struct iattr *iattr)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
(u64) inode->v.i_size, inode_u.bi_size);
if (iattr->ia_size > inode->v.i_size) {
- ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
+ ret = bch2_extend(idmap, inode, &inode_u, iattr);
goto err;
}
iattr->ia_valid &= ~ATTR_SIZE;
- ret = bch2_truncate_page(inode, iattr->ia_size);
+ ret = bch2_truncate_folio(inode, iattr->ia_size);
if (unlikely(ret < 0))
goto err;
ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
mutex_unlock(&inode->ei_update_lock);
- ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
+ ret = bch2_setattr_nonsize(idmap, inode, iattr);
err:
bch2_pagecache_block_put(inode);
return bch2_err_class(ret);
bool truncated_last_page;
int ret = 0;
- ret = bch2_truncate_pages(inode, offset, end);
+ ret = bch2_truncate_folios(inode, offset, end);
if (unlikely(ret < 0))
goto err;
}
if (mode & FALLOC_FL_ZERO_RANGE) {
- ret = bch2_truncate_pages(inode, offset, end);
+ ret = bch2_truncate_folios(inode, offset, end);
if (unlikely(ret < 0))
return ret;
struct bch_fs *c = inode->v.i_sb->s_fs_info;
long ret;
- if (!percpu_ref_tryget_live(&c->writes))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
return -EROFS;
inode_lock(&inode->v);
err:
bch2_pagecache_block_put(inode);
inode_unlock(&inode->v);
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
return bch2_err_class(ret);
}
/* fseek: */
-static int page_data_offset(struct page *page, unsigned offset)
+static int folio_data_offset(struct folio *folio, loff_t pos)
{
- struct bch_page_state *s = bch2_page_state(page);
- unsigned i;
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, sectors = folio_sectors(folio);
if (s)
- for (i = offset >> 9; i < PAGE_SECTORS; i++)
- if (s->s[i].state >= SECTOR_DIRTY)
- return i << 9;
+ for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
+ if (s->s[i].state >= SECTOR_dirty)
+ return i << SECTOR_SHIFT;
return -1;
}
struct folio *folio = fbatch.folios[i];
folio_lock(folio);
-
- offset = page_data_offset(&folio->page,
- folio->index == start_index
- ? start_offset & (PAGE_SIZE - 1)
- : 0);
+ offset = folio_data_offset(folio,
+ max(folio_pos(folio), start_offset));
if (offset >= 0) {
- ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
- offset,
+ ret = clamp(folio_pos(folio) + offset,
start_offset, end_offset);
folio_unlock(folio);
folio_batch_release(&fbatch);
return ret;
}
-
folio_unlock(folio);
}
folio_batch_release(&fbatch);
return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
-static int __page_hole_offset(struct page *page, unsigned offset)
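+/*
+ * Returns true if *offset now points at a hole (an absent folio, or one with
+ * no bch_folio state, counts as a hole); returns false, with *offset advanced
+ * past the folio, if the folio is entirely data:
+ */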
+static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
{
- struct bch_page_state *s = bch2_page_state(page);
- unsigned i;
-
- if (!s)
- return 0;
-
- for (i = offset >> 9; i < PAGE_SECTORS; i++)
- if (s->s[i].state < SECTOR_DIRTY)
- return i << 9;
-
- return -1;
-}
-
-static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
-{
- pgoff_t index = offset >> PAGE_SHIFT;
- struct page *page;
- int pg_offset;
- loff_t ret = -1;
+ struct folio *folio;
+ struct bch_folio *s;
+ unsigned i, sectors;
+ bool ret = true;
- page = find_lock_page(mapping, index);
- if (!page)
- return offset;
+ folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
+ if (IS_ERR_OR_NULL(folio))
+ return true;
- pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
- if (pg_offset >= 0)
- ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
+ s = bch2_folio(folio);
+ if (!s)
+ goto unlock;
- unlock_page(page);
+ sectors = folio_sectors(folio);
+ for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
+ if (s->s[i].state < SECTOR_dirty) {
+ *offset = max(*offset,
+ folio_pos(folio) + (i << SECTOR_SHIFT));
+ goto unlock;
+ }
+ *offset = folio_end_pos(folio);
+ ret = false;
+unlock:
+ folio_unlock(folio);
+ folio_put(folio);
return ret;
}
loff_t end_offset)
{
struct address_space *mapping = vinode->i_mapping;
- loff_t offset = start_offset, hole;
-
- while (offset < end_offset) {
- hole = page_hole_offset(mapping, offset);
- if (hole >= 0 && hole <= end_offset)
- return max(start_offset, hole);
+ loff_t offset = start_offset;
- offset += PAGE_SIZE;
- offset &= PAGE_MASK;
- }
+ while (offset < end_offset &&
+ !folio_hole_offset(mapping, &offset))
+ ;
- return end_offset;
+ return min(offset, end_offset);
}
static loff_t bch2_seek_hole(struct file *file, u64 offset)
int bch2_fs_fsio_init(struct bch_fs *c)
{
- int ret = 0;
-
- pr_verbose_init(c->opts, "");
-
if (bioset_init(&c->writepage_bioset,
4, offsetof(struct bch_writepage_io, op.wbio.bio),
- BIOSET_NEED_BVECS) ||
- bioset_init(&c->dio_read_bioset,
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_writepage_bioset_init;
+
+ if (bioset_init(&c->dio_read_bioset,
4, offsetof(struct dio_read, rbio.bio),
- BIOSET_NEED_BVECS) ||
- bioset_init(&c->dio_write_bioset,
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_dio_read_bioset_init;
+
+ if (bioset_init(&c->dio_write_bioset,
4, offsetof(struct dio_write, op.wbio.bio),
- BIOSET_NEED_BVECS) ||
- bioset_init(&c->nocow_flush_bioset,
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_dio_write_bioset_init;
+
+ if (bioset_init(&c->nocow_flush_bioset,
1, offsetof(struct nocow_flush, bio), 0))
- ret = -ENOMEM;
+ return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
- pr_verbose_init(c->opts, "ret %i", ret);
- return ret;
+ return 0;
}
#endif /* NO_BCACHEFS_FS */