#include "bcachefs.h"
#include "alloc_foreground.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "keylist.h"
#include "quota.h"
#include "reflink.h"
+#include "trace.h"
#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
+#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
-#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
+static void bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned);
+
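+/* A bio_vec, restated in terms of the folio that contains its page: */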
+struct folio_vec {
+ struct folio *fv_folio;
+ size_t fv_offset;
+ size_t fv_len;
+};
+
+static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
+{
+ struct folio *folio = page_folio(bv.bv_page);
+ size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
+ bv.bv_offset;
+ size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
+
+ return (struct folio_vec) {
+ .fv_folio = folio,
+ .fv_offset = offset,
+ .fv_len = len,
+ };
+}
+
+static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
+ struct bvec_iter iter)
+{
+ return biovec_to_foliovec(bio_iter_iovec(bio, iter));
+}
+
+#define __bio_for_each_folio(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
+
+/**
+ * bio_for_each_folio - iterate over folios within a bio
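+ * @bvl: struct folio_vec holding the current folio, offset and length
+ * @bio: bio to iterate over
+ * @iter: struct bvec_iter to use as the iteration cursor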
+ *
+ * Like other non-_all versions, this iterates over what bio->bi_iter currently
+ * points to. This version is for drivers, where the bio may have previously
+ * been split or cloned.
+ */
+#define bio_for_each_folio(bvl, bio, iter) \
+ __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
+
+/*
+ * Use u64 for the end pos and sector helpers because if the folio covers the
+ * max supported range of the mapping, the start offset of the next folio
+ * overflows loff_t. This breaks much of the range based processing in the
+ * buffered write path.
+ */
+static inline u64 folio_end_pos(struct folio *folio)
+{
+ return folio_pos(folio) + folio_size(folio);
+}
+
+static inline size_t folio_sectors(struct folio *folio)
+{
+ return PAGE_SECTORS << folio_order(folio);
+}
+
+static inline loff_t folio_sector(struct folio *folio)
+{
+ return folio_pos(folio) >> 9;
+}
+
+static inline u64 folio_end_sector(struct folio *folio)
+{
+ return folio_end_pos(folio) >> 9;
+}
+
+typedef DARRAY(struct folio *) folios;
+
+static int filemap_get_contig_folios_d(struct address_space *mapping,
+ loff_t start, u64 end,
+ int fgp_flags, gfp_t gfp,
+ folios *folios)
+{
+ struct folio *f;
+ u64 pos = start;
+ int ret = 0;
+
+ while (pos < end) {
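+ /* Past start + 1MB, stop creating new folios - only gather existing ones: */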
+ if ((u64) pos >= (u64) start + (1ULL << 20))
+ fgp_flags &= ~FGP_CREAT;
+
+ ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
+ if (ret)
+ break;
+
+ f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
+ if (IS_ERR_OR_NULL(f))
+ break;
+
+ BUG_ON(folios->nr && folio_pos(f) != pos);
+
+ pos = folio_end_pos(f);
+ darray_push(folios, f);
+ }
+
+ if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
+ ret = -ENOMEM;
+
+ return folios->nr ? 0 : ret;
+}
+
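+/*
+ * fsync machinery for nocow writes: issue a REQ_OP_FLUSH to every device
+ * with pending nocow writes for this inode, as tracked in
+ * ei_devs_need_flush:
+ */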
+struct nocow_flush {
+ struct closure *cl;
+ struct bch_dev *ca;
+ struct bio bio;
+};
+
+static void nocow_flush_endio(struct bio *_bio)
+{
+ struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
+
+ closure_put(bio->cl);
+ percpu_ref_put(&bio->ca->io_ref);
+ bio_put(&bio->bio);
+}
+
+static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct closure *cl)
+{
+ struct nocow_flush *bio;
+ struct bch_dev *ca;
+ struct bch_devs_mask devs;
+ unsigned dev;
+
+ dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
+ if (dev == BCH_SB_MEMBERS_MAX)
+ return;
+
+ devs = inode->ei_devs_need_flush;
+ memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
+
+ for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca && !percpu_ref_tryget(&ca->io_ref))
+ ca = NULL;
+ rcu_read_unlock();
+
+ if (!ca)
+ continue;
+
+ bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
+ REQ_OP_FLUSH,
+ GFP_KERNEL,
+ &c->nocow_flush_bioset),
+ struct nocow_flush, bio);
+ bio->cl = cl;
+ bio->ca = ca;
+ bio->bio.bi_end_io = nocow_flush_endio;
+ closure_bio_submit(&bio->bio, cl);
+ }
+}
+
+static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
+ struct bch_inode_info *inode)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch2_inode_flush_nocow_writes_async(c, inode, &cl);
+ closure_sync(&cl);
+
+ return 0;
+}
+
+static inline bool bio_full(struct bio *bio, unsigned len)
+{
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
+ return true;
+ if (bio->bi_iter.bi_size > UINT_MAX - len)
+ return true;
+ return false;
+}
+
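+/*
+ * current->faults_disabled_mapping is a tagged pointer: the low bit records
+ * whether the page fault path had to drop locks (see set_fdm_dropped_locks()),
+ * the remaining bits are the address_space pointer:
+ */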
+static inline struct address_space *faults_disabled_mapping(void)
+{
+ return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
+}
+
+static inline void set_fdm_dropped_locks(void)
+{
+ current->faults_disabled_mapping =
+ (void *) (((unsigned long) current->faults_disabled_mapping)|1);
+}
+
+static inline bool fdm_dropped_locks(void)
+{
+ return ((unsigned long) current->faults_disabled_mapping) & 1;
+}
+
struct quota_res {
u64 sectors;
};
struct bch_writepage_io {
- struct closure cl;
struct bch_inode_info *inode;
/* must be last: */
};
struct dio_write {
- struct completion done;
struct kiocb *req;
+ struct address_space *mapping;
+ struct bch_inode_info *inode;
struct mm_struct *mm;
unsigned loop:1,
+ extending:1,
sync:1,
+ flush:1,
free_iov:1;
struct quota_res quota_res;
+ u64 written;
struct iov_iter iter;
struct iovec inline_vecs[2];
struct closure cl;
struct kiocb *req;
long ret;
+ bool should_dirty;
struct bch_read_bio rbio;
};
/* pagecache_block must be held */
-static int write_invalidate_inode_pages_range(struct address_space *mapping,
+static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
loff_t start, loff_t end)
{
int ret;
* is continually redirtying a specific page
*/
do {
- if (!mapping->nrpages &&
- !mapping->nrexceptional)
+ if (!mapping->nrpages)
return 0;
ret = filemap_write_and_wait_range(mapping, start, end);
#ifdef CONFIG_BCACHEFS_QUOTA
-static void bch2_quota_reservation_put(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res)
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
{
- if (!res->sectors)
- return;
-
- mutex_lock(&inode->ei_quota_lock);
BUG_ON(res->sectors > inode->ei_quota_reserved);
bch2_quota_acct(c, inode->ei_qid, Q_SPC,
-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
inode->ei_quota_reserved -= res->sectors;
- mutex_unlock(&inode->ei_quota_lock);
-
res->sectors = 0;
}
+static void bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
+{
+ if (res->sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __bch2_quota_reservation_put(c, inode, res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+}
+
static int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res,
- unsigned sectors,
+ u64 sectors,
bool check_enospc)
{
int ret;
+ if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
+ return 0;
+
mutex_lock(&inode->ei_quota_lock);
ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
#else
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res) {}
+
static void bch2_quota_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
- struct quota_res *res)
-{
-}
+ struct quota_res *res) {}
static int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
-static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
struct quota_res *quota_res, s64 sectors)
{
- if (!sectors)
- return;
+ bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
+ "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
+ inode->ei_inode.bi_sectors);
+ inode->v.i_blocks += sectors;
- mutex_lock(&inode->ei_quota_lock);
#ifdef CONFIG_BCACHEFS_QUOTA
- if (quota_res && sectors > 0) {
+ if (quota_res &&
+ !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
+ sectors > 0) {
BUG_ON(sectors > quota_res->sectors);
BUG_ON(sectors > inode->ei_quota_reserved);
bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
}
#endif
- inode->v.i_blocks += sectors;
- mutex_unlock(&inode->ei_quota_lock);
+}
+
+static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+ struct quota_res *quota_res, s64 sectors)
+{
+ if (sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __i_sectors_acct(c, inode, quota_res, sectors);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
}
/* page state: */
/* stored in page->private: */
-struct bch_page_sector {
- /* Uncompressed, fully allocated replicas: */
- unsigned nr_replicas:3;
+#define BCH_FOLIO_SECTOR_STATE() \
+ x(unallocated) \
+ x(reserved) \
+ x(dirty) \
+ x(dirty_reserved) \
+ x(allocated)
+
+enum bch_folio_sector_state {
+#define x(n) SECTOR_##n,
+ BCH_FOLIO_SECTOR_STATE()
+#undef x
+};
+
+static const char * const bch2_folio_sector_states[] = {
+#define x(n) #n,
+ BCH_FOLIO_SECTOR_STATE()
+#undef x
+ NULL
+};
+
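+/*
+ * folio_sector_dirty()/folio_sector_undirty() map unallocated <-> dirty and
+ * reserved <-> dirty_reserved; folio_sector_reserve() maps unallocated ->
+ * reserved and dirty -> dirty_reserved. All other states pass through
+ * unchanged:
+ */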
+static inline enum bch_folio_sector_state
+folio_sector_dirty(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_unallocated:
+ return SECTOR_dirty;
+ case SECTOR_reserved:
+ return SECTOR_dirty_reserved;
+ default:
+ return state;
+ }
+}
+
+static inline enum bch_folio_sector_state
+folio_sector_undirty(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_dirty:
+ return SECTOR_unallocated;
+ case SECTOR_dirty_reserved:
+ return SECTOR_reserved;
+ default:
+ return state;
+ }
+}
+
+static inline enum bch_folio_sector_state
+folio_sector_reserve(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_unallocated:
+ return SECTOR_reserved;
+ case SECTOR_dirty:
+ return SECTOR_dirty_reserved;
+ default:
+ return state;
+ }
+}
+
+struct bch_folio_sector {
+ /* Uncompressed, fully allocated replicas (or on-disk reservation): */
+ unsigned nr_replicas:4;
- /* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
- unsigned replicas_reserved:3;
+ /* Owns an in-memory reservation of PAGE_SECTORS * replicas_reserved sectors: */
+ unsigned replicas_reserved:4;
/* i_sectors: */
- enum {
- SECTOR_UNALLOCATED,
- SECTOR_RESERVED,
- SECTOR_DIRTY,
- SECTOR_ALLOCATED,
- } state:2;
+ enum bch_folio_sector_state state:8;
};
-struct bch_page_state {
+struct bch_folio {
spinlock_t lock;
atomic_t write_count;
- struct bch_page_sector s[PAGE_SECTORS];
+ /*
+ * Is the sector state up to date with the btree?
+ * (Not the data itself)
+ */
+ bool uptodate;
+ struct bch_folio_sector s[];
};
-static inline struct bch_page_state *__bch2_page_state(struct page *page)
+static inline void folio_sector_set(struct folio *folio,
+ struct bch_folio *s,
+ unsigned i, unsigned n)
{
- return page_has_private(page)
- ? (struct bch_page_state *) page_private(page)
- : NULL;
+ s->s[i].state = n;
}
-static inline struct bch_page_state *bch2_page_state(struct page *page)
+/* Convert a file offset (relative to this folio) to a bch_folio_sector index */
+static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
{
- EBUG_ON(!PageLocked(page));
-
- return __bch2_page_state(page);
+ u64 f_offset = pos - folio_pos(folio);
+ BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
+ return f_offset >> SECTOR_SHIFT;
}
-/* for newly allocated pages: */
-static void __bch2_page_state_release(struct page *page)
+static inline struct bch_folio *__bch2_folio(struct folio *folio)
{
- struct bch_page_state *s = __bch2_page_state(page);
+ return folio_has_private(folio)
+ ? (struct bch_folio *) folio_get_private(folio)
+ : NULL;
+}
- if (!s)
- return;
+static inline struct bch_folio *bch2_folio(struct folio *folio)
+{
+ EBUG_ON(!folio_test_locked(folio));
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
- kfree(s);
+ return __bch2_folio(folio);
}
-static void bch2_page_state_release(struct page *page)
+/* for newly allocated folios: */
+static void __bch2_folio_release(struct folio *folio)
{
- struct bch_page_state *s = bch2_page_state(page);
-
- if (!s)
- return;
+ kfree(folio_detach_private(folio));
+}
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
- kfree(s);
+static void bch2_folio_release(struct folio *folio)
+{
+ EBUG_ON(!folio_test_locked(folio));
+ __bch2_folio_release(folio);
}
-/* for newly allocated pages: */
-static struct bch_page_state *__bch2_page_state_create(struct page *page,
- gfp_t gfp)
+/* for newly allocated folios: */
+static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
{
- struct bch_page_state *s;
+ struct bch_folio *s;
- s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
+ s = kzalloc(struct_size(s, s, folio_sectors(folio)), gfp);
if (!s)
return NULL;
spin_lock_init(&s->lock);
- /*
- * migrate_page_move_mapping() assumes that pages with private data
- * have their count elevated by 1.
- */
- get_page(page);
- set_page_private(page, (unsigned long) s);
- SetPagePrivate(page);
+ folio_attach_private(folio, s);
return s;
}
-static struct bch_page_state *bch2_page_state_create(struct page *page,
- gfp_t gfp)
+static struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
+{
+ return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
+}
+
+static unsigned bkey_to_sector_state(struct bkey_s_c k)
+{
+ if (bkey_extent_is_reservation(k))
+ return SECTOR_reserved;
+ if (bkey_extent_is_allocation(k.k))
+ return SECTOR_allocated;
+ return SECTOR_unallocated;
+}
+
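+/* Set the sector state for a range of sectors within a single folio: */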
+static void __bch2_folio_set(struct folio *folio,
+ unsigned pg_offset, unsigned pg_len,
+ unsigned nr_ptrs, unsigned state)
{
- return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, sectors = folio_sectors(folio);
+
+ BUG_ON(pg_offset >= sectors);
+ BUG_ON(pg_offset + pg_len > sectors);
+
+ spin_lock(&s->lock);
+
+ for (i = pg_offset; i < pg_offset + pg_len; i++) {
+ s->s[i].nr_replicas = nr_ptrs;
+ folio_sector_set(folio, s, i, state);
+ }
+
+ if (i == sectors)
+ s->uptodate = true;
+
+ spin_unlock(&s->lock);
+}
+
+/*
+ * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
+ * extents btree:
+ */
+static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
+ struct folio **folios, unsigned nr_folios)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_folio *s;
+ u64 offset = folio_sector(folios[0]);
+ unsigned folio_idx;
+ u32 snapshot;
+ bool need_set = false;
+ int ret;
+
+ for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
+ s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ need_set |= !s->uptodate;
+ }
+
+ if (!need_set)
+ return 0;
+
+ folio_idx = 0;
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inum.inum, offset, snapshot),
+ BTREE_ITER_SLOTS, k, ret) {
+ unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k);
+
+ while (folio_idx < nr_folios) {
+ struct folio *folio = folios[folio_idx];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
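+ /* The portion of this extent that overlaps this folio: */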
+ unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
+ unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
+
+ BUG_ON(k.k->p.offset < folio_start);
+ BUG_ON(bkey_start_offset(k.k) > folio_end);
+
+ if (!bch2_folio(folio)->uptodate)
+ __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
+
+ if (k.k->p.offset < folio_end)
+ break;
+ folio_idx++;
+ }
+
+ if (folio_idx == nr_folios)
+ break;
+ }
+
+ offset = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+ bch2_trans_exit(&trans);
+
+ return ret;
+}
+
+static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
+{
+ struct bvec_iter iter;
+ struct folio_vec fv;
+ unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
+ ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k);
+
+ bio_for_each_folio(fv, bio, iter)
+ __bch2_folio_set(fv.fv_folio,
+ fv.fv_offset >> 9,
+ fv.fv_len >> 9,
+ nr_ptrs, state);
+}
+
+static void mark_pagecache_unallocated(struct bch_inode_info *inode,
+ u64 start, u64 end)
+{
+ pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+ struct folio_batch fbatch;
+ unsigned i, j;
+
+ if (end <= start)
+ return;
+
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+ struct bch_folio *s;
+
+ BUG_ON(end <= folio_start);
+
+ folio_lock(folio);
+ s = bch2_folio(folio);
+
+ if (s) {
+ spin_lock(&s->lock);
+ for (j = folio_offset; j < folio_offset + folio_len; j++)
+ s->s[j].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
+
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+}
+
+static void mark_pagecache_reserved(struct bch_inode_info *inode,
+ u64 start, u64 end)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+ struct folio_batch fbatch;
+ s64 i_sectors_delta = 0;
+ unsigned i, j;
+
+ if (end <= start)
+ return;
+
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+ struct bch_folio *s;
+
+ BUG_ON(end <= folio_start);
+
+ folio_lock(folio);
+ s = bch2_folio(folio);
+
+ if (s) {
+ spin_lock(&s->lock);
+ for (j = folio_offset; j < folio_offset + folio_len; j++) {
+ i_sectors_delta -= s->s[j].state == SECTOR_dirty;
+ folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
+ }
+ spin_unlock(&s->lock);
+ }
+
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
}
static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
: c->opts.data_replicas;
}
-static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
- unsigned nr_replicas)
+static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
+ unsigned nr_replicas)
{
return max(0, (int) nr_replicas -
s->nr_replicas -
s->replicas_reserved);
}
-static int bch2_get_page_disk_reservation(struct bch_fs *c,
+static int bch2_get_folio_disk_reservation(struct bch_fs *c,
struct bch_inode_info *inode,
- struct page *page, bool check_enospc)
+ struct folio *folio, bool check_enospc)
{
- struct bch_page_state *s = bch2_page_state_create(page, 0);
+ struct bch_folio *s = bch2_folio_create(folio, 0);
unsigned nr_replicas = inode_nr_replicas(c, inode);
struct disk_reservation disk_res = { 0 };
- unsigned i, disk_res_sectors = 0;
+ unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
int ret;
if (!s)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(s->s); i++)
+ for (i = 0; i < sectors; i++)
disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
if (!disk_res_sectors)
if (unlikely(ret))
return ret;
- for (i = 0; i < ARRAY_SIZE(s->s); i++)
+ for (i = 0; i < sectors; i++)
s->s[i].replicas_reserved +=
sectors_to_reserve(&s->s[i], nr_replicas);
return 0;
}
-struct bch2_page_reservation {
+struct bch2_folio_reservation {
struct disk_reservation disk;
struct quota_res quota;
};
-static void bch2_page_reservation_init(struct bch_fs *c,
+static void bch2_folio_reservation_init(struct bch_fs *c,
struct bch_inode_info *inode,
- struct bch2_page_reservation *res)
+ struct bch2_folio_reservation *res)
{
memset(res, 0, sizeof(*res));
res->disk.nr_replicas = inode_nr_replicas(c, inode);
}
-static void bch2_page_reservation_put(struct bch_fs *c,
+static void bch2_folio_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
- struct bch2_page_reservation *res)
+ struct bch2_folio_reservation *res)
{
bch2_disk_reservation_put(c, &res->disk);
bch2_quota_reservation_put(c, inode, &res->quota);
}
-static int bch2_page_reservation_get(struct bch_fs *c,
- struct bch_inode_info *inode, struct page *page,
- struct bch2_page_reservation *res,
- unsigned offset, unsigned len, bool check_enospc)
+static int bch2_folio_reservation_get(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio,
+ struct bch2_folio_reservation *res,
+ unsigned offset, unsigned len)
{
- struct bch_page_state *s = bch2_page_state_create(page, 0);
+ struct bch_folio *s = bch2_folio_create(folio, 0);
unsigned i, disk_sectors = 0, quota_sectors = 0;
int ret;
if (!s)
return -ENOMEM;
+ BUG_ON(!s->uptodate);
+
for (i = round_down(offset, block_bytes(c)) >> 9;
i < round_up(offset + len, block_bytes(c)) >> 9;
i++) {
disk_sectors += sectors_to_reserve(&s->s[i],
res->disk.nr_replicas);
- quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
+ quota_sectors += s->s[i].state == SECTOR_unallocated;
}
if (disk_sectors) {
- ret = bch2_disk_reservation_add(c, &res->disk,
- disk_sectors,
- !check_enospc
- ? BCH_DISK_RESERVATION_NOFAIL
- : 0);
+ ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
if (unlikely(ret))
return ret;
}
if (quota_sectors) {
ret = bch2_quota_reservation_add(c, inode, &res->quota,
- quota_sectors,
- check_enospc);
+ quota_sectors, true);
if (unlikely(ret)) {
struct disk_reservation tmp = {
.sectors = disk_sectors
return 0;
}
-static void bch2_clear_page_bits(struct page *page)
+static void bch2_clear_folio_bits(struct folio *folio)
{
- struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
+ struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_page_state *s = bch2_page_state(page);
+ struct bch_folio *s = bch2_folio(folio);
struct disk_reservation disk_res = { 0 };
- int i, dirty_sectors = 0;
+ int i, sectors = folio_sectors(folio), dirty_sectors = 0;
if (!s)
return;
- EBUG_ON(!PageLocked(page));
- EBUG_ON(PageWriteback(page));
+ EBUG_ON(!folio_test_locked(folio));
+ EBUG_ON(folio_test_writeback(folio));
- for (i = 0; i < ARRAY_SIZE(s->s); i++) {
+ for (i = 0; i < sectors; i++) {
disk_res.sectors += s->s[i].replicas_reserved;
s->s[i].replicas_reserved = 0;
- if (s->s[i].state == SECTOR_DIRTY) {
- dirty_sectors++;
- s->s[i].state = SECTOR_UNALLOCATED;
- }
+ dirty_sectors -= s->s[i].state == SECTOR_dirty;
+ folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
}
bch2_disk_reservation_put(c, &disk_res);
- if (dirty_sectors)
- i_sectors_acct(c, inode, NULL, -dirty_sectors);
+ i_sectors_acct(c, inode, NULL, dirty_sectors);
- bch2_page_state_release(page);
+ bch2_folio_release(folio);
}
-static void bch2_set_page_dirty(struct bch_fs *c,
- struct bch_inode_info *inode, struct page *page,
- struct bch2_page_reservation *res,
+static void bch2_set_folio_dirty(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio,
+ struct bch2_folio_reservation *res,
unsigned offset, unsigned len)
{
- struct bch_page_state *s = bch2_page_state(page);
+ struct bch_folio *s = bch2_folio(folio);
unsigned i, dirty_sectors = 0;
- WARN_ON((u64) page_offset(page) + offset + len >
+ WARN_ON((u64) folio_pos(folio) + offset + len >
round_up((u64) i_size_read(&inode->v), block_bytes(c)));
+ BUG_ON(!s->uptodate);
+
spin_lock(&s->lock);
for (i = round_down(offset, block_bytes(c)) >> 9;
s->s[i].replicas_reserved += sectors;
res->disk.sectors -= sectors;
- if (s->s[i].state == SECTOR_UNALLOCATED)
- dirty_sectors++;
+ dirty_sectors += s->s[i].state == SECTOR_unallocated;
- s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
+ folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
}
spin_unlock(&s->lock);
- if (dirty_sectors)
- i_sectors_acct(c, inode, &res->quota, dirty_sectors);
+ i_sectors_acct(c, inode, &res->quota, dirty_sectors);
- if (!PageDirty(page))
- __set_page_dirty_nobuffers(page);
+ if (!folio_test_dirty(folio))
+ filemap_dirty_folio(inode->v.i_mapping, folio);
}
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
+ struct address_space *mapping = file->f_mapping;
+ struct address_space *fdm = faults_disabled_mapping();
struct bch_inode_info *inode = file_bch_inode(file);
- int ret;
+ vm_fault_t ret;
+
+ if (fdm == mapping)
+ return VM_FAULT_SIGBUS;
+
+ /*
+ * Lock ordering: pagecache locks are ordered by the address of the
+ * mapping. We already hold fdm's pagecache_block lock; if this mapping
+ * orders before fdm, taking its pagecache_add lock here would invert
+ * that order - so trylock, and on failure drop fdm's lock, cycle this
+ * mapping's lock, retake fdm's lock, and signal the write path (via
+ * SIGBUS plus the dropped-locks flag) that it must retry:
+ */
+ if (fdm > mapping) {
+ struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ if (bch2_pagecache_add_tryget(inode))
+ goto got_lock;
+
+ bch2_pagecache_block_put(fdm_host);
+
+ bch2_pagecache_add_get(inode);
+ bch2_pagecache_add_put(inode);
+
+ bch2_pagecache_block_get(fdm_host);
+
+ /* Signal that lock has been dropped: */
+ set_fdm_dropped_locks();
+ return VM_FAULT_SIGBUS;
+ }
+
+ bch2_pagecache_add_get(inode);
+got_lock:
ret = filemap_fault(vmf);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
return ret;
}
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
struct bch_inode_info *inode = file_bch_inode(file);
struct address_space *mapping = file->f_mapping;
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_page_reservation res;
+ struct bch2_folio_reservation res;
unsigned len;
loff_t isize;
- int ret = VM_FAULT_LOCKED;
+ vm_fault_t ret;
- bch2_page_reservation_init(c, inode, &res);
+ bch2_folio_reservation_init(c, inode, &res);
sb_start_pagefault(inode->v.i_sb);
file_update_time(file);
* a write_invalidate_inode_pages_range() that works without dropping
* page lock before invalidating page
*/
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
- lock_page(page);
+ folio_lock(folio);
isize = i_size_read(&inode->v);
- if (page->mapping != mapping || page_offset(page) >= isize) {
- unlock_page(page);
+ if (folio->mapping != mapping || folio_pos(folio) >= isize) {
+ folio_unlock(folio);
ret = VM_FAULT_NOPAGE;
goto out;
}
- len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
+ len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
- if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
- unlock_page(page);
+ if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
+ bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
+ folio_unlock(folio);
ret = VM_FAULT_SIGBUS;
goto out;
}
- bch2_set_page_dirty(c, inode, page, &res, 0, len);
- bch2_page_reservation_put(c, inode, &res);
+ bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
+ bch2_folio_reservation_put(c, inode, &res);
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
+ ret = VM_FAULT_LOCKED;
out:
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
sb_end_pagefault(inode->v.i_sb);
return ret;
}
-void bch2_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
- if (offset || length < PAGE_SIZE)
+ if (offset || length < folio_size(folio))
return;
- bch2_clear_page_bits(page);
+ bch2_clear_folio_bits(folio);
}
-int bch2_releasepage(struct page *page, gfp_t gfp_mask)
+bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
- if (PageDirty(page))
- return 0;
-
- bch2_clear_page_bits(page);
- return 1;
-}
-
-#ifdef CONFIG_MIGRATION
-int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- int ret;
-
- EBUG_ON(!PageLocked(page));
- EBUG_ON(!PageLocked(newpage));
-
- ret = migrate_page_move_mapping(mapping, newpage, page, 0);
- if (ret != MIGRATEPAGE_SUCCESS)
- return ret;
-
- if (PagePrivate(page)) {
- ClearPagePrivate(page);
- get_page(newpage);
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
- put_page(page);
- SetPagePrivate(newpage);
- }
+ if (folio_test_dirty(folio) || folio_test_writeback(folio))
+ return false;
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
- return MIGRATEPAGE_SUCCESS;
+ bch2_clear_folio_bits(folio);
+ return true;
}
-#endif
/* readpage(s): */
static void bch2_readpages_end_io(struct bio *bio)
{
- struct bvec_iter_all iter;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, bio, iter) {
- struct page *page = bv->bv_page;
+ struct folio_iter fi;
+ bio_for_each_folio_all(fi, bio) {
if (!bio->bi_status) {
- SetPageUptodate(page);
+ folio_mark_uptodate(fi.folio);
} else {
- ClearPageUptodate(page);
- SetPageError(page);
+ folio_clear_uptodate(fi.folio);
+ folio_set_error(fi.folio);
}
- unlock_page(page);
+ folio_unlock(fi.folio);
}
bio_put(bio);
}
-static inline void page_state_init_for_read(struct page *page)
-{
- SetPagePrivate(page);
- page->private = 0;
-}
-
struct readpages_iter {
struct address_space *mapping;
- struct page **pages;
- unsigned nr_pages;
- unsigned nr_added;
unsigned idx;
- pgoff_t offset;
+ folios folios;
};
static int readpages_iter_init(struct readpages_iter *iter,
- struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+ struct readahead_control *ractl)
{
- memset(iter, 0, sizeof(*iter));
-
- iter->mapping = mapping;
- iter->offset = list_last_entry(pages, struct page, lru)->index;
-
- iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
- if (!iter->pages)
- return -ENOMEM;
-
- while (!list_empty(pages)) {
- struct page *page = list_last_entry(pages, struct page, lru);
-
- __bch2_page_state_create(page, __GFP_NOFAIL);
-
- iter->pages[iter->nr_pages++] = page;
- list_del(&page->lru);
- }
-
- return 0;
-}
-
-static inline struct page *readpage_iter_next(struct readpages_iter *iter)
-{
- struct page *page;
- unsigned i;
+ struct folio **fi;
int ret;
- BUG_ON(iter->idx > iter->nr_added);
- BUG_ON(iter->nr_added > iter->nr_pages);
-
- if (iter->idx < iter->nr_added)
- goto out;
+ memset(iter, 0, sizeof(*iter));
- while (1) {
- if (iter->idx == iter->nr_pages)
- return NULL;
-
- ret = add_to_page_cache_lru_vec(iter->mapping,
- iter->pages + iter->nr_added,
- iter->nr_pages - iter->nr_added,
- iter->offset + iter->nr_added,
- GFP_NOFS);
- if (ret > 0)
- break;
+ iter->mapping = ractl->mapping;
- page = iter->pages[iter->nr_added];
- iter->idx++;
- iter->nr_added++;
+ ret = filemap_get_contig_folios_d(iter->mapping,
+ ractl->_index << PAGE_SHIFT,
+ (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
+ 0, mapping_gfp_mask(iter->mapping),
+ &iter->folios);
+ if (ret)
+ return ret;
- __bch2_page_state_release(page);
- put_page(page);
+ darray_for_each(iter->folios, fi) {
+ ractl->_nr_pages -= 1U << folio_order(*fi);
+ __bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
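+ /*
+ * Two refs to drop: one from __filemap_get_folio(), and the one
+ * readahead took when adding the folio to the page cache (we don't
+ * use readahead_folio(), which would otherwise drop it):
+ */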
+ folio_put(*fi);
+ folio_put(*fi);
}
- iter->nr_added += ret;
-
- for (i = iter->idx; i < iter->nr_added; i++)
- put_page(iter->pages[i]);
-out:
- EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
-
- return iter->pages[iter->idx];
+ return 0;
}
-static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
+static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
- struct bvec_iter iter;
- struct bio_vec bv;
- unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
- ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = k.k->type == KEY_TYPE_reservation
- ? SECTOR_RESERVED
- : SECTOR_ALLOCATED;
-
- bio_for_each_segment(bv, bio, iter) {
- struct bch_page_state *s = bch2_page_state(bv.bv_page);
- unsigned i;
-
- for (i = bv.bv_offset >> 9;
- i < (bv.bv_offset + bv.bv_len) >> 9;
- i++) {
- s->s[i].nr_replicas = nr_ptrs;
- s->s[i].state = state;
- }
- }
+ if (iter->idx >= iter->folios.nr)
+ return NULL;
+ return iter->folios.data[iter->idx];
+}
+
+static inline void readpage_iter_advance(struct readpages_iter *iter)
+{
+ iter->idx++;
}
static bool extent_partial_reads_expensive(struct bkey_s_c k)
return false;
}
-static void readpage_bio_extend(struct readpages_iter *iter,
- struct bio *bio,
- unsigned sectors_this_extent,
- bool get_more)
+static int readpage_bio_extend(struct btree_trans *trans,
+ struct readpages_iter *iter,
+ struct bio *bio,
+ unsigned sectors_this_extent,
+ bool get_more)
{
+ /* Don't hold btree locks while allocating memory: */
+ bch2_trans_unlock(trans);
+
while (bio_sectors(bio) < sectors_this_extent &&
bio->bi_vcnt < bio->bi_max_vecs) {
- pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
- struct page *page = readpage_iter_next(iter);
+ struct folio *folio = readpage_iter_peek(iter);
int ret;
- if (page) {
- if (iter->offset + iter->idx != page_offset)
- break;
-
- iter->idx++;
+ if (folio) {
+ readpage_iter_advance(iter);
} else {
+ pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
+
if (!get_more)
break;
- page = xa_load(&iter->mapping->i_pages, page_offset);
- if (page && !xa_is_value(page))
+ folio = xa_load(&iter->mapping->i_pages, folio_offset);
+ if (folio && !xa_is_value(folio))
break;
- page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
- if (!page)
+ folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
+ if (!folio)
break;
- if (!__bch2_page_state_create(page, 0)) {
- put_page(page);
+ if (!__bch2_folio_create(folio, GFP_KERNEL)) {
+ folio_put(folio);
break;
}
- ret = add_to_page_cache_lru(page, iter->mapping,
- page_offset, GFP_NOFS);
+ ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
if (ret) {
- __bch2_page_state_release(page);
- put_page(page);
+ __bch2_folio_release(folio);
+ folio_put(folio);
break;
}
- put_page(page);
+ folio_put(folio);
}
- BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
+ BUG_ON(folio_sector(folio) != bio_end_sector(bio));
+
+ BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
}
+
+ return bch2_trans_relock(trans);
}
-static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
- struct bch_read_bio *rbio, u64 inum,
+static void bchfs_read(struct btree_trans *trans,
+ struct bch_read_bio *rbio,
+ subvol_inum inum,
struct readpages_iter *readpages_iter)
{
struct bch_fs *c = trans->c;
- struct bkey_on_stack sk;
+ struct btree_iter iter;
+ struct bkey_buf sk;
int flags = BCH_READ_RETRY_IF_STALE|
BCH_READ_MAY_PROMOTE;
+ u32 snapshot;
int ret = 0;
rbio->c = c;
rbio->start_time = local_clock();
+ rbio->subvol = inum.subvol;
- bkey_on_stack_init(&sk);
+ bch2_bkey_buf_init(&sk);
retry:
+ bch2_trans_begin(trans);
+ iter = (struct btree_iter) { NULL };
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
+ BTREE_ITER_SLOTS);
while (1) {
struct bkey_s_c k;
unsigned bytes, sectors, offset_into_extent;
+ enum btree_id data_btree = BTREE_ID_extents;
+
+ /*
+ * read_extent -> io_time_reset may cause a transaction restart
+ * without returning an error, we need to check for that here:
+ */
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ break;
- bch2_btree_iter_set_pos(iter,
- POS(inum, rbio->bio.bi_iter.bi_sector));
+ bch2_btree_iter_set_pos(&iter,
+ POS(inum.inum, rbio->bio.bi_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
break;
- bkey_on_stack_reassemble(&sk, c, k);
- k = bkey_i_to_s_c(sk.k);
-
- offset_into_extent = iter->pos.offset -
+ offset_into_extent = iter.pos.offset -
bkey_start_offset(k.k);
sectors = k.k->size - offset_into_extent;
- ret = bch2_read_indirect_extent(trans,
- &offset_into_extent, sk.k);
+ bch2_bkey_buf_reassemble(&sk, c, k);
+
+ ret = bch2_read_indirect_extent(trans, &data_btree,
+ &offset_into_extent, &sk);
if (ret)
break;
- sectors = min(sectors, k.k->size - offset_into_extent);
+ k = bkey_i_to_s_c(sk.k);
- bch2_trans_unlock(trans);
+ sectors = min(sectors, k.k->size - offset_into_extent);
- if (readpages_iter)
- readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
- extent_partial_reads_expensive(k));
+ if (readpages_iter) {
+ ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
+ extent_partial_reads_expensive(k));
+ if (ret)
+ break;
+ }
bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
swap(rbio->bio.bi_iter.bi_size, bytes);
if (rbio->bio.bi_iter.bi_size == bytes)
flags |= BCH_READ_LAST_FRAGMENT;
- if (bkey_extent_is_allocation(k.k))
- bch2_add_page_sectors(&rbio->bio, k);
+ bch2_bio_page_state_set(&rbio->bio, k);
- bch2_read_extent(c, rbio, k, offset_into_extent, flags);
+ bch2_read_extent(trans, rbio, iter.pos,
+ data_btree, k, offset_into_extent, flags);
if (flags & BCH_READ_LAST_FRAGMENT)
break;
swap(rbio->bio.bi_iter.bi_size, bytes);
bio_advance(&rbio->bio, bytes);
+
+ ret = btree_trans_too_many_iters(trans);
+ if (ret)
+ break;
}
+err:
+ bch2_trans_iter_exit(trans, &iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (ret) {
- bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
+ bch_err_inum_offset_ratelimited(c,
+ iter.pos.inode,
+ iter.pos.offset << 9,
+ "read error %i from btree lookup", ret);
+ rbio->bio.bi_status = BLK_STS_IOERR;
bio_endio(&rbio->bio);
}
- bkey_on_stack_exit(&sk, c);
+ bch2_bkey_buf_exit(&sk, c);
}
-int bch2_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+void bch2_readahead(struct readahead_control *ractl)
{
- struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
+ struct bch_io_opts opts;
struct btree_trans trans;
- struct btree_iter *iter;
- struct page *page;
+ struct folio *folio;
struct readpages_iter readpages_iter;
int ret;
- ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ ret = readpages_iter_init(&readpages_iter, ractl);
BUG_ON(ret);
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
- BTREE_ITER_SLOTS);
+ bch2_pagecache_add_get(inode);
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
-
- while ((page = readpage_iter_next(&readpages_iter))) {
- pgoff_t index = readpages_iter.offset + readpages_iter.idx;
+ while ((folio = readpage_iter_peek(&readpages_iter))) {
unsigned n = min_t(unsigned,
- readpages_iter.nr_pages -
+ readpages_iter.folios.nr -
readpages_iter.idx,
- BIO_MAX_PAGES);
+ BIO_MAX_VECS);
struct bch_read_bio *rbio =
- rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
+ rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
+ GFP_KERNEL, &c->bio_read),
opts);
- readpages_iter.idx++;
+ readpage_iter_advance(&readpages_iter);
- bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
- rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
rbio->bio.bi_end_io = bch2_readpages_end_io;
- BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
- bchfs_read(&trans, iter, rbio, inode->v.i_ino,
+ bchfs_read(&trans, rbio, inode_inum(inode),
&readpages_iter);
+ bch2_trans_unlock(&trans);
}
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
bch2_trans_exit(&trans);
- kfree(readpages_iter.pages);
-
- return 0;
+ darray_exit(&readpages_iter.folios);
}
-static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
- u64 inum, struct page *page)
+static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
+ subvol_inum inum, struct folio *folio)
{
struct btree_trans trans;
- struct btree_iter *iter;
- bch2_page_state_create(page, __GFP_NOFAIL);
+ bch2_folio_create(folio, __GFP_NOFAIL);
- bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
- rbio->bio.bi_iter.bi_sector =
- (sector_t) page->index << PAGE_SECTOR_SHIFT;
- BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
+ rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
- BTREE_ITER_SLOTS);
-
- bchfs_read(&trans, iter, rbio, inum, NULL);
-
+ bchfs_read(&trans, rbio, inum, NULL);
bch2_trans_exit(&trans);
}
-int bch2_readpage(struct file *file, struct page *page)
-{
- struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
- struct bch_read_bio *rbio;
-
- rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
- rbio->bio.bi_end_io = bch2_readpages_end_io;
-
- __bchfs_readpage(c, rbio, inode->v.i_ino, page);
- return 0;
-}
-
-static void bch2_read_single_page_end_io(struct bio *bio)
+static void bch2_read_single_folio_end_io(struct bio *bio)
{
complete(bio->bi_private);
}
-static int bch2_read_single_page(struct page *page,
- struct address_space *mapping)
+static int bch2_read_single_folio(struct folio *folio,
+ struct address_space *mapping)
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_read_bio *rbio;
+ struct bch_io_opts opts;
int ret;
DECLARE_COMPLETION_ONSTACK(done);
- rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
- io_opts(c, &inode->ei_inode));
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
+ opts);
rbio->bio.bi_private = &done;
- rbio->bio.bi_end_io = bch2_read_single_page_end_io;
+ rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
- __bchfs_readpage(c, rbio, inode->v.i_ino, page);
+ __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
wait_for_completion(&done);
ret = blk_status_to_errno(rbio->bio.bi_status);
if (ret < 0)
return ret;
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
return 0;
}
+int bch2_read_folio(struct file *file, struct folio *folio)
+{
+ int ret;
+
+ ret = bch2_read_single_folio(folio, folio->mapping);
+ folio_unlock(folio);
+ return bch2_err_class(ret);
+}
+
/* writepages: */
struct bch_writepage_state {
struct bch_writepage_io *io;
struct bch_io_opts opts;
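+ /* Scratch: snapshot of a folio's sector state, copied under the bch_folio lock: */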
+ struct bch_folio_sector *tmp;
+ unsigned tmp_sectors;
};
static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
struct bch_inode_info *inode)
{
- return (struct bch_writepage_state) {
- .opts = io_opts(c, &inode->ei_inode)
- };
-}
-
-static void bch2_writepage_io_free(struct closure *cl)
-{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
+ struct bch_writepage_state ret = { 0 };
- bio_put(&io->op.wbio.bio);
+ bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
+ return ret;
}
-static void bch2_writepage_io_done(struct closure *cl)
+static void bch2_writepage_io_done(struct bch_write_op *op)
{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
+ struct bch_writepage_io *io =
+ container_of(op, struct bch_writepage_io, op);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
- struct bvec_iter_all iter;
- struct bio_vec *bvec;
+ struct folio_iter fi;
unsigned i;
if (io->op.error) {
- bio_for_each_segment_all(bvec, bio, iter) {
- struct bch_page_state *s;
+ set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
- SetPageError(bvec->bv_page);
- mapping_set_error(bvec->bv_page->mapping, -EIO);
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s;
- s = __bch2_page_state(bvec->bv_page);
+ folio_set_error(fi.folio);
+ mapping_set_error(fi.folio->mapping, -EIO);
+
+ s = __bch2_folio(fi.folio);
spin_lock(&s->lock);
- for (i = 0; i < PAGE_SECTORS; i++)
+ for (i = 0; i < folio_sectors(fi.folio); i++)
s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
- bio_for_each_segment_all(bvec, bio, iter) {
- struct bch_page_state *s;
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s;
- s = __bch2_page_state(bvec->bv_page);
+ s = __bch2_folio(fi.folio);
spin_lock(&s->lock);
- for (i = 0; i < PAGE_SECTORS; i++)
+ for (i = 0; i < folio_sectors(fi.folio); i++)
s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
* racing with fallocate can cause us to add fewer sectors than
* expected - but we shouldn't add more sectors than expected:
*/
- BUG_ON(io->op.i_sectors_delta > 0);
+ WARN_ON_ONCE(io->op.i_sectors_delta > 0);
/*
* (error (due to going RO) halfway through a page can screw that up
*/
i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
- bio_for_each_segment_all(bvec, bio, iter) {
- struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s = __bch2_folio(fi.folio);
if (atomic_dec_and_test(&s->write_count))
- end_page_writeback(bvec->bv_page);
+ folio_end_writeback(fi.folio);
}
- closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
+ bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
struct bch_writepage_io *io = w->io;
w->io = NULL;
- closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
- continue_at(&io->cl, bch2_writepage_io_done, NULL);
+ closure_call(&io->op.cl, bch2_write, NULL, NULL);
}
/*
{
struct bch_write_op *op;
- w->io = container_of(bio_alloc_bioset(GFP_NOFS,
- BIO_MAX_PAGES,
+ w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
+ REQ_OP_WRITE,
+ GFP_KERNEL,
&c->writepage_bioset),
struct bch_writepage_io, op.wbio.bio);
- closure_init(&w->io->cl, NULL);
w->io->inode = inode;
-
op = &w->io->op;
bch2_write_op_init(op, c, w->opts);
op->target = w->opts.foreground_target;
- op_journal_seq_set(op, &inode->ei_journal_seq);
op->nr_replicas = nr_replicas;
op->res.nr_replicas = nr_replicas;
op->write_point = writepoint_hashed(inode->ei_last_dirtied);
+ op->subvol = inode->ei_subvol;
op->pos = POS(inode->v.i_ino, sector);
+ op->end_io = bch2_writepage_io_done;
+ op->devs_need_flush = &inode->ei_devs_need_flush;
op->wbio.bio.bi_iter.bi_sector = sector;
op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
}
-static int __bch2_writepage(struct page *page,
+static int __bch2_writepage(struct folio *folio,
struct writeback_control *wbc,
void *data)
{
- struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
+ struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_writepage_state *w = data;
- struct bch_page_state *s, orig;
- unsigned i, offset, nr_replicas_this_write = U32_MAX;
+ struct bch_folio *s;
+ unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
loff_t i_size = i_size_read(&inode->v);
- pgoff_t end_index = i_size >> PAGE_SHIFT;
int ret;
- EBUG_ON(!PageUptodate(page));
+ EBUG_ON(!folio_test_uptodate(folio));
- /* Is the page fully inside i_size? */
- if (page->index < end_index)
+ /* Is the folio fully inside i_size? */
+ if (folio_end_pos(folio) <= i_size)
goto do_io;
- /* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_SIZE - 1);
- if (page->index > end_index || !offset) {
- unlock_page(page);
+ /* Is the folio fully outside i_size? (truncate in progress) */
+ if (folio_pos(folio) >= i_size) {
+ folio_unlock(folio);
return 0;
}
/*
- * The page straddles i_size. It must be zeroed out on each and every
+ * The folio straddles i_size. It must be zeroed out on each and every
* writepage invocation because it may be mmapped. "A file is mapped
- * in multiples of the page size. For a file that is not a multiple of
- * the page size, the remaining memory is zeroed when mapped, and
+ * in multiples of the page size. For a file that is not a multiple of
+ * the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio,
+ i_size - folio_pos(folio),
+ folio_size(folio));
do_io:
- s = bch2_page_state_create(page, __GFP_NOFAIL);
+ f_sectors = folio_sectors(folio);
+ s = bch2_folio(folio);
- ret = bch2_get_page_disk_reservation(c, inode, page, true);
- if (ret) {
- SetPageError(page);
- mapping_set_error(page->mapping, ret);
- unlock_page(page);
- return 0;
+ if (f_sectors > w->tmp_sectors) {
+ kfree(w->tmp);
+ w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector),
+ GFP_NOFS|__GFP_NOFAIL);
+ w->tmp_sectors = f_sectors;
}
+ /*
+ * Things get really hairy with errors during writeback:
+ */
+ ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
+ BUG_ON(ret);
+
	/* Before unlocking the folio, get a copy of the sector reservations and state: */
- orig = *s;
+ spin_lock(&s->lock);
+ memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
- for (i = 0; i < PAGE_SECTORS; i++) {
- if (s->s[i].state < SECTOR_DIRTY)
+ for (i = 0; i < f_sectors; i++) {
+ if (s->s[i].state < SECTOR_dirty)
continue;
nr_replicas_this_write =
s->s[i].replicas_reserved);
}
- for (i = 0; i < PAGE_SECTORS; i++) {
- if (s->s[i].state < SECTOR_DIRTY)
+ for (i = 0; i < f_sectors; i++) {
+ if (s->s[i].state < SECTOR_dirty)
continue;
s->s[i].nr_replicas = w->opts.compression
? 0 : nr_replicas_this_write;
s->s[i].replicas_reserved = 0;
- s->s[i].state = SECTOR_ALLOCATED;
+ folio_sector_set(folio, s, i, SECTOR_allocated);
}
+ spin_unlock(&s->lock);
BUG_ON(atomic_read(&s->write_count));
atomic_set(&s->write_count, 1);
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
- unlock_page(page);
+ folio_unlock(folio);
offset = 0;
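+ /* Walk runs of contiguous dirty sectors; each run is added as one bio segment: */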
while (1) {
- unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0;
+ unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
u64 sector;
- while (offset < PAGE_SECTORS &&
- orig.s[offset].state < SECTOR_DIRTY)
+ while (offset < f_sectors &&
+ w->tmp[offset].state < SECTOR_dirty)
offset++;
- if (offset == PAGE_SECTORS)
+ if (offset == f_sectors)
break;
- sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;
-
- while (offset + sectors < PAGE_SECTORS &&
- orig.s[offset + sectors].state >= SECTOR_DIRTY)
+ while (offset + sectors < f_sectors &&
+ w->tmp[offset + sectors].state >= SECTOR_dirty) {
+ reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
+ dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
sectors++;
-
- for (i = offset; i < offset + sectors; i++) {
- reserved_sectors += orig.s[i].replicas_reserved;
- dirty_sectors += orig.s[i].state == SECTOR_DIRTY;
}
+ BUG_ON(!sectors);
+
+ sector = folio_sector(folio) + offset;
if (w->io &&
(w->io->op.res.nr_replicas != nr_replicas_this_write ||
- bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
- w->io->op.wbio.bio.bi_iter.bi_size >= (256U << 20) ||
+ bio_full(&w->io->op.wbio.bio, sectors << 9) ||
+ w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
+ (BIO_MAX_VECS * PAGE_SIZE) ||
bio_end_sector(&w->io->op.wbio.bio) != sector))
bch2_writepage_do_io(w);
atomic_inc(&s->write_count);
BUG_ON(inode != w->io->inode);
- BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
+ BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
sectors << 9, offset << 9));
/* Check for writing past i_size: */
- WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
- round_up(i_size, block_bytes(c)));
+ WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
+ round_up(i_size, block_bytes(c)) &&
+ !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
+ "writing past i_size: %llu > %llu (unrounded %llu)\n",
+ bio_end_sector(&w->io->op.wbio.bio) << 9,
+ round_up(i_size, block_bytes(c)),
+ i_size);
w->io->op.res.sectors += reserved_sectors;
w->io->op.i_sectors_delta -= dirty_sectors;
}
if (atomic_dec_and_test(&s->write_count))
- end_page_writeback(page);
+ folio_end_writeback(folio);
return 0;
}
if (w.io)
bch2_writepage_do_io(&w);
blk_finish_plug(&plug);
- return ret;
-}
-
-int bch2_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
- struct bch_writepage_state w =
- bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
- int ret;
-
- ret = __bch2_writepage(page, wbc, &w);
- if (w.io)
- bch2_writepage_do_io(&w);
-
- return ret;
+ kfree(w.tmp);
+ return bch2_err_class(ret);
}
/* buffered writes: */
int bch2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_page_reservation *res;
- pgoff_t index = pos >> PAGE_SHIFT;
- unsigned offset = pos & (PAGE_SIZE - 1);
- struct page *page;
+ struct bch2_folio_reservation *res;
+ struct folio *folio;
+ unsigned offset;
int ret = -ENOMEM;
res = kmalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
- bch2_page_reservation_init(c, inode, res);
+ bch2_folio_reservation_init(c, inode, res);
*fsdata = res;
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
+ folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
+ FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR_OR_NULL(folio))
goto err_unlock;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto out;
- /* If we're writing entire page, don't need to read it in first: */
- if (len == PAGE_SIZE)
+ offset = pos - folio_pos(folio);
+ len = min_t(size_t, len, folio_end_pos(folio) - pos);
+
+ /* If we're writing entire folio, don't need to read it in first: */
+ if (!offset && len == folio_size(folio))
goto out;
if (!offset && pos + len >= inode->v.i_size) {
- zero_user_segment(page, len, PAGE_SIZE);
- flush_dcache_page(page);
+ folio_zero_segment(folio, len, folio_size(folio));
+ flush_dcache_folio(folio);
goto out;
}
- if (index > inode->v.i_size >> PAGE_SHIFT) {
- zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
- flush_dcache_page(page);
+ if (folio_pos(folio) >= inode->v.i_size) {
+ folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
+ flush_dcache_folio(folio);
goto out;
}
readpage:
- ret = bch2_read_single_page(page, mapping);
+ ret = bch2_read_single_folio(folio, mapping);
if (ret)
goto err;
out:
- ret = bch2_page_reservation_get(c, inode, page, res,
- offset, len, true);
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto err;
+
+ ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
if (ret) {
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
/*
- * If the page hasn't been read in, we won't know if we
+ * If the folio hasn't been read in, we won't know if we
* actually need a reservation - we don't actually need
- * to read here, we just need to check if the page is
+ * to read here, we just need to check if the folio is
* fully backed by uncompressed data:
*/
goto readpage;
goto err;
}
- *pagep = page;
+ *pagep = &folio->page;
return 0;
err:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
*pagep = NULL;
err_unlock:
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
kfree(res);
*fsdata = NULL;
- return ret;
+ return bch2_err_class(ret);
}
int bch2_write_end(struct file *file, struct address_space *mapping,
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch2_page_reservation *res = fsdata;
- unsigned offset = pos & (PAGE_SIZE - 1);
+ struct bch2_folio_reservation *res = fsdata;
+ struct folio *folio = page_folio(page);
+ unsigned offset = pos - folio_pos(folio);
lockdep_assert_held(&inode->v.i_rwsem);
+ BUG_ON(offset + copied > folio_size(folio));
- if (unlikely(copied < len && !PageUptodate(page))) {
+ if (unlikely(copied < len && !folio_test_uptodate(folio))) {
/*
- * The page needs to be read in, but that would destroy
+ * The folio needs to be read in, but that would destroy
* our partial write - simplest thing is to just force
* userspace to redo the write:
*/
- zero_user(page, 0, PAGE_SIZE);
- flush_dcache_page(page);
+ folio_zero_range(folio, 0, folio_size(folio));
+ flush_dcache_folio(folio);
copied = 0;
}
spin_unlock(&inode->v.i_lock);
if (copied) {
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
- bch2_set_page_dirty(c, inode, page, res, offset, copied);
+ bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
inode->ei_last_dirtied = (unsigned long) current;
}
- unlock_page(page);
- put_page(page);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ folio_unlock(folio);
+ folio_put(folio);
+ bch2_pagecache_add_put(inode);
- bch2_page_reservation_put(c, inode, res);
+ bch2_folio_reservation_put(c, inode, res);
kfree(res);
return copied;
}
-#define WRITE_BATCH_PAGES 32
+static noinline void folios_trunc(folios *folios, struct folio **fi)
+{
+ while (folios->data + folios->nr > fi) {
+ struct folio *f = darray_pop(folios);
+
+ folio_unlock(f);
+ folio_put(f);
+ }
+}
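/*
 * A usage sketch, assuming a hypothetical step_failed() predicate:
 * folios_trunc() pops, unlocks and releases every folio at or after @fi, so
 * that later passes over the darray only see folios we're still committed
 * to writing:
 *
 *     darray_for_each(folios, fi)
 *             if (step_failed(*fi)) {
 *                     folios_trunc(&folios, fi);
 *                     break;
 *             }
 */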
static int __bch2_buffered_write(struct bch_inode_info *inode,
struct address_space *mapping,
loff_t pos, unsigned len)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct page *pages[WRITE_BATCH_PAGES];
- struct bch2_page_reservation res;
- unsigned long index = pos >> PAGE_SHIFT;
- unsigned offset = pos & (PAGE_SIZE - 1);
- unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
- unsigned i, reserved = 0, set_dirty = 0;
- unsigned copied = 0, nr_pages_copied = 0;
+ struct bch2_folio_reservation res;
+ folios folios;
+ struct folio **fi, *f;
+ unsigned copied = 0, f_offset;
+ u64 end = pos + len, f_pos;
+ loff_t last_folio_pos = inode->v.i_size;
int ret = 0;
BUG_ON(!len);
- BUG_ON(nr_pages > ARRAY_SIZE(pages));
- bch2_page_reservation_init(c, inode, &res);
+ bch2_folio_reservation_init(c, inode, &res);
+ darray_init(&folios);
- for (i = 0; i < nr_pages; i++) {
- pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
- if (!pages[i]) {
- nr_pages = i;
- if (!i) {
- ret = -ENOMEM;
- goto out;
- }
- len = min_t(unsigned, len,
- nr_pages * PAGE_SIZE - offset);
- break;
- }
- }
+ ret = filemap_get_contig_folios_d(mapping, pos, end,
+ FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
+ mapping_gfp_mask(mapping),
+ &folios);
+ if (ret)
+ goto out;
+
+ BUG_ON(!folios.nr);
- if (offset && !PageUptodate(pages[0])) {
- ret = bch2_read_single_page(pages[0], mapping);
+ f = darray_first(folios);
+ if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
+ ret = bch2_read_single_folio(f, mapping);
if (ret)
goto out;
}
- if ((pos + len) & (PAGE_SIZE - 1) &&
- !PageUptodate(pages[nr_pages - 1])) {
- if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
- zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
+ f = darray_last(folios);
+ end = min(end, folio_end_pos(f));
+ last_folio_pos = folio_pos(f);
+ if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
+ if (end >= inode->v.i_size) {
+ folio_zero_range(f, 0, folio_size(f));
} else {
- ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
+ ret = bch2_read_single_folio(f, mapping);
if (ret)
goto out;
}
}
- while (reserved < len) {
- struct page *page = pages[(offset + reserved) >> PAGE_SHIFT];
- unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
- unsigned pg_len = min_t(unsigned, len - reserved,
- PAGE_SIZE - pg_offset);
-retry_reservation:
- ret = bch2_page_reservation_get(c, inode, page, &res,
- pg_offset, pg_len, true);
+ ret = bch2_folio_set(c, inode_inum(inode), folios.data, folios.nr);
+ if (ret)
+ goto out;
- if (ret && !PageUptodate(page)) {
- ret = bch2_read_single_page(page, mapping);
- if (!ret)
- goto retry_reservation;
- }
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ u64 f_len = min(end, folio_end_pos(f)) - f_pos;
- if (ret)
- goto out;
+ /*
+ * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
+ * supposed to write as much as we have disk space for.
+ *
+ * On failure here we should still write out a partial page if
+ * we aren't completely out of disk space - we don't do that
+ * yet:
+ */
+ ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
+ if (unlikely(ret)) {
+ folios_trunc(&folios, fi);
+ if (!folios.nr)
+ goto out;
- reserved += pg_len;
+ end = min(end, folio_end_pos(darray_last(folios)));
+ break;
+ }
+
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
}
if (mapping_writably_mapped(mapping))
- for (i = 0; i < nr_pages; i++)
- flush_dcache_page(pages[i]);
-
- while (copied < len) {
- struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
- unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
- unsigned pg_len = min_t(unsigned, len - copied,
- PAGE_SIZE - pg_offset);
- unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
- iter, pg_offset, pg_len);
-
- if (!pg_copied)
+ darray_for_each(folios, fi)
+ flush_dcache_folio(*fi);
+
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ u64 f_len = min(end, folio_end_pos(f)) - f_pos;
+ unsigned f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
+
+ if (!f_copied) {
+ folios_trunc(&folios, fi);
+ break;
+ }
+
+ if (!folio_test_uptodate(f) &&
+ f_copied != folio_size(f) &&
+ pos + copied + f_copied < inode->v.i_size) {
+ folio_zero_range(f, 0, folio_size(f));
+ folios_trunc(&folios, fi);
+ break;
+ }
+
+ flush_dcache_folio(f);
+ copied += f_copied;
+
+ if (f_copied != f_len) {
+ folios_trunc(&folios, fi + 1);
break;
+ }
- flush_dcache_page(page);
- iov_iter_advance(iter, pg_copied);
- copied += pg_copied;
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
}
if (!copied)
goto out;
- if (copied < len &&
- ((offset + copied) & (PAGE_SIZE - 1))) {
- struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
-
- if (!PageUptodate(page)) {
- zero_user(page, 0, PAGE_SIZE);
- copied -= (offset + copied) & (PAGE_SIZE - 1);
- }
- }
+ end = pos + copied;
spin_lock(&inode->v.i_lock);
- if (pos + copied > inode->v.i_size)
- i_size_write(&inode->v, pos + copied);
+ if (end > inode->v.i_size)
+ i_size_write(&inode->v, end);
spin_unlock(&inode->v.i_lock);
- while (set_dirty < copied) {
- struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
- unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
- unsigned pg_len = min_t(unsigned, copied - set_dirty,
- PAGE_SIZE - pg_offset);
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ u64 f_len = min(end, folio_end_pos(f)) - f_pos;
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ if (!folio_test_uptodate(f))
+ folio_mark_uptodate(f);
- bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
- unlock_page(page);
- put_page(page);
+ bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
- set_dirty += pg_len;
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
}
- nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
inode->ei_last_dirtied = (unsigned long) current;
out:
- for (i = nr_pages_copied; i < nr_pages; i++) {
- unlock_page(pages[i]);
- put_page(pages[i]);
+ darray_for_each(folios, fi) {
+ folio_unlock(*fi);
+ folio_put(*fi);
}
- bch2_page_reservation_put(c, inode, &res);
+ /*
+ * If the last folio added to the mapping starts beyond current EOF, we
+ * performed a short write but left around at least one post-EOF folio.
+ * Clean up the mapping before we return.
+ */
+ if (last_folio_pos >= inode->v.i_size)
+ truncate_pagecache(&inode->v, inode->v.i_size);
+
+ darray_exit(&folios);
+ bch2_folio_reservation_put(c, inode, &res);
return copied ?: ret;
}
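/*
 * In outline (a sketch of the function above, not new logic),
 * __bch2_buffered_write() makes several passes over one contiguous run of
 * folios:
 *
 *     filemap_get_contig_folios_d()   grab locked, contiguous folios
 *     bch2_read_single_folio()        read in partial first/last folios
 *     bch2_folio_set()                look up extent backing state
 *     bch2_folio_reservation_get()    reserve space, folio by folio
 *     copy_page_from_iter_atomic()    copy the user data
 *     bch2_set_folio_dirty()          mark uptodate + dirty
 *
 * A pass that fails part way truncates the folio darray and carries on with
 * the shorter range, so a partial write can still be committed.
 */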
ssize_t written = 0;
int ret = 0;
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
do {
unsigned offset = pos & (PAGE_SIZE - 1);
- unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
- PAGE_SIZE * WRITE_BATCH_PAGES - offset);
+ unsigned bytes = iov_iter_count(iter);
again:
/*
* Bring in the user page that we will copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
* same page as we're writing to, without it being marked
* up-to-date.
*
* Not only is this an optimisation, but it is also required
* to check that the address is actually valid, when atomic
* usercopies are used, below.
*/
- if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
+ if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
bytes = min_t(unsigned long, iov_iter_count(iter),
PAGE_SIZE - offset);
- if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
+ if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
ret = -EFAULT;
break;
}
}
pos += ret;
written += ret;
+ ret = 0;
balance_dirty_pages_ratelimited(mapping);
} while (iov_iter_count(iter));
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
return written ? written : ret;
}
/* O_DIRECT reads */
+static void bio_check_or_release(struct bio *bio, bool check_dirty)
+{
+ if (check_dirty) {
+ bio_check_pages_dirty(bio);
+ } else {
+ bio_release_pages(bio, false);
+ bio_put(bio);
+ }
+}
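/*
 * check_dirty mirrors whether bio_set_pages_dirty() was called at
 * submission time: for user-backed iterators we must re-dirty pages that
 * were cleaned while the read was in flight, while for kernel-internal
 * iterators we only drop our page refs. A sketch of the pairing used below:
 *
 *     dio->should_dirty = iter_is_iovec(iter);
 *     ...
 *     if (dio->should_dirty)
 *             bio_set_pages_dirty(bio);
 *     ...
 *     bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
 */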
+
static void bch2_dio_read_complete(struct closure *cl)
{
struct dio_read *dio = container_of(cl, struct dio_read, cl);
- dio->req->ki_complete(dio->req, dio->ret, 0);
- bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
+ dio->req->ki_complete(dio->req, dio->ret);
+ bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}
static void bch2_direct_IO_read_endio(struct bio *bio)
static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
+ struct dio_read *dio = bio->bi_private;
+ bool should_dirty = dio->should_dirty;
+
bch2_direct_IO_read_endio(bio);
- bio_check_pages_dirty(bio); /* transfers ownership */
+ bio_check_or_release(bio, should_dirty);
}
static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
struct file *file = req->ki_filp;
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
+ struct bch_io_opts opts;
struct dio_read *dio;
struct bio *bio;
loff_t offset = req->ki_pos;
size_t shorten;
ssize_t ret;
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
if ((offset|iter->count) & (block_bytes(c) - 1))
return -EINVAL;
shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
iter->count -= shorten;
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
&c->dio_read_bioset);
bio->bi_end_io = bch2_direct_IO_read_endio;
CLOSURE_REMAINING_INITIALIZER + 1);
}
- dio->req = req;
- dio->ret = ret;
+ dio->req = req;
+ dio->ret = ret;
+ /*
+ * This is one of the sketchier things I've encountered: we have to skip
+ * the dirtying of requests that are internal to the kernel (i.e. from
+ * loopback), because we'll deadlock on page_lock.
+ */
+ dio->should_dirty = iter_is_iovec(iter);
+
+ goto start;
+ while (iter->count) {
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
+ &c->bio_read);
+ bio->bi_end_io = bch2_direct_IO_read_split_endio;
+start:
+ bio->bi_opf = REQ_OP_READ|REQ_SYNC;
+ bio->bi_iter.bi_sector = offset >> 9;
+ bio->bi_private = dio;
+
+ ret = bio_iov_iter_get_pages(bio, iter);
+ if (ret < 0) {
+ /* XXX: fault inject this path */
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ break;
+ }
+
+ offset += bio->bi_iter.bi_size;
+
+ if (dio->should_dirty)
+ bio_set_pages_dirty(bio);
+
+ if (iter->count)
+ closure_get(&dio->cl);
+
+ bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
+ }
+
+ iter->count += shorten;
+
+ if (sync) {
+ closure_sync(&dio->cl);
+ closure_debug_destroy(&dio->cl);
+ ret = dio->ret;
+ bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
+ return ret;
+ } else {
+ return -EIOCBQUEUED;
+ }
+}
+
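/*
 * A sketch of the refcounting above (illustrative): every split bio except
 * the last takes an extra ref on dio->cl before submission, and each bio's
 * endio handler drops one, so closure_sync() only returns once all splits
 * have completed:
 *
 *     if (iter->count)
 *             closure_get(&dio->cl);
 *     bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
 */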
+ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct address_space *mapping = file->f_mapping;
+ size_t count = iov_iter_count(iter);
+ ssize_t ret;
+
+ if (!count)
+ return 0; /* skip atime */
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ struct blk_plug plug;
+
+ if (unlikely(mapping->nrpages)) {
+ ret = filemap_write_and_wait_range(mapping,
+ iocb->ki_pos,
+ iocb->ki_pos + count - 1);
+ if (ret < 0)
+ goto out;
+ }
+
+ file_accessed(file);
+
+ blk_start_plug(&plug);
+ ret = bch2_direct_IO_read(iocb, iter);
+ blk_finish_plug(&plug);
+
+ if (ret >= 0)
+ iocb->ki_pos += ret;
+ } else {
+ bch2_pagecache_add_get(inode);
+ ret = generic_file_read_iter(iocb, iter);
+ bch2_pagecache_add_put(inode);
+ }
+out:
+ return bch2_err_class(ret);
+}
+
+/* O_DIRECT writes */
+
+static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
+ u64 offset, u64 size,
+ unsigned nr_replicas, bool compressed)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 end = offset + size;
+ u32 snapshot;
+ bool ret = true;
+ int err;
+
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (err)
+ goto err;
+
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inum.inum, offset, snapshot),
+ BTREE_ITER_SLOTS, k, err) {
+ if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
+ break;
+
+ if (k.k->p.snapshot != snapshot ||
+ nr_replicas > bch2_bkey_replicas(c, k) ||
+ (!compressed && bch2_bkey_sectors_compressed(k))) {
+ ret = false;
+ break;
+ }
+ }
+
+ offset = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(err, BCH_ERR_transaction_restart))
+ goto retry;
+ bch2_trans_exit(&trans);
+
+ return err ? false : ret;
+}
+
+static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
+
+ return bch2_check_range_allocated(c, inode_inum(inode),
+ dio->op.pos.offset, bio_sectors(bio),
+ dio->op.opts.data_replicas,
+ dio->op.opts.compression != 0);
+}
+
+static void bch2_dio_write_loop_async(struct bch_write_op *);
+static __always_inline long bch2_dio_write_done(struct dio_write *dio);
+
+/*
+ * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
+ * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
+ * caller's stack, and we're not guaranteed that it will live for the
+ * duration of the IO:
+ */
+static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
+{
+ struct iovec *iov = dio->inline_vecs;
+
+ /*
+ * iov_iter has a single embedded iovec - nothing to do:
+ */
+ if (iter_is_ubuf(&dio->iter))
+ return 0;
+
+ /*
+ * We don't currently handle non-iovec iov_iters here - return an error,
+ * and we'll fall back to doing the IO synchronously:
+ */
+ if (!iter_is_iovec(&dio->iter))
+ return -1;
+
+ if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
+ iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
+ GFP_KERNEL);
+ if (unlikely(!iov))
+ return -ENOMEM;
+
+ dio->free_iov = true;
+ }
+
+ memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
+ dio->iter.__iov = iov;
+ return 0;
+}
+
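/*
 * A sketch of the caller-side pattern, mirroring the write loop below: the
 * iovec copy is only needed when we're about to return -EIOCBQUEUED with
 * the iterator not yet fully consumed, and on failure we degrade to a
 * synchronous write rather than failing the IO:
 *
 *     if (unlikely(dio->iter.count) &&
 *         !dio->sync &&
 *         !dio->loop &&
 *         bch2_dio_write_copy_iov(dio))
 *             dio->sync = sync = true;
 */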
+static void bch2_dio_write_flush_done(struct closure *cl)
+{
+ struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+ struct bch_fs *c = dio->op.c;
- goto start;
- while (iter->count) {
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
- &c->bio_read);
- bio->bi_end_io = bch2_direct_IO_read_split_endio;
-start:
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
- bio->bi_iter.bi_sector = offset >> 9;
- bio->bi_private = dio;
+ closure_debug_destroy(cl);
- ret = bio_iov_iter_get_pages(bio, iter);
- if (ret < 0) {
- /* XXX: fault inject this path */
- bio->bi_status = BLK_STS_RESOURCE;
- bio_endio(bio);
- break;
- }
+ dio->op.error = bch2_journal_error(&c->journal);
- offset += bio->bi_iter.bi_size;
- bio_set_pages_dirty(bio);
+ bch2_dio_write_done(dio);
+}
- if (iter->count)
- closure_get(&dio->cl);
+static noinline void bch2_dio_write_flush(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_unpacked inode;
+ int ret;
- bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
- }
+ dio->flush = 0;
- iter->count += shorten;
+ closure_init(&dio->op.cl, NULL);
- if (sync) {
- closure_sync(&dio->cl);
- closure_debug_destroy(&dio->cl);
- ret = dio->ret;
- bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
- return ret;
+ if (!dio->op.error) {
+ ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
+ if (ret) {
+ dio->op.error = ret;
+ } else {
+ bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
+ bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
+ }
+ }
+
+ if (dio->sync) {
+ closure_sync(&dio->op.cl);
+ closure_debug_destroy(&dio->op.cl);
} else {
- return -EIOCBQUEUED;
+ continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
}
}
-ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
- struct file *file = iocb->ki_filp;
- struct bch_inode_info *inode = file_bch_inode(file);
- struct address_space *mapping = file->f_mapping;
- size_t count = iov_iter_count(iter);
- ssize_t ret;
+ struct kiocb *req = dio->req;
+ struct bch_inode_info *inode = dio->inode;
+ bool sync = dio->sync;
+ long ret;
- if (!count)
- return 0; /* skip atime */
+ if (unlikely(dio->flush)) {
+ bch2_dio_write_flush(dio);
+ if (!sync)
+ return -EIOCBQUEUED;
+ }
- if (iocb->ki_flags & IOCB_DIRECT) {
- struct blk_plug plug;
+ bch2_pagecache_block_put(inode);
- ret = filemap_write_and_wait_range(mapping,
- iocb->ki_pos,
- iocb->ki_pos + count - 1);
- if (ret < 0)
- return ret;
+ if (dio->free_iov)
+ kfree(dio->iter.__iov);
- file_accessed(file);
+ ret = dio->op.error ?: ((long) dio->written << 9);
+ bio_put(&dio->op.wbio.bio);
- blk_start_plug(&plug);
- ret = bch2_direct_IO_read(iocb, iter);
- blk_finish_plug(&plug);
+ /* inode->i_dio_count is our ref on inode and thus bch_fs */
+ inode_dio_end(&inode->v);
- if (ret >= 0)
- iocb->ki_pos += ret;
- } else {
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
- ret = generic_file_read_iter(iocb, iter);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
- }
+ if (ret < 0)
+ ret = bch2_err_class(ret);
+ if (!sync) {
+ req->ki_complete(req, ret);
+ ret = -EIOCBQUEUED;
+ }
return ret;
}
-/* O_DIRECT writes */
+static __always_inline void bch2_dio_write_end(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
+
+ req->ki_pos += (u64) dio->op.written << 9;
+ dio->written += dio->op.written;
+
+ if (dio->extending) {
+ spin_lock(&inode->v.i_lock);
+ if (req->ki_pos > inode->v.i_size)
+ i_size_write(&inode->v, req->ki_pos);
+ spin_unlock(&inode->v.i_lock);
+ }
+
+ if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
+ __bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+
+ bio_release_pages(bio, false);
-static long bch2_dio_write_loop(struct dio_write *dio)
+ if (unlikely(dio->op.error))
+ set_bit(EI_INODE_ERROR, &inode->ei_flags);
+}
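/*
 * An aside on the locking above (standard VFS convention, not introduced by
 * this patch): i_size_write() must be serialized by the writer - here
 * i_lock - because on 32-bit it is protected by a seqcount that
 * i_size_read() retries against:
 *
 *     spin_lock(&inode->v.i_lock);
 *     if (req->ki_pos > inode->v.i_size)
 *             i_size_write(&inode->v, req->ki_pos);
 *     spin_unlock(&inode->v.i_lock);
 */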
+
+static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
{
- bool kthread = (current->flags & PF_KTHREAD) != 0;
struct bch_fs *c = dio->op.c;
struct kiocb *req = dio->req;
- struct address_space *mapping = req->ki_filp->f_mapping;
- struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
+ struct address_space *mapping = dio->mapping;
+ struct bch_inode_info *inode = dio->inode;
+ struct bch_io_opts opts;
struct bio *bio = &dio->op.wbio.bio;
- struct bvec_iter_all iter;
- struct bio_vec *bv;
- unsigned unaligned;
- u64 new_i_size;
- bool sync;
+ unsigned unaligned, iter_count;
+ bool sync = dio->sync, dropped_locks;
long ret;
- if (dio->loop)
- goto loop;
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
while (1) {
- if (kthread)
- use_mm(dio->mm);
- BUG_ON(current->faults_disabled_mapping);
+ iter_count = dio->iter.count;
+
+ EBUG_ON(current->faults_disabled_mapping);
current->faults_disabled_mapping = mapping;
ret = bio_iov_iter_get_pages(bio, &dio->iter);
+ dropped_locks = fdm_dropped_locks();
+
current->faults_disabled_mapping = NULL;
- if (kthread)
- unuse_mm(dio->mm);
+
+ /*
+ * If the fault handler returned an error but also signalled
+ * that it dropped & retook ei_pagecache_lock, we just need to
+ * re-shoot down the page cache and retry:
+ */
+ if (dropped_locks && ret)
+ ret = 0;
if (unlikely(ret < 0))
goto err;
+ if (unlikely(dropped_locks)) {
+ ret = write_invalidate_inode_pages_range(mapping,
+ req->ki_pos,
+ req->ki_pos + iter_count - 1);
+ if (unlikely(ret))
+ goto err;
+
+ if (!bio->bi_iter.bi_size)
+ continue;
+ }
+
unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
bio->bi_iter.bi_size -= unaligned;
iov_iter_revert(&dio->iter, unaligned);
if (!bio->bi_iter.bi_size) {
/*
* bio_iov_iter_get_pages was only able to get <
* blocksize worth of pages:
*/
- bio_for_each_segment_all(bv, bio, iter)
- put_page(bv->bv_page);
ret = -EFAULT;
goto err;
}
- dio->op.pos = POS(inode->v.i_ino,
- (req->ki_pos >> 9) + dio->op.written);
-
- task_io_account_write(bio->bi_iter.bi_size);
+ bch2_write_op_init(&dio->op, c, opts);
+ dio->op.end_io = sync
+ ? NULL
+ : bch2_dio_write_loop_async;
+ dio->op.target = dio->op.opts.foreground_target;
+ dio->op.write_point = writepoint_hashed((unsigned long) current);
+ dio->op.nr_replicas = dio->op.opts.data_replicas;
+ dio->op.subvol = inode->ei_subvol;
+ dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
+ dio->op.devs_need_flush = &inode->ei_devs_need_flush;
+
+ if (sync)
+ dio->op.flags |= BCH_WRITE_SYNC;
+ dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
+
+ ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
+ bio_sectors(bio), true);
+ if (unlikely(ret))
+ goto err;
- if (!dio->sync && !dio->loop && dio->iter.count) {
- struct iovec *iov = dio->inline_vecs;
+ ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
+ dio->op.opts.data_replicas, 0);
+ if (unlikely(ret) &&
+ !bch2_dio_write_check_allocated(dio))
+ goto err;
- if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
- iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
- GFP_KERNEL);
- if (unlikely(!iov)) {
- dio->sync = true;
- goto do_io;
- }
+ task_io_account_write(bio->bi_iter.bi_size);
- dio->free_iov = true;
- }
+ if (unlikely(dio->iter.count) &&
+ !dio->sync &&
+ !dio->loop &&
+ bch2_dio_write_copy_iov(dio))
+ dio->sync = sync = true;
- memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
- dio->iter.iov = iov;
- }
-do_io:
dio->loop = true;
closure_call(&dio->op.cl, bch2_write, NULL, NULL);
- if (dio->sync)
- wait_for_completion(&dio->done);
- else
+ if (!sync)
return -EIOCBQUEUED;
-loop:
- i_sectors_acct(c, inode, &dio->quota_res,
- dio->op.i_sectors_delta);
- dio->op.i_sectors_delta = 0;
-
- new_i_size = req->ki_pos + ((u64) dio->op.written << 9);
- spin_lock(&inode->v.i_lock);
- if (new_i_size > inode->v.i_size)
- i_size_write(&inode->v, new_i_size);
- spin_unlock(&inode->v.i_lock);
+ bch2_dio_write_end(dio);
- bio_for_each_segment_all(bv, bio, iter)
- put_page(bv->bv_page);
- if (!dio->iter.count || dio->op.error)
+ if (likely(!dio->iter.count) || dio->op.error)
break;
- bio_reset(bio);
- reinit_completion(&dio->done);
+ bio_reset(bio, NULL, REQ_OP_WRITE);
}
-
- ret = dio->op.error ?: ((long) dio->op.written << 9);
+out:
+ return bch2_dio_write_done(dio);
err:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- bch2_disk_reservation_put(c, &dio->op.res);
- bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ dio->op.error = ret;
- if (dio->free_iov)
- kfree(dio->iter.iov);
+ bio_release_pages(bio, false);
- sync = dio->sync;
- bio_put(bio);
+ bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ goto out;
+}
- /* inode->i_dio_count is our ref on inode and thus bch_fs */
- inode_dio_end(&inode->v);
+static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
+{
+ struct mm_struct *mm = dio->mm;
- if (!sync) {
- req->ki_complete(req, ret, 0);
- ret = -EIOCBQUEUED;
- }
- return ret;
+ bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
+
+ if (mm)
+ kthread_use_mm(mm);
+ bch2_dio_write_loop(dio);
+ if (mm)
+ kthread_unuse_mm(mm);
}
static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
struct dio_write *dio = container_of(op, struct dio_write, op);
- if (dio->sync)
- complete(&dio->done);
+ bch2_dio_write_end(dio);
+
+ if (likely(!dio->iter.count) || dio->op.error)
+ bch2_dio_write_done(dio);
else
- bch2_dio_write_loop(dio);
+ bch2_dio_write_continue(dio);
}
static noinline
struct address_space *mapping = file->f_mapping;
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct dio_write *dio;
struct bio *bio;
bool locked = true, extending;
goto err;
inode_dio_begin(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_get(inode);
extending = req->ki_pos + iter->count > inode->v.i_size;
if (!extending) {
locked = false;
}
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_WRITE,
+ GFP_KERNEL,
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);
- init_completion(&dio->done);
dio->req = req;
+ dio->mapping = mapping;
+ dio->inode = inode;
dio->mm = current->mm;
dio->loop = false;
+ dio->extending = extending;
dio->sync = is_sync_kiocb(req) || extending;
+ dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
dio->free_iov = false;
dio->quota_res.sectors = 0;
+ dio->written = 0;
dio->iter = *iter;
+ dio->op.c = c;
- bch2_write_op_init(&dio->op, c, opts);
- dio->op.end_io = bch2_dio_write_loop_async;
- dio->op.target = opts.foreground_target;
- op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
- dio->op.write_point = writepoint_hashed((unsigned long) current);
- dio->op.flags |= BCH_WRITE_NOPUT_RESERVATION;
-
- if ((req->ki_flags & IOCB_DSYNC) &&
- !c->opts.journal_flush_disabled)
- dio->op.flags |= BCH_WRITE_FLUSH;
-
- ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
- iter->count >> 9, true);
- if (unlikely(ret))
- goto err_put_bio;
-
- dio->op.nr_replicas = dio->op.opts.data_replicas;
-
- ret = bch2_disk_reservation_get(c, &dio->op.res, iter->count >> 9,
- dio->op.opts.data_replicas, 0);
- if (unlikely(ret) &&
- !bch2_check_range_allocated(c, POS(inode->v.i_ino,
- req->ki_pos >> 9),
- iter->count >> 9,
- dio->op.opts.data_replicas))
- goto err_put_bio;
-
- ret = write_invalidate_inode_pages_range(mapping,
- req->ki_pos,
- req->ki_pos + iter->count - 1);
- if (unlikely(ret))
- goto err_put_bio;
+ if (unlikely(mapping->nrpages)) {
+ ret = write_invalidate_inode_pages_range(mapping,
+ req->ki_pos,
+ req->ki_pos + iter->count - 1);
+ if (unlikely(ret))
+ goto err_put_bio;
+ }
ret = bch2_dio_write_loop(dio);
err:
if (locked)
inode_unlock(&inode->v);
- if (ret > 0)
- req->ki_pos += ret;
return ret;
err_put_bio:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- bch2_disk_reservation_put(c, &dio->op.res);
- bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ bch2_pagecache_block_put(inode);
bio_put(bio);
inode_dio_end(&inode->v);
goto err;
struct bch_inode_info *inode = file_bch_inode(file);
ssize_t ret;
- if (iocb->ki_flags & IOCB_DIRECT)
- return bch2_direct_write(iocb, from);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ret = bch2_direct_write(iocb, from);
+ goto out;
+ }
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(&inode->v);
if (ret > 0)
ret = generic_write_sync(iocb, ret);
-
- return ret;
+out:
+ return bch2_err_class(ret);
}
/* fsync: */
-int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+/*
+ * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
+ * insert trigger: look up the btree inode instead
+ */
+static int bch2_flush_inode(struct bch_fs *c,
+ struct bch_inode_info *inode)
{
- struct bch_inode_info *inode = file_bch_inode(file);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- int ret, ret2;
+ struct bch_inode_unpacked u;
+ int ret;
- ret = file_write_and_wait_range(file, start, end);
+ if (c->opts.journal_flush_disabled)
+ return 0;
+
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
if (ret)
return ret;
- if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
- goto out;
+ return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+ bch2_inode_flush_nocow_writes(c, inode);
+}
- ret = sync_inode_metadata(&inode->v, 1);
- if (ret)
- return ret;
-out:
- if (!c->opts.journal_flush_disabled)
- ret = bch2_journal_flush_seq(&c->journal,
- inode->ei_journal_seq);
- ret2 = file_check_and_advance_wb_err(file);
+int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ int ret, ret2, ret3;
- return ret ?: ret2;
+ ret = file_write_and_wait_range(file, start, end);
+ ret2 = sync_inode_metadata(&inode->v, 1);
+ ret3 = bch2_flush_inode(c, inode);
+
+ return bch2_err_class(ret ?: ret2 ?: ret3);
}
/* truncate: */
-static inline int range_has_data(struct bch_fs *c,
- struct bpos start,
- struct bpos end)
+static inline int range_has_data(struct bch_fs *c, u32 subvol,
+ struct bpos start,
+ struct bpos end)
{
struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
- if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
- break;
+ ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
+ if (ret)
+ goto err;
- if (bkey_extent_is_data(k.k)) {
+ for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
+ if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
ret = 1;
break;
}
- }
+ start = iter.pos;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- return bch2_trans_exit(&trans) ?: ret;
+ bch2_trans_exit(&trans);
+ return ret;
}
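/*
 * The retry:/err: shape above is the usual bcachefs transaction-restart
 * idiom in this file: restartable errors loop back to bch2_trans_begin(),
 * anything else unwinds. A minimal sketch:
 *
 *     retry:
 *             bch2_trans_begin(&trans);
 *             ...
 *     err:
 *             if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 *                     goto retry;
 *             bch2_trans_exit(&trans);
 */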
-static int __bch2_truncate_page(struct bch_inode_info *inode,
- pgoff_t index, loff_t start, loff_t end)
+static int __bch2_truncate_folio(struct bch_inode_info *inode,
+ pgoff_t index, loff_t start, loff_t end)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct address_space *mapping = inode->v.i_mapping;
- struct bch_page_state *s;
+ struct bch_folio *s;
unsigned start_offset = start & (PAGE_SIZE - 1);
unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
unsigned i;
- struct page *page;
+ struct folio *folio;
+ s64 i_sectors_delta = 0;
int ret = 0;
+ u64 end_pos;
- /* Page boundary? Nothing to do */
- if (!((index == start >> PAGE_SHIFT && start_offset) ||
- (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
- return 0;
-
- /* Above i_size? */
- if (index << PAGE_SHIFT >= inode->v.i_size)
- return 0;
-
- page = find_lock_page(mapping, index);
- if (!page) {
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR_OR_NULL(folio)) {
/*
* XXX: we're doing two index lookups when we end up reading the
- * page
+ * folio
*/
- ret = range_has_data(c,
- POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
- POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
+ ret = range_has_data(c, inode->ei_subvol,
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
if (ret <= 0)
return ret;
- page = find_or_create_page(mapping, index, GFP_KERNEL);
- if (unlikely(!page)) {
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK|FGP_CREAT, GFP_KERNEL);
+ if (unlikely(IS_ERR_OR_NULL(folio))) {
ret = -ENOMEM;
goto out;
}
}
- s = bch2_page_state_create(page, 0);
+ BUG_ON(start >= folio_end_pos(folio));
+ BUG_ON(end <= folio_pos(folio));
+
+ start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
+ end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
+
+ /* Folio boundary? Nothing to do */
+ if (start_offset == 0 &&
+ end_offset == folio_size(folio)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ s = bch2_folio_create(folio, 0);
if (!s) {
ret = -ENOMEM;
goto unlock;
}
- if (!PageUptodate(page)) {
- ret = bch2_read_single_page(page, mapping);
+ if (!folio_test_uptodate(folio)) {
+ ret = bch2_read_single_folio(folio, mapping);
if (ret)
goto unlock;
}
- if (index != start >> PAGE_SHIFT)
- start_offset = 0;
- if (index != end >> PAGE_SHIFT)
- end_offset = PAGE_SIZE;
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto unlock;
for (i = round_up(start_offset, block_bytes(c)) >> 9;
i < round_down(end_offset, block_bytes(c)) >> 9;
i++) {
s->s[i].nr_replicas = 0;
- s->s[i].state = SECTOR_UNALLOCATED;
+
+ i_sectors_delta -= s->s[i].state == SECTOR_dirty;
+ folio_sector_set(folio, s, i, SECTOR_unallocated);
}
- zero_user_segment(page, start_offset, end_offset);
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
+
+ /*
+ * Caller needs to know whether this folio will be written out by
+ * writeback - doing an i_size update if necessary - or whether it will
+ * be responsible for the i_size update.
+ *
+ * Note that we shouldn't ever see a folio beyond EOF, but check and
+ * warn if so. This has been observed after a failure to clean up folios
+ * following a short write, and there's still a chance reclaim will fix
+ * things up.
+ */
+ WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
+ end_pos = folio_end_pos(folio);
+ if (inode->v.i_size > folio_pos(folio))
+ end_pos = min_t(u64, inode->v.i_size, end_pos);
+ ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;
+
+ folio_zero_segment(folio, start_offset, end_offset);
/*
* Bit of a hack - we don't want truncate to fail due to -ENOSPC.
*
- * XXX: because we aren't currently tracking whether the page has actual
+ * XXX: because we aren't currently tracking whether the folio has actual
* data in it (vs. just 0s, or only partially written) this is wrong. ick.
*/
- ret = bch2_get_page_disk_reservation(c, inode, page, false);
- BUG_ON(ret);
+ BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
- __set_page_dirty_nobuffers(page);
+ /*
+ * This removes any writeable userspace mappings; we need to force
+ * .page_mkwrite to be called again before any mmapped writes, to
+ * redirty the full page:
+ */
+ folio_mkclean(folio);
+ filemap_dirty_folio(mapping, folio);
unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
return ret;
}
-static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
+static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
+{
+ return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
+ from, ANYSINT_MAX(loff_t));
+}
+
+static int bch2_truncate_folios(struct bch_inode_info *inode,
+ loff_t start, loff_t end)
{
- return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
- from, round_up(from, PAGE_SIZE));
+ int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
+ start, end);
+
+ if (ret >= 0 &&
+ start >> PAGE_SHIFT != end >> PAGE_SHIFT)
+ ret = __bch2_truncate_folio(inode,
+ (end - 1) >> PAGE_SHIFT,
+ start, end);
+ return ret;
}
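/*
 * A usage sketch, mirroring the fpunch/fallocate callers below: a return of
 * 1 means the folio straddling the truncate point is still dirty, so
 * writeback will write it out and handle the i_size update; 0 means the
 * caller must update the on-disk i_size itself:
 *
 *     ret = bch2_truncate_folios(inode, offset, end);
 *     if (ret < 0)
 *             return ret;
 *     truncated_last_page = ret;
 */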
-static int bch2_extend(struct bch_inode_info *inode,
+static int bch2_extend(struct mnt_idmap *idmap,
+ struct bch_inode_info *inode,
struct bch_inode_unpacked *inode_u,
struct iattr *iattr)
{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct address_space *mapping = inode->v.i_mapping;
int ret;
return ret;
truncate_setsize(&inode->v, iattr->ia_size);
- setattr_copy(&inode->v, iattr);
-
- mutex_lock(&inode->ei_update_lock);
- ret = bch2_write_inode_size(c, inode, inode->v.i_size,
- ATTR_MTIME|ATTR_CTIME);
- mutex_unlock(&inode->ei_update_lock);
- return ret;
+ return bch2_setattr_nonsize(idmap, inode, iattr);
}
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
struct bch_inode_unpacked *bi,
void *p)
{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
- bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
return 0;
}
return 0;
}
-int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
+int bch2_truncate(struct mnt_idmap *idmap,
+ struct bch_inode_info *inode, struct iattr *iattr)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct address_space *mapping = inode->v.i_mapping;
struct bch_inode_unpacked inode_u;
- struct btree_trans trans;
- struct btree_iter *iter;
u64 new_i_size = iattr->ia_size;
s64 i_sectors_delta = 0;
int ret = 0;
- inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
/*
- * fetch current on disk i_size: inode is locked, i_size can only
- * increase underneath us:
+ * If the truncate call will change the size of the file, the
+ * cmtimes should be updated. If the size will not change, we
+ * do not need to update the cmtimes.
*/
- bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
- ret = PTR_ERR_OR_ZERO(iter);
- bch2_trans_exit(&trans);
+ if (iattr->ia_size != inode->v.i_size) {
+ if (!(iattr->ia_valid & ATTR_MTIME))
+ ktime_get_coarse_real_ts64(&iattr->ia_mtime);
+ if (!(iattr->ia_valid & ATTR_CTIME))
+ ktime_get_coarse_real_ts64(&iattr->ia_ctime);
+ iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
+ }
+ inode_dio_wait(&inode->v);
+ bch2_pagecache_block_get(inode);
+
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
if (ret)
goto err;
if (ret)
goto err;
- BUG_ON(inode->v.i_size < inode_u.bi_size);
+ WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+ inode->v.i_size < inode_u.bi_size,
+ "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
+ (u64) inode->v.i_size, inode_u.bi_size);
if (iattr->ia_size > inode->v.i_size) {
- ret = bch2_extend(inode, &inode_u, iattr);
+ ret = bch2_extend(idmap, inode, &inode_u, iattr);
goto err;
}
- ret = bch2_truncate_page(inode, iattr->ia_size);
- if (unlikely(ret))
+ iattr->ia_valid &= ~ATTR_SIZE;
+
+ ret = bch2_truncate_folio(inode, iattr->ia_size);
+ if (unlikely(ret < 0))
goto err;
/*
truncate_setsize(&inode->v, iattr->ia_size);
- ret = bch2_fpunch(c, inode->v.i_ino,
+ ret = bch2_fpunch(c, inode_inum(inode),
round_up(iattr->ia_size, block_bytes(c)) >> 9,
- U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
+ U64_MAX, &i_sectors_delta);
i_sectors_acct(c, inode, NULL, i_sectors_delta);
+ bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
+ !bch2_journal_error(&c->journal), c,
+ "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks,
+ inode->ei_inode.bi_sectors);
if (unlikely(ret))
goto err;
- setattr_copy(&inode->v, iattr);
-
mutex_lock(&inode->ei_update_lock);
- ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
- ATTR_MTIME|ATTR_CTIME);
+ ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
mutex_unlock(&inode->ei_update_lock);
+
+ ret = bch2_setattr_nonsize(idmap, inode, iattr);
err:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- return ret;
+ bch2_pagecache_block_put(inode);
+ return bch2_err_class(ret);
}
/* fallocate: */
+static int inode_update_times_fn(struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi, void *p)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+
+ bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
+ return 0;
+}
+
static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- u64 discard_start = round_up(offset, block_bytes(c)) >> 9;
- u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9;
+ u64 end = offset + len;
+ u64 block_start = round_up(offset, block_bytes(c));
+ u64 block_end = round_down(end, block_bytes(c));
+ bool truncated_last_page;
int ret = 0;
- inode_lock(&inode->v);
- inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
- ret = __bch2_truncate_page(inode,
- offset >> PAGE_SHIFT,
- offset, offset + len);
- if (unlikely(ret))
+ ret = bch2_truncate_folios(inode, offset, end);
+ if (unlikely(ret < 0))
goto err;
- if (offset >> PAGE_SHIFT !=
- (offset + len) >> PAGE_SHIFT) {
- ret = __bch2_truncate_page(inode,
- (offset + len) >> PAGE_SHIFT,
- offset, offset + len);
- if (unlikely(ret))
- goto err;
- }
+ truncated_last_page = ret;
- truncate_pagecache_range(&inode->v, offset, offset + len - 1);
+ truncate_pagecache_range(&inode->v, offset, end - 1);
- if (discard_start < discard_end) {
+ if (block_start < block_end) {
s64 i_sectors_delta = 0;
- ret = bch2_fpunch(c, inode->v.i_ino,
- discard_start, discard_end,
- &inode->ei_journal_seq,
+ ret = bch2_fpunch(c, inode_inum(inode),
+ block_start >> 9, block_end >> 9,
&i_sectors_delta);
i_sectors_acct(c, inode, NULL, i_sectors_delta);
}
-err:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- inode_unlock(&inode->v);
+ mutex_lock(&inode->ei_update_lock);
+ if (end >= inode->v.i_size && !truncated_last_page) {
+ ret = bch2_write_inode_size(c, inode, inode->v.i_size,
+ ATTR_MTIME|ATTR_CTIME);
+ } else {
+ ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+ ATTR_MTIME|ATTR_CTIME);
+ }
+ mutex_unlock(&inode->ei_update_lock);
+err:
return ret;
}
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct address_space *mapping = inode->v.i_mapping;
- struct bkey_on_stack copy;
+ struct bkey_buf copy;
struct btree_trans trans;
- struct btree_iter *src, *dst, *del = NULL;
+ struct btree_iter src, dst, del;
loff_t shift, new_size;
- u64 src_start;
- int ret;
-
- if ((offset | len) & (block_bytes(c) - 1))
- return -EINVAL;
-
- bkey_on_stack_init(©);
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
-
- /*
- * We need i_mutex to keep the page cache consistent with the extents
- * btree, and the btree consistent with i_size - we don't need outside
- * locking for the extents btree itself, because we're using linked
- * iterators
- */
- inode_lock(&inode->v);
- inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ u64 src_start;
+ int ret = 0;
+
+ if ((offset | len) & (block_bytes(c) - 1))
+ return -EINVAL;
if (insert) {
- ret = -EFBIG;
if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
- goto err;
+ return -EFBIG;
- ret = -EINVAL;
if (offset >= inode->v.i_size)
- goto err;
+ return -EINVAL;
src_start = U64_MAX;
shift = len;
} else {
- ret = -EINVAL;
if (offset + len >= inode->v.i_size)
- goto err;
+ return -EINVAL;
src_start = offset + len;
shift = -len;
ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
if (ret)
- goto err;
+ return ret;
if (insert) {
i_size_write(&inode->v, new_size);
} else {
s64 i_sectors_delta = 0;
- ret = bch2_fpunch(c, inode->v.i_ino,
+ ret = bch2_fpunch(c, inode_inum(inode),
offset >> 9, (offset + len) >> 9,
- &inode->ei_journal_seq,
&i_sectors_delta);
i_sectors_acct(c, inode, NULL, i_sectors_delta);
if (ret)
- goto err;
+ return ret;
}
- src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ bch2_bkey_buf_init(©);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+ bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
POS(inode->v.i_ino, src_start >> 9),
BTREE_ITER_INTENT);
- BUG_ON(IS_ERR_OR_NULL(src));
+ bch2_trans_copy_iter(&dst, &src);
+ bch2_trans_copy_iter(&del, &src);
- dst = bch2_trans_copy_iter(&trans, src);
- BUG_ON(IS_ERR_OR_NULL(dst));
-
- while (1) {
+ while (ret == 0 ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(c, 0);
struct bkey_i delete;
struct bpos next_pos;
struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
struct bpos atomic_end;
- unsigned commit_flags = 0;
+ unsigned trigger_flags = 0;
+ u32 snapshot;
+
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans,
+ inode->ei_subvol, &snapshot);
+ if (ret)
+ continue;
+
+ bch2_btree_iter_set_snapshot(&src, snapshot);
+ bch2_btree_iter_set_snapshot(&dst, snapshot);
+ bch2_btree_iter_set_snapshot(&del, snapshot);
+
+ bch2_trans_begin(&trans);
k = insert
- ? bch2_btree_iter_peek_prev(src)
- : bch2_btree_iter_peek(src);
+ ? bch2_btree_iter_peek_prev(&src)
+ : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
if ((ret = bkey_err(k)))
- goto bkey_err;
+ continue;
if (!k.k || k.k->p.inode != inode->v.i_ino)
break;
- BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));
-
if (insert &&
- bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
+ bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
break;
reassemble:
- bkey_on_stack_reassemble(©, c, k);
+ bch2_bkey_buf_reassemble(©, c, k);
if (insert &&
- bkey_cmp(bkey_start_pos(k.k), move_pos) < 0) {
+ bkey_lt(bkey_start_pos(k.k), move_pos))
bch2_cut_front(move_pos, copy.k);
- bch2_btree_iter_set_pos(src, bkey_start_pos(©.k->k));
- }
copy.k->k.p.offset += shift >> 9;
- bch2_btree_iter_set_pos(dst, bkey_start_pos(©.k->k));
+ bch2_btree_iter_set_pos(&dst, bkey_start_pos(©.k->k));
- ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
+ ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
if (ret)
- goto bkey_err;
+ continue;
- if (bkey_cmp(atomic_end, copy.k->k.p)) {
+ if (!bkey_eq(atomic_end, copy.k->k.p)) {
if (insert) {
move_pos = atomic_end;
move_pos.offset -= shift >> 9;
}
bkey_init(&delete.k);
- delete.k.p = src->pos;
- bch2_key_resize(&delete.k, copy.k->k.size);
+ delete.k.p = copy.k->k.p;
+ delete.k.size = copy.k->k.size;
+ delete.k.p.offset -= shift >> 9;
+ bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
- /*
- * If the new and old keys overlap (because we're moving an
- * extent that's bigger than the amount we're collapsing by),
- * we need to trim the delete key here so they don't overlap
- * because overlaps on insertions aren't handled before
- * triggers are run, so the overwrite will get double counted
- * by the triggers machinery:
- */
- if (insert &&
- bkey_cmp(bkey_start_pos(©.k->k), delete.k.p) < 0) {
- bch2_cut_back(bkey_start_pos(©.k->k), &delete);
- } else if (!insert &&
- bkey_cmp(copy.k->k.p,
- bkey_start_pos(&delete.k)) > 0) {
- bch2_cut_front(copy.k->k.p, &delete);
-
- del = bch2_trans_copy_iter(&trans, src);
- BUG_ON(IS_ERR_OR_NULL(del));
-
- bch2_btree_iter_set_pos(del,
- bkey_start_pos(&delete.k));
- }
-
- bch2_trans_update(&trans, dst, copy.k);
- bch2_trans_update(&trans, del ?: src, &delete);
-
- if (copy.k->k.size == k.k->size) {
- /*
- * If we're moving the entire extent, we can skip
- * running triggers:
- */
- commit_flags |= BTREE_INSERT_NOMARK;
- } else {
+ if (copy.k->k.size != k.k->size) {
/* We might end up splitting compressed extents: */
unsigned nr_ptrs =
bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
BUG_ON(ret);
}
- ret = bch2_trans_commit(&trans, &disk_res,
- &inode->ei_journal_seq,
- BTREE_INSERT_NOFAIL|
- commit_flags);
+ ret = bch2_btree_iter_traverse(&del) ?:
+ bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
+ bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
+ bch2_trans_commit(&trans, &disk_res, NULL,
+ BTREE_INSERT_NOFAIL);
bch2_disk_reservation_put(c, &disk_res);
-bkey_err:
- if (del)
- bch2_trans_iter_put(&trans, del);
- del = NULL;
if (!ret)
- bch2_btree_iter_set_pos(src, next_pos);
-
- if (ret == -EINTR)
- ret = 0;
- if (ret)
- goto err;
-
- bch2_trans_cond_resched(&trans);
+ bch2_btree_iter_set_pos(&src, next_pos);
}
- bch2_trans_unlock(&trans);
+ bch2_trans_iter_exit(&trans, &del);
+ bch2_trans_iter_exit(&trans, &dst);
+ bch2_trans_iter_exit(&trans, &src);
+ bch2_trans_exit(&trans);
+ bch2_bkey_buf_exit(©, c);
+ if (ret)
+ return ret;
+
+ mutex_lock(&inode->ei_update_lock);
if (!insert) {
i_size_write(&inode->v, new_size);
- mutex_lock(&inode->ei_update_lock);
ret = bch2_write_inode_size(c, inode, new_size,
ATTR_MTIME|ATTR_CTIME);
- mutex_unlock(&inode->ei_update_lock);
+ } else {
+ /* We need an inode update to update bi_journal_seq for fsync: */
+ ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+ ATTR_MTIME|ATTR_CTIME);
}
-err:
- bch2_trans_exit(&trans);
- bkey_on_stack_exit(©, c);
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- inode_unlock(&inode->v);
+ mutex_unlock(&inode->ei_update_lock);
return ret;
}
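/*
 * A sketch of the shift arithmetic above (illustrative): the sign of
 * 'shift' picks the direction extents move. Insert walks extents backwards
 * from the end of the inode and shifts them right; collapse punches out
 * [offset, offset + len) first and shifts the tail left:
 *
 *     shift = insert ? len : -len;
 *     ...
 *     copy.k->k.p.offset += shift >> 9;
 *     delete.k.p.offset  -= shift >> 9;
 */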
-static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
- loff_t offset, loff_t len)
+static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
+ u64 start_sector, u64 end_sector)
{
- struct address_space *mapping = inode->v.i_mapping;
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_trans trans;
- struct btree_iter *iter;
- struct bpos end_pos;
- loff_t end = offset + len;
- loff_t block_start = round_down(offset, block_bytes(c));
- loff_t block_end = round_up(end, block_bytes(c));
- unsigned sectors;
- unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
- int ret;
-
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
-
- inode_lock(&inode->v);
- inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
-
- if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
- ret = inode_newsize_ok(&inode->v, end);
- if (ret)
- goto err;
- }
-
- if (mode & FALLOC_FL_ZERO_RANGE) {
- ret = __bch2_truncate_page(inode,
- offset >> PAGE_SHIFT,
- offset, end);
-
- if (!ret &&
- offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
- ret = __bch2_truncate_page(inode,
- end >> PAGE_SHIFT,
- offset, end);
-
- if (unlikely(ret))
- goto err;
+ struct btree_iter iter;
+ struct bpos end_pos = POS(inode->v.i_ino, end_sector);
+ struct bch_io_opts opts;
+ int ret = 0;
- truncate_pagecache_range(&inode->v, offset, end - 1);
- }
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
- POS(inode->v.i_ino, block_start >> 9),
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+ POS(inode->v.i_ino, start_sector),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- end_pos = POS(inode->v.i_ino, block_end >> 9);
- while (bkey_cmp(iter->pos, end_pos) < 0) {
+ while (!ret && bkey_lt(iter.pos, end_pos)) {
s64 i_sectors_delta = 0;
- struct disk_reservation disk_res = { 0 };
struct quota_res quota_res = { 0 };
- struct bkey_i_reservation reservation;
struct bkey_s_c k;
+ unsigned sectors;
+ bool is_allocation;
+ u64 hole_start, hole_end;
+ u32 snapshot;
+
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans,
+ inode->ei_subvol, &snapshot);
+ if (ret)
+ goto bkey_err;
- bch2_trans_reset(&trans, TRANS_RESET_MEM);
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(&iter);
if ((ret = bkey_err(k)))
goto bkey_err;
+ hole_start = iter.pos.offset;
+ hole_end = bpos_min(k.k->p, end_pos).offset;
+ is_allocation = bkey_extent_is_allocation(k.k);
+
/* already reserved */
- if (k.k->type == KEY_TYPE_reservation &&
- bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
- bch2_btree_iter_next_slot(iter);
+ if (bkey_extent_is_reservation(k) &&
+ bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
+ bch2_btree_iter_advance(&iter);
continue;
}
if (bkey_extent_is_data(k.k) &&
!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_next_slot(iter);
+ bch2_btree_iter_advance(&iter);
continue;
}
- bkey_reservation_init(&reservation.k_i);
- reservation.k.type = KEY_TYPE_reservation;
- reservation.k.p = k.k->p;
- reservation.k.size = k.k->size;
+ if (!(mode & FALLOC_FL_ZERO_RANGE)) {
+ ret = drop_locks_do(&trans,
+ (bch2_clamp_data_hole(&inode->v,
+ &hole_start,
+ &hole_end,
+ opts.data_replicas), 0));
+ bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
+
+ if (ret)
+ goto bkey_err;
- bch2_cut_front(iter->pos, &reservation.k_i);
- bch2_cut_back(end_pos, &reservation.k_i);
+ if (hole_start == hole_end)
+ continue;
+ }
- sectors = reservation.k.size;
- reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
+ sectors = hole_end - hole_start;
- if (!bkey_extent_is_allocation(k.k)) {
+ if (!is_allocation) {
ret = bch2_quota_reservation_add(c, inode,
- "a_res,
- sectors, true);
+ "a_res, sectors, true);
if (unlikely(ret))
goto bkey_err;
}
- if (reservation.v.nr_replicas < replicas ||
- bch2_bkey_sectors_compressed(k)) {
- ret = bch2_disk_reservation_get(c, &disk_res, sectors,
- replicas, 0);
- if (unlikely(ret))
- goto bkey_err;
-
- reservation.v.nr_replicas = disk_res.nr_replicas;
- }
+ ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
+ sectors, opts, &i_sectors_delta,
+ writepoint_hashed((unsigned long) current));
+ if (ret)
+ goto bkey_err;
- ret = bch2_extent_update(&trans, iter, &reservation.k_i,
- &disk_res, &inode->ei_journal_seq,
- 0, &i_sectors_delta);
i_sectors_acct(c, inode, "a_res, i_sectors_delta);
+
+ drop_locks_do(&trans,
+ (mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
bkey_err:
bch2_quota_reservation_put(c, inode, "a_res);
- bch2_disk_reservation_put(c, &disk_res);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = 0;
- if (ret)
- goto err;
}
- /*
- * Do we need to extend the file?
- *
- * If we zeroed up to the end of the file, we dropped whatever writes
- * were going to write out the current i_size, so we have to extend
- * manually even if FL_KEEP_SIZE was set:
- */
- if (end >= inode->v.i_size &&
- (!(mode & FALLOC_FL_KEEP_SIZE) ||
- (mode & FALLOC_FL_ZERO_RANGE))) {
- struct btree_iter *inode_iter;
- struct bch_inode_unpacked inode_u;
-
- do {
- bch2_trans_begin(&trans);
- inode_iter = bch2_inode_peek(&trans, &inode_u,
- inode->v.i_ino, 0);
- ret = PTR_ERR_OR_ZERO(inode_iter);
- } while (ret == -EINTR);
+ if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
+ struct quota_res quota_res = { 0 };
+ s64 i_sectors_delta = 0;
- bch2_trans_unlock(&trans);
+ bch2_fpunch_at(&trans, &iter, inode_inum(inode),
+ end_sector, &i_sectors_delta);
+ i_sectors_acct(c, inode, "a_res, i_sectors_delta);
+ bch2_quota_reservation_put(c, inode, "a_res);
+ }
- if (ret)
- goto err;
+ bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_exit(&trans);
+ return ret;
+}
- /*
- * Sync existing appends before extending i_size,
- * as in bch2_extend():
- */
- ret = filemap_write_and_wait_range(mapping,
- inode_u.bi_size, S64_MAX);
+static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
+ loff_t offset, loff_t len)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ u64 end = offset + len;
+ u64 block_start = round_down(offset, block_bytes(c));
+ u64 block_end = round_up(end, block_bytes(c));
+ bool truncated_last_page = false;
+ int ret, ret2 = 0;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
+ ret = inode_newsize_ok(&inode->v, end);
if (ret)
- goto err;
+ return ret;
+ }
+
+ if (mode & FALLOC_FL_ZERO_RANGE) {
+ ret = bch2_truncate_folios(inode, offset, end);
+ if (unlikely(ret < 0))
+ return ret;
+
+ truncated_last_page = ret;
+
+ truncate_pagecache_range(&inode->v, offset, end - 1);
+
+ block_start = round_up(offset, block_bytes(c));
+ block_end = round_down(end, block_bytes(c));
+ }
+
+ ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
+
+ /*
+ * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
+ * so that the VFS cache i_size is consistent with the btree i_size:
+ */
+ if (ret &&
+ !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
+ return ret;
+
+ if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
+ end = inode->v.i_size;
- if (mode & FALLOC_FL_KEEP_SIZE)
- end = inode->v.i_size;
- else
- i_size_write(&inode->v, end);
+ if (end >= inode->v.i_size &&
+ (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
+ !(mode & FALLOC_FL_KEEP_SIZE))) {
+ spin_lock(&inode->v.i_lock);
+ i_size_write(&inode->v, end);
+ spin_unlock(&inode->v.i_lock);
mutex_lock(&inode->ei_update_lock);
- ret = bch2_write_inode_size(c, inode, end, 0);
+ ret2 = bch2_write_inode_size(c, inode, end, 0);
mutex_unlock(&inode->ei_update_lock);
}
-err:
- bch2_trans_exit(&trans);
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- inode_unlock(&inode->v);
- return ret;
+
+ return ret ?: ret2;
}
long bch2_fallocate_dispatch(struct file *file, int mode,
loff_t offset, loff_t len)
{
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
long ret;
- if (!percpu_ref_tryget(&c->writes))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
return -EROFS;
+ inode_lock(&inode->v);
+ inode_dio_wait(&inode->v);
+ bch2_pagecache_block_get(inode);
+
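+ /* file_modified() updates c/mtime and strips setuid/setgid bits: */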
+ ret = file_modified(file);
+ if (ret)
+ goto err;
+
if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
ret = bchfs_fallocate(inode, mode, offset, len);
else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
ret = bchfs_fpunch(inode, offset, len);
else if (mode == FALLOC_FL_INSERT_RANGE)
ret = bchfs_fcollapse_finsert(inode, offset, len, true);
else if (mode == FALLOC_FL_COLLAPSE_RANGE)
ret = bchfs_fcollapse_finsert(inode, offset, len, false);
else
ret = -EOPNOTSUPP;
+err:
+ bch2_pagecache_block_put(inode);
+ inode_unlock(&inode->v);
+ bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
- percpu_ref_put(&c->writes);
-
- return ret;
+ return bch2_err_class(ret);
}
-static void mark_range_unallocated(struct bch_inode_info *inode,
- loff_t start, loff_t end)
+/*
+ * Take a quota reservation for unallocated blocks in a given file range.
+ * Does not check the pagecache.
+ */
+static int quota_reserve_range(struct bch_inode_info *inode,
+ struct quota_res *res,
+ u64 start, u64 end)
{
- pgoff_t index = start >> PAGE_SHIFT;
- pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
- struct pagevec pvec;
-
- pagevec_init(&pvec);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 snapshot;
+ u64 sectors = end - start;
+ u64 pos = start;
+ int ret;
- do {
- unsigned nr_pages, i, j;
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
- nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
- &index, end_index);
- if (nr_pages == 0)
- break;
+ ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
+ if (ret)
+ goto err;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
- struct bch_page_state *s;
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, pos, snapshot), 0);
+
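+ /*
+ * Walk the extents in the range, subtracting sectors that are
+ * already allocated; only the remainder needs a reservation:
+ */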
+ while (!(ret = btree_trans_too_many_iters(&trans)) &&
+ (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
+ !(ret = bkey_err(k))) {
+ if (bkey_extent_is_allocation(k.k)) {
+ u64 s = min(end, k.k->p.offset) -
+ max(start, bkey_start_offset(k.k));
+ BUG_ON(s > sectors);
+ sectors -= s;
+ }
+ bch2_btree_iter_advance(&iter);
+ }
+ pos = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- lock_page(page);
- s = bch2_page_state(page);
+ bch2_trans_exit(&trans);
- if (s) {
- spin_lock(&s->lock);
- for (j = 0; j < PAGE_SECTORS; j++)
- s->s[j].nr_replicas = 0;
- spin_unlock(&s->lock);
- }
+ if (ret)
+ return ret;
- unlock_page(page);
- }
- pagevec_release(&pvec);
- } while (index <= end_index);
+ return bch2_quota_reservation_add(c, inode, res, sectors, true);
}
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
struct file *file_dst, loff_t pos_dst,
loff_t len, unsigned remap_flags)
{
struct bch_inode_info *src = file_bch_inode(file_src);
struct bch_inode_info *dst = file_bch_inode(file_dst);
struct bch_fs *c = src->v.i_sb->s_fs_info;
+ struct quota_res quota_res = { 0 };
s64 i_sectors_delta = 0;
u64 aligned_len;
loff_t ret = 0;
bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
- file_update_time(file_dst);
-
inode_dio_wait(&src->v);
inode_dio_wait(&dst->v);
if (ret)
goto err;
- mark_range_unallocated(src, pos_src, pos_src + aligned_len);
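+ /* take a quota reservation for the destination range up front: */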
+ ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
+ (pos_dst + aligned_len) >> 9);
+ if (ret)
+ goto err;
+
+ file_update_time(file_dst);
+
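+ /*
+ * The source extents are about to become shared: clear the
+ * pagecache's per-sector allocation state so that future writes
+ * take new disk reservations:
+ */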
+ mark_pagecache_unallocated(src, pos_src >> 9,
+ (pos_src + aligned_len) >> 9);
ret = bch2_remap_range(c,
- POS(dst->v.i_ino, pos_dst >> 9),
- POS(src->v.i_ino, pos_src >> 9),
+ inode_inum(dst), pos_dst >> 9,
+ inode_inum(src), pos_src >> 9,
aligned_len >> 9,
- &dst->ei_journal_seq,
pos_dst + len, &i_sectors_delta);
if (ret < 0)
goto err;
/*
 * Due to alignment, we might have remapped slightly more than requested:
 */
ret = min((u64) ret << 9, (u64) len);
- /* XXX get a quota reservation */
- i_sectors_acct(c, dst, NULL, i_sectors_delta);
+ i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
spin_lock(&dst->v.i_lock);
if (pos_dst + ret > dst->v.i_size)
i_size_write(&dst->v, pos_dst + ret);
spin_unlock(&dst->v.i_lock);
+
+ if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
+ IS_SYNC(file_inode(file_dst)))
+ ret = bch2_flush_inode(c, dst);
err:
+ bch2_quota_reservation_put(c, dst, &quota_res);
bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
- return ret;
+ return bch2_err_class(ret);
}
/* fseek: */
-static int page_data_offset(struct page *page, unsigned offset)
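+/*
+ * Returns the byte offset within the folio of the first dirty sector at or
+ * after @pos with at least @min_replicas replicas, or -1 if there is none:
+ */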
+static int folio_data_offset(struct folio *folio, loff_t pos,
+ unsigned min_replicas)
{
- struct bch_page_state *s = bch2_page_state(page);
- unsigned i;
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, sectors = folio_sectors(folio);
if (s)
- for (i = offset >> 9; i < PAGE_SECTORS; i++)
- if (s->s[i].state >= SECTOR_DIRTY)
- return i << 9;
+ for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
+ if (s->s[i].state >= SECTOR_dirty &&
+ s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
+ return i << SECTOR_SHIFT;
return -1;
}
static loff_t bch2_seek_pagecache_data(struct inode *vinode,
loff_t start_offset,
- loff_t end_offset)
+ loff_t end_offset,
+ unsigned min_replicas)
{
- struct address_space *mapping = vinode->i_mapping;
- struct page *page;
+ struct folio_batch fbatch;
pgoff_t start_index = start_offset >> PAGE_SHIFT;
pgoff_t end_index = end_offset >> PAGE_SHIFT;
pgoff_t index = start_index;
+ unsigned i;
loff_t ret;
int offset;
- while (index <= end_index) {
- if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
- lock_page(page);
+ folio_batch_init(&fbatch);
- offset = page_data_offset(page,
- page->index == start_index
- ? start_offset & (PAGE_SIZE - 1)
- : 0);
+ while (filemap_get_folios(vinode->i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ folio_lock(folio);
+ offset = folio_data_offset(folio,
+ max(folio_pos(folio), start_offset),
+ min_replicas);
if (offset >= 0) {
- ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
- offset,
+ ret = clamp(folio_pos(folio) + offset,
start_offset, end_offset);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_batch_release(&fbatch);
return ret;
}
-
- unlock_page(page);
- put_page(page);
- } else {
- break;
+ folio_unlock(folio);
}
+ folio_batch_release(&fbatch);
+ cond_resched();
}
return end_offset;
}

static loff_t bch2_seek_data(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_iter iter;
struct bkey_s_c k;
+ subvol_inum inum = inode_inum(inode);
u64 isize, next_data = MAX_LFS_FILESIZE;
+ u32 snapshot;
int ret;
isize = i_size_read(&inode->v);
if (offset >= isize)
return -ENXIO;
bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
- POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
- if (k.k->p.inode != inode->v.i_ino) {
- break;
- } else if (bkey_extent_is_data(k.k)) {
+ ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, offset >> 9, snapshot),
+ POS(inode->v.i_ino, U64_MAX),
+ 0, k, ret) {
+ if (bkey_extent_is_data(k.k)) {
next_data = max(offset, bkey_start_offset(k.k) << 9);
break;
} else if (k.k->p.offset >> 9 > isize)
break;
}
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- ret = bch2_trans_exit(&trans) ?: ret;
+ bch2_trans_exit(&trans);
if (ret)
return ret;
if (next_data > offset)
next_data = bch2_seek_pagecache_data(&inode->v,
- offset, next_data);
+ offset, next_data, 0);
if (next_data >= isize)
return -ENXIO;
return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
-static int __page_hole_offset(struct page *page, unsigned offset)
-{
- struct bch_page_state *s = bch2_page_state(page);
- unsigned i;
-
- if (!s)
- return 0;
-
- for (i = offset >> 9; i < PAGE_SECTORS; i++)
- if (s->s[i].state < SECTOR_DIRTY)
- return i << 9;
-
- return -1;
-}
-
-static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
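+/*
+ * Returns true if *offset now points at a hole in the pagecache; returns
+ * false, advancing *offset past the folio, if the folio is entirely data:
+ */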
+static bool folio_hole_offset(struct address_space *mapping, loff_t *offset,
+ unsigned min_replicas)
{
- pgoff_t index = offset >> PAGE_SHIFT;
- struct page *page;
- int pg_offset;
- loff_t ret = -1;
+ struct folio *folio;
+ struct bch_folio *s;
+ unsigned i, sectors;
+ bool ret = true;
- page = find_lock_entry(mapping, index);
- if (!page || xa_is_value(page))
- return offset;
+ folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
+ if (IS_ERR_OR_NULL(folio))
+ return true;
- pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
- if (pg_offset >= 0)
- ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
+ s = bch2_folio(folio);
+ if (!s)
+ goto unlock;
- unlock_page(page);
+ sectors = folio_sectors(folio);
+ for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
+ if (s->s[i].state < SECTOR_dirty ||
+ s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
+ *offset = max(*offset,
+ folio_pos(folio) + (i << SECTOR_SHIFT));
+ goto unlock;
+ }
+ *offset = folio_end_pos(folio);
+ ret = false;
+unlock:
+ folio_unlock(folio);
return ret;
}
static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
loff_t start_offset,
- loff_t end_offset)
+ loff_t end_offset,
+ unsigned min_replicas)
{
struct address_space *mapping = vinode->i_mapping;
- loff_t offset = start_offset, hole;
+ loff_t offset = start_offset;
- while (offset < end_offset) {
- hole = page_hole_offset(mapping, offset);
- if (hole >= 0 && hole <= end_offset)
- return max(start_offset, hole);
+ while (offset < end_offset &&
+ !folio_hole_offset(mapping, &offset, min_replicas))
+ ;
- offset += PAGE_SIZE;
- offset &= PAGE_MASK;
- }
+ return min(offset, end_offset);
+}
- return end_offset;
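+/*
+ * Clamp a hole found in the btree against the pagecache: advance the start
+ * past dirty pagecache data, and end the hole at the next dirty folio:
+ */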
+static void bch2_clamp_data_hole(struct inode *inode,
+ u64 *hole_start,
+ u64 *hole_end,
+ unsigned min_replicas)
+{
+ *hole_start = bch2_seek_pagecache_hole(inode,
+ *hole_start << 9, *hole_end << 9, min_replicas) >> 9;
+
+ if (*hole_start == *hole_end)
+ return;
+
+ *hole_end = bch2_seek_pagecache_data(inode,
+ *hole_start << 9, *hole_end << 9, min_replicas) >> 9;
}
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_iter iter;
struct bkey_s_c k;
+ subvol_inum inum = inode_inum(inode);
u64 isize, next_hole = MAX_LFS_FILESIZE;
+ u32 snapshot;
int ret;
isize = i_size_read(&inode->v);
if (offset >= isize)
return -ENXIO;
bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
- POS(inode->v.i_ino, offset >> 9),
+ ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, offset >> 9, snapshot),
BTREE_ITER_SLOTS, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
next_hole = bch2_seek_pagecache_hole(&inode->v,
- offset, MAX_LFS_FILESIZE);
+ offset, MAX_LFS_FILESIZE, 0);
break;
} else if (!bkey_extent_is_data(k.k)) {
next_hole = bch2_seek_pagecache_hole(&inode->v,
max(offset, bkey_start_offset(k.k) << 9),
- k.k->p.offset << 9);
+ k.k->p.offset << 9, 0);
if (next_hole < k.k->p.offset << 9)
break;
offset = max(offset, bkey_start_offset(k.k) << 9);
}
}
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- ret = bch2_trans_exit(&trans) ?: ret;
+ bch2_trans_exit(&trans);
if (ret)
return ret;
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
+ loff_t ret;
+
switch (whence) {
case SEEK_SET:
case SEEK_CUR:
case SEEK_END:
- return generic_file_llseek(file, offset, whence);
+ ret = generic_file_llseek(file, offset, whence);
+ break;
case SEEK_DATA:
- return bch2_seek_data(file, offset);
+ ret = bch2_seek_data(file, offset);
+ break;
case SEEK_HOLE:
- return bch2_seek_hole(file, offset);
+ ret = bch2_seek_hole(file, offset);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EINVAL;
+ return bch2_err_class(ret);
}
void bch2_fs_fsio_exit(struct bch_fs *c)
{
+ bioset_exit(&c->nocow_flush_bioset);
bioset_exit(&c->dio_write_bioset);
bioset_exit(&c->dio_read_bioset);
bioset_exit(&c->writepage_bioset);
int bch2_fs_fsio_init(struct bch_fs *c)
{
- int ret = 0;
-
- pr_verbose_init(c->opts, "");
-
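+ /* distinct private ENOMEM codes identify which bioset allocation failed: */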
if (bioset_init(&c->writepage_bioset,
4, offsetof(struct bch_writepage_io, op.wbio.bio),
- BIOSET_NEED_BVECS) ||
- bioset_init(&c->dio_read_bioset,
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_writepage_bioset_init;
+
+ if (bioset_init(&c->dio_read_bioset,
4, offsetof(struct dio_read, rbio.bio),
- BIOSET_NEED_BVECS) ||
- bioset_init(&c->dio_write_bioset,
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_dio_read_bioset_init;
+
+ if (bioset_init(&c->dio_write_bioset,
4, offsetof(struct dio_write, op.wbio.bio),
BIOSET_NEED_BVECS))
- ret = -ENOMEM;
+ return -BCH_ERR_ENOMEM_dio_write_bioset_init;
- pr_verbose_init(c->opts, "ret %i", ret);
- return ret;
+ if (bioset_init(&c->nocow_flush_bioset,
+ 1, offsetof(struct nocow_flush, bio), 0))
+ return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
+
+ return 0;
}
#endif /* NO_BCACHEFS_FS */