+// SPDX-License-Identifier: GPL-2.0
+#ifndef NO_BCACHEFS_FS
#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
+#include "extents.h"
+#include "extent_update.h"
#include "fs.h"
#include "fs-io.h"
#include "fsck.h"
#include "journal.h"
#include "io.h"
#include "keylist.h"
+#include "quota.h"
+#include "reflink.h"
#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
+#include <linux/rmap.h>
+#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
+
+#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
-struct bio_set *bch2_writepage_bioset;
-struct bio_set *bch2_dio_read_bioset;
-struct bio_set *bch2_dio_write_bioset;
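+/*
+ * Helpers for working with folio positions and sizes in units of 512 byte
+ * sectors:
+ */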
+static inline loff_t folio_end_pos(struct folio *folio)
+{
+ return folio_pos(folio) + folio_size(folio);
+}
+
+static inline size_t folio_sectors(struct folio *folio)
+{
+ return PAGE_SECTORS << folio_order(folio);
+}
+
+static inline loff_t folio_sector(struct folio *folio)
+{
+ return folio_pos(folio) >> 9;
+}
+
+static inline loff_t folio_end_sector(struct folio *folio)
+{
+ return folio_end_pos(folio) >> 9;
+}
+
+typedef DARRAY(struct folio *) folios;
+
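+/*
+ * Get a contiguous run of folios starting at @start: stops at the first hole,
+ * and - if FGP_CREAT is set - stops creating new folios after the first
+ * megabyte. Returns 0 if at least one folio was found:
+ */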
+static int filemap_get_contig_folios_d(struct address_space *mapping,
+ loff_t start, loff_t end,
+ int fgp_flags, gfp_t gfp,
+ folios *folios)
+{
+ struct folio *f;
+ loff_t pos = start;
+ int ret = 0;
+
+ while (pos < end) {
+ if ((u64) pos >= (u64) start + (1ULL << 20))
+ fgp_flags &= ~FGP_CREAT;
+
+ ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
+ if (ret)
+ break;
+
+ f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
+ if (!f)
+ break;
+
+ BUG_ON(folios->nr && folio_pos(f) != pos);
+
+ pos = folio_end_pos(f);
+ darray_push(folios, f);
+ }
+
+ if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
+ ret = -ENOMEM;
+
+ return folios->nr ? 0 : ret;
+}
+
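+/*
+ * Nocow writes don't go through the usual write path, so fsync has to issue
+ * cache flushes directly to every device that has seen a nocow write for this
+ * inode (tracked in ei_devs_need_flush):
+ */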
+struct nocow_flush {
+ struct closure *cl;
+ struct bch_dev *ca;
+ struct bio bio;
+};
+
+static void nocow_flush_endio(struct bio *_bio)
+{
+ struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
+
+ closure_put(bio->cl);
+ percpu_ref_put(&bio->ca->io_ref);
+ bio_put(&bio->bio);
+}
+
+static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct closure *cl)
+{
+ struct nocow_flush *bio;
+ struct bch_dev *ca;
+ struct bch_devs_mask devs;
+ unsigned dev;
+
+ dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
+ if (dev == BCH_SB_MEMBERS_MAX)
+ return;
+
+ devs = inode->ei_devs_need_flush;
+ memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
+
+ for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca && !percpu_ref_tryget(&ca->io_ref))
+ ca = NULL;
+ rcu_read_unlock();
+
+ if (!ca)
+ continue;
+
+ bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
+ REQ_OP_FLUSH,
+ GFP_KERNEL,
+ &c->nocow_flush_bioset),
+ struct nocow_flush, bio);
+ bio->cl = cl;
+ bio->ca = ca;
+ bio->bio.bi_end_io = nocow_flush_endio;
+ closure_bio_submit(&bio->bio, cl);
+ }
+}
+
+static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
+ struct bch_inode_info *inode)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch2_inode_flush_nocow_writes_async(c, inode, &cl);
+ closure_sync(&cl);
+
+ return 0;
+}
+
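+/* Returns true if @bio can't accept @len more bytes: out of bvecs, or bi_size would overflow: */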
+static inline bool bio_full(struct bio *bio, unsigned len)
+{
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
+ return true;
+ if (bio->bi_iter.bi_size > UINT_MAX - len)
+ return true;
+ return false;
+}
+
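+/*
+ * The low bit of current->faults_disabled_mapping flags that locks were
+ * dropped in the fault path (see bch2_page_fault()); mask it off to recover
+ * the address_space pointer:
+ */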
+static inline struct address_space *faults_disabled_mapping(void)
+{
+ return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
+}
+
+static inline void set_fdm_dropped_locks(void)
+{
+ current->faults_disabled_mapping =
+ (void *) (((unsigned long) current->faults_disabled_mapping)|1);
+}
+
+static inline bool fdm_dropped_locks(void)
+{
+ return ((unsigned long) current->faults_disabled_mapping) & 1;
+}
+
+struct quota_res {
+ u64 sectors;
+};
+
+struct bch_writepage_io {
+ struct bch_inode_info *inode;
+
+ /* must be last: */
+ struct bch_write_op op;
+};
+
+struct dio_write {
+ struct kiocb *req;
+ struct address_space *mapping;
+ struct bch_inode_info *inode;
+ struct mm_struct *mm;
+ unsigned loop:1,
+ extending:1,
+ sync:1,
+ flush:1,
+ free_iov:1;
+ struct quota_res quota_res;
+ u64 written;
+
+ struct iov_iter iter;
+ struct iovec inline_vecs[2];
+
+ /* must be last: */
+ struct bch_write_op op;
+};
+
+struct dio_read {
+ struct closure cl;
+ struct kiocb *req;
+ long ret;
+ bool should_dirty;
+ struct bch_read_bio rbio;
+};
/* pagecache_block must be held */
-static int write_invalidate_inode_pages_range(struct address_space *mapping,
+static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
loff_t start, loff_t end)
{
int ret;
/*
* XXX: the way this is currently implemented, we can spin if a process
* is continually redirtying a specific page
*/
do {
- if (!mapping->nrpages &&
- !mapping->nrexceptional)
+ if (!mapping->nrpages)
return 0;
ret = filemap_write_and_wait_range(mapping, start, end);
return ret;
}
-/* i_size updates: */
-
-static int inode_set_size(struct bch_inode_info *ei,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- loff_t *new_i_size = p;
-
- lockdep_assert_held(&ei->update_lock);
+/* quotas */
- bi->i_size = *new_i_size;
+#ifdef CONFIG_BCACHEFS_QUOTA
- if (atomic_long_read(&ei->i_size_dirty_count))
- bi->i_flags |= BCH_INODE_I_SIZE_DIRTY;
- else
- bi->i_flags &= ~BCH_INODE_I_SIZE_DIRTY;
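+/* must be called with ei_quota_lock held: */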
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
+{
+ BUG_ON(res->sectors > inode->ei_quota_reserved);
- return 0;
+ bch2_quota_acct(c, inode->ei_qid, Q_SPC,
+ -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
+ inode->ei_quota_reserved -= res->sectors;
+ res->sectors = 0;
}
-static int __must_check bch2_write_inode_size(struct bch_fs *c,
- struct bch_inode_info *ei,
- loff_t new_size)
+static void bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
{
- return __bch2_write_inode(c, ei, inode_set_size, &new_size);
+ if (res->sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __bch2_quota_reservation_put(c, inode, res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
}
-static inline void i_size_dirty_put(struct bch_inode_info *ei)
+static int bch2_quota_reservation_add(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res,
+ u64 sectors,
+ bool check_enospc)
{
- atomic_long_dec_bug(&ei->i_size_dirty_count);
-}
+ int ret;
-static inline void i_size_dirty_get(struct bch_inode_info *ei)
-{
- lockdep_assert_held(&ei->vfs_inode.i_rwsem);
+ mutex_lock(&inode->ei_quota_lock);
+ ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
+ check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
+ if (likely(!ret)) {
+ inode->ei_quota_reserved += sectors;
+ res->sectors += sectors;
+ }
+ mutex_unlock(&inode->ei_quota_lock);
- atomic_long_inc(&ei->i_size_dirty_count);
+ return ret;
}
-/* i_sectors accounting: */
+#else
+
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res) {}
+
+static void bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res) {}
-static enum extent_insert_hook_ret
-i_sectors_hook_fn(struct extent_insert_hook *hook,
- struct bpos committed_pos,
- struct bpos next_pos,
- struct bkey_s_c k,
- const struct bkey_i *insert)
+static int bch2_quota_reservation_add(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res,
+ u64 sectors,
+ bool check_enospc)
{
- struct i_sectors_hook *h = container_of(hook,
- struct i_sectors_hook, hook);
- s64 sectors = next_pos.offset - committed_pos.offset;
- int sign = bkey_extent_is_allocation(&insert->k) -
- (k.k && bkey_extent_is_allocation(k.k));
+ return 0;
+}
- EBUG_ON(!(h->ei->i_flags & BCH_INODE_I_SECTORS_DIRTY));
- EBUG_ON(!atomic_long_read(&h->ei->i_sectors_dirty_count));
+#endif
- h->sectors += sectors * sign;
+/* i_size updates: */
- return BTREE_HOOK_DO_INSERT;
-}
+struct inode_new_size {
+ loff_t new_size;
+ u64 now;
+ unsigned fields;
+};
-static int inode_set_i_sectors_dirty(struct bch_inode_info *ei,
- struct bch_inode_unpacked *bi, void *p)
+static int inode_set_size(struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ void *p)
{
- BUG_ON(bi->i_flags & BCH_INODE_I_SECTORS_DIRTY);
+ struct inode_new_size *s = p;
+
+ bi->bi_size = s->new_size;
+ if (s->fields & ATTR_ATIME)
+ bi->bi_atime = s->now;
+ if (s->fields & ATTR_MTIME)
+ bi->bi_mtime = s->now;
+ if (s->fields & ATTR_CTIME)
+ bi->bi_ctime = s->now;
- bi->i_flags |= BCH_INODE_I_SECTORS_DIRTY;
return 0;
}
-static int inode_clear_i_sectors_dirty(struct bch_inode_info *ei,
- struct bch_inode_unpacked *bi,
- void *p)
+int __must_check bch2_write_inode_size(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ loff_t new_size, unsigned fields)
{
- BUG_ON(!(bi->i_flags & BCH_INODE_I_SECTORS_DIRTY));
+ struct inode_new_size s = {
+ .new_size = new_size,
+ .now = bch2_current_time(c),
+ .fields = fields,
+ };
- bi->i_sectors = atomic64_read(&ei->i_sectors);
- bi->i_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
- return 0;
+ return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
-static void i_sectors_dirty_put(struct bch_inode_info *ei,
- struct i_sectors_hook *h)
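+/*
+ * i_blocks accounting: update the in-memory block count, consuming from
+ * @quota_res if one is given; must be called with ei_quota_lock held:
+ */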
+static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+ struct quota_res *quota_res, s64 sectors)
{
- struct inode *inode = &ei->vfs_inode;
+ bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
+ "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
+ inode->ei_inode.bi_sectors);
+ inode->v.i_blocks += sectors;
+
+#ifdef CONFIG_BCACHEFS_QUOTA
+ if (quota_res && sectors > 0) {
+ BUG_ON(sectors > quota_res->sectors);
+ BUG_ON(sectors > inode->ei_quota_reserved);
- if (h->sectors) {
- spin_lock(&inode->i_lock);
- inode->i_blocks += h->sectors;
- spin_unlock(&inode->i_lock);
+ quota_res->sectors -= sectors;
+ inode->ei_quota_reserved -= sectors;
+ } else {
+ bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
+ }
+#endif
+}
- atomic64_add(h->sectors, &ei->i_sectors);
- EBUG_ON(atomic64_read(&ei->i_sectors) < 0);
+static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+ struct quota_res *quota_res, s64 sectors)
+{
+ if (sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __i_sectors_acct(c, inode, quota_res, sectors);
+ mutex_unlock(&inode->ei_quota_lock);
}
+}
+
+/* page state: */
- EBUG_ON(atomic_long_read(&ei->i_sectors_dirty_count) <= 0);
+/* stored in page->private: */
- mutex_lock(&ei->update_lock);
+#define BCH_FOLIO_SECTOR_STATE() \
+ x(unallocated) \
+ x(reserved) \
+ x(dirty) \
+ x(dirty_reserved) \
+ x(allocated)
+
+enum bch_folio_sector_state {
+#define x(n) SECTOR_##n,
+ BCH_FOLIO_SECTOR_STATE()
+#undef x
+};
- if (atomic_long_dec_and_test(&ei->i_sectors_dirty_count)) {
- struct bch_fs *c = ei->vfs_inode.i_sb->s_fs_info;
- int ret = __bch2_write_inode(c, ei, inode_clear_i_sectors_dirty, NULL);
+const char * const bch2_folio_sector_states[] = {
+#define x(n) #n,
+ BCH_FOLIO_SECTOR_STATE()
+#undef x
+ NULL
+};
- ret = ret;
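+/*
+ * Sector state transitions: dirtying preserves any existing reservation,
+ * undirtying reverses it, and reserving upgrades unallocated/dirty states:
+ */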
+static inline enum bch_folio_sector_state
+folio_sector_dirty(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_unallocated:
+ return SECTOR_dirty;
+ case SECTOR_reserved:
+ return SECTOR_dirty_reserved;
+ default:
+ return state;
}
+}
- mutex_unlock(&ei->update_lock);
+static inline enum bch_folio_sector_state
+folio_sector_undirty(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_dirty:
+ return SECTOR_unallocated;
+ case SECTOR_dirty_reserved:
+ return SECTOR_reserved;
+ default:
+ return state;
+ }
}
-static int __must_check i_sectors_dirty_get(struct bch_inode_info *ei,
- struct i_sectors_hook *h)
+static inline enum bch_folio_sector_state
+folio_sector_reserve(enum bch_folio_sector_state state)
{
- int ret = 0;
+ switch (state) {
+ case SECTOR_unallocated:
+ return SECTOR_reserved;
+ case SECTOR_dirty:
+ return SECTOR_dirty_reserved;
+ default:
+ return state;
+ }
+}
- h->hook.fn = i_sectors_hook_fn;
- h->sectors = 0;
-#ifdef CONFIG_BCACHEFS_DEBUG
- h->ei = ei;
-#endif
+struct bch_folio_sector {
+ /* Uncompressed, fully allocated replicas (or on disk reservation): */
+ unsigned nr_replicas:4;
- if (atomic_long_inc_not_zero(&ei->i_sectors_dirty_count))
- return 0;
+ /* Owns PAGE_SECTORS * replicas_reserved sized on disk reservation: */
+ unsigned replicas_reserved:4;
- mutex_lock(&ei->update_lock);
+ /* i_sectors: */
+ enum bch_folio_sector_state state:8;
+};
- if (!(ei->i_flags & BCH_INODE_I_SECTORS_DIRTY)) {
- struct bch_fs *c = ei->vfs_inode.i_sb->s_fs_info;
+struct bch_folio {
+ spinlock_t lock;
+ atomic_t write_count;
+ /*
+ * Is the sector state up to date with the btree?
+ * (Not the data itself)
+ */
+ bool uptodate;
+ struct bch_folio_sector s[];
+};
- ret = __bch2_write_inode(c, ei, inode_set_i_sectors_dirty, NULL);
- }
+static inline void folio_sector_set(struct folio *folio,
+ struct bch_folio *s,
+ unsigned i, unsigned n)
+{
+ s->s[i].state = n;
+}
- if (!ret)
- atomic_long_inc(&ei->i_sectors_dirty_count);
+static inline struct bch_folio *__bch2_folio(struct folio *folio)
+{
+ return folio_has_private(folio)
+ ? (struct bch_folio *) folio_get_private(folio)
+ : NULL;
+}
- mutex_unlock(&ei->update_lock);
+static inline struct bch_folio *bch2_folio(struct folio *folio)
+{
+ EBUG_ON(!folio_test_locked(folio));
- return ret;
+ return __bch2_folio(folio);
}
-struct bchfs_extent_trans_hook {
- struct bchfs_write_op *op;
- struct extent_insert_hook hook;
-
- struct bch_inode_unpacked inode_u;
- struct bkey_inode_buf inode_p;
+/* for newly allocated folios: */
+static void __bch2_folio_release(struct folio *folio)
+{
+ kfree(folio_detach_private(folio));
+}
- bool need_inode_update;
-};
+static void bch2_folio_release(struct folio *folio)
+{
+ EBUG_ON(!folio_test_locked(folio));
+ __bch2_folio_release(folio);
+}
-static enum extent_insert_hook_ret
-bchfs_extent_update_hook(struct extent_insert_hook *hook,
- struct bpos committed_pos,
- struct bpos next_pos,
- struct bkey_s_c k,
- const struct bkey_i *insert)
-{
- struct bchfs_extent_trans_hook *h = container_of(hook,
- struct bchfs_extent_trans_hook, hook);
- struct bch_inode_info *ei = h->op->ei;
- struct inode *inode = &ei->vfs_inode;
- int sign = bkey_extent_is_allocation(&insert->k) -
- (k.k && bkey_extent_is_allocation(k.k));
- s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
- u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
- bool do_pack = false;
-
- BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));
-
- /* XXX: ei->i_size locking */
- if (offset > ei->i_size) {
- BUG_ON(ei->i_flags & BCH_INODE_I_SIZE_DIRTY);
-
- if (!h->need_inode_update) {
- h->need_inode_update = true;
- return BTREE_HOOK_RESTART_TRANS;
- }
+/* for newly allocated folios: */
+static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
+{
+ struct bch_folio *s;
- h->inode_u.i_size = offset;
- do_pack = true;
+ s = kzalloc(sizeof(*s) +
+ sizeof(struct bch_folio_sector) *
+ folio_sectors(folio), GFP_NOFS|gfp);
+ if (!s)
+ return NULL;
- ei->i_size = offset;
+ spin_lock_init(&s->lock);
+ folio_attach_private(folio, s);
+ return s;
+}
- if (h->op->is_dio)
- i_size_write(inode, offset);
- }
+static struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
+{
+ return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
+}
- if (sectors) {
- if (!h->need_inode_update) {
- h->need_inode_update = true;
- return BTREE_HOOK_RESTART_TRANS;
- }
+static unsigned bkey_to_sector_state(struct bkey_s_c k)
+{
+ if (bkey_extent_is_reservation(k))
+ return SECTOR_reserved;
+ if (bkey_extent_is_allocation(k.k))
+ return SECTOR_allocated;
+ return SECTOR_unallocated;
+}
- h->inode_u.i_sectors += sectors;
- do_pack = true;
+static void __bch2_folio_set(struct folio *folio,
+ unsigned pg_offset, unsigned pg_len,
+ unsigned nr_ptrs, unsigned state)
+{
+ struct bch_folio *s = bch2_folio_create(folio, __GFP_NOFAIL);
+ unsigned i, sectors = folio_sectors(folio);
- atomic64_add(sectors, &ei->i_sectors);
+ BUG_ON(pg_offset >= sectors);
+ BUG_ON(pg_offset + pg_len > sectors);
- h->op->sectors_added += sectors;
+ spin_lock(&s->lock);
- if (h->op->is_dio) {
- spin_lock(&inode->i_lock);
- inode->i_blocks += sectors;
- spin_unlock(&inode->i_lock);
- }
+ for (i = pg_offset; i < pg_offset + pg_len; i++) {
+ s->s[i].nr_replicas = nr_ptrs;
+ folio_sector_set(folio, s, i, state);
}
- if (do_pack)
- bch2_inode_pack(&h->inode_p, &h->inode_u);
+ if (i == sectors)
+ s->uptodate = true;
- return BTREE_HOOK_DO_INSERT;
+ spin_unlock(&s->lock);
}
-static int bchfs_write_index_update(struct bch_write_op *wop)
+/*
+ * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
+ * extents btree:
+ */
+static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
+ struct folio **folios, unsigned nr_folios)
{
- struct bchfs_write_op *op = container_of(wop,
- struct bchfs_write_op, op);
- struct keylist *keys = &op->op.insert_keys;
- struct btree_iter extent_iter, inode_iter;
- struct bchfs_extent_trans_hook hook;
- struct bkey_i *k = bch2_keylist_front(keys);
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 offset = folio_sector(folios[0]);
+ unsigned folio_idx = 0;
+ u32 snapshot;
int ret;
- BUG_ON(k->k.p.inode != op->ei->vfs_inode.i_ino);
-
- bch2_btree_iter_init(&extent_iter, wop->c, BTREE_ID_EXTENTS,
- bkey_start_pos(&bch2_keylist_front(keys)->k),
- BTREE_ITER_INTENT);
- bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES,
- POS(extent_iter.pos.inode, 0),
- BTREE_ITER_INTENT);
-
- hook.op = op;
- hook.hook.fn = bchfs_extent_update_hook;
- hook.need_inode_update = false;
-
- do {
- ret = bch2_btree_iter_traverse(&extent_iter);
- if (ret)
- goto err;
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
- /* XXX: ei->i_size locking */
- k = bch2_keylist_front(keys);
- if (min(k->k.p.offset << 9, op->new_i_size) > op->ei->i_size)
- hook.need_inode_update = true;
+ ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
- if (hook.need_inode_update) {
- struct bkey_s_c inode;
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inum.inum, offset, snapshot),
+ BTREE_ITER_SLOTS, k, ret) {
+ unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k);
- if (!btree_iter_linked(&inode_iter))
- bch2_btree_iter_link(&extent_iter, &inode_iter);
+ while (folio_idx < nr_folios) {
+ struct folio *folio = folios[folio_idx];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
+ unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
- inode = bch2_btree_iter_peek_with_holes(&inode_iter);
- if ((ret = btree_iter_err(inode)))
- goto err;
+ BUG_ON(k.k->p.offset < folio_start);
+ BUG_ON(bkey_start_offset(k.k) > folio_end);
- if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
- "inode %llu not found when updating",
- extent_iter.pos.inode)) {
- ret = -ENOENT;
- break;
- }
+ if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate)
+ __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
- if (WARN_ONCE(bkey_bytes(inode.k) >
- sizeof(hook.inode_p),
- "inode %llu too big (%zu bytes, buf %zu)",
- extent_iter.pos.inode,
- bkey_bytes(inode.k),
- sizeof(hook.inode_p))) {
- ret = -ENOENT;
+ if (k.k->p.offset < folio_end)
break;
- }
+ folio_idx++;
+ }
- bkey_reassemble(&hook.inode_p.inode.k_i, inode);
- ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
- &hook.inode_u);
- if (WARN_ONCE(ret,
- "error %i unpacking inode %llu",
- ret, extent_iter.pos.inode)) {
- ret = -ENOENT;
- break;
- }
+ if (folio_idx == nr_folios)
+ break;
+ }
- ret = bch2_btree_insert_at(wop->c, &wop->res,
- &hook.hook, op_journal_seq(wop),
- BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
- BTREE_INSERT_ENTRY(&extent_iter, k),
- BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
- &hook.inode_p.inode.k_i, 2));
- } else {
- ret = bch2_btree_insert_at(wop->c, &wop->res,
- &hook.hook, op_journal_seq(wop),
- BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
- BTREE_INSERT_ENTRY(&extent_iter, k));
- }
+ offset = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
- continue;
- if (ret)
- break;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+ bch2_trans_exit(&trans);
- bch2_keylist_pop_front(keys);
- } while (!bch2_keylist_empty(keys));
+ return ret;
+}
- bch2_btree_iter_unlock(&extent_iter);
- bch2_btree_iter_unlock(&inode_iter);
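+/*
+ * Set folio state from an extent we just read: reflinked extents are shared,
+ * so an overwrite will need to allocate new space - count them as having no
+ * allocated replicas:
+ */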
+static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
+{
+ struct bvec_iter iter;
+ struct folio_vec fv;
+ unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
+ ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k);
- return ret;
+ bio_for_each_folio(fv, bio, iter)
+ __bch2_folio_set(fv.fv_folio,
+ fv.fv_offset >> 9,
+ fv.fv_len >> 9,
+ nr_ptrs, state);
}
-/* page state: */
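+/*
+ * Called when extents backing this range have been dropped (e.g. by hole
+ * punch): clear the cached nr_replicas so that future writes reserve disk
+ * space again:
+ */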
+static void mark_pagecache_unallocated(struct bch_inode_info *inode,
+ u64 start, u64 end)
+{
+ pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+ struct folio_batch fbatch;
+ unsigned i, j;
-/* stored in page->private: */
+ if (end <= start)
+ return;
-/*
- * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
- * almost protected it with the page lock, except that bch2_writepage_io_done has
- * to update the sector counts (and from interrupt/bottom half context).
- */
-struct bch_page_state {
-union { struct {
- /*
- * page is _fully_ written on disk, and not compressed - which means to
- * write this page we don't have to reserve space (the new write will
- * never take up more space on disk than what it's overwriting)
- */
- unsigned allocated:1;
+ folio_batch_init(&fbatch);
- /* Owns PAGE_SECTORS sized reservation: */
- unsigned reserved:1;
- unsigned nr_replicas:4;
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+ struct bch_folio *s;
- /*
- * Number of sectors on disk - for i_blocks
- * Uncompressed size, not compressed size:
- */
- u8 sectors;
- u8 dirty_sectors;
-};
- /* for cmpxchg: */
- unsigned long v;
-};
-};
+ BUG_ON(end <= folio_start);
-#define page_state_cmpxchg(_ptr, _new, _expr) \
-({ \
- unsigned long _v = READ_ONCE((_ptr)->v); \
- struct bch_page_state _old; \
- \
- do { \
- _old.v = _new.v = _v; \
- _expr; \
- \
- EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
- } while (_old.v != _new.v && \
- (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v); \
- \
- _old; \
-})
+ folio_lock(folio);
+ s = bch2_folio(folio);
+
+ if (s) {
+ spin_lock(&s->lock);
+ for (j = folio_offset; j < folio_offset + folio_len; j++)
+ s->s[j].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
+
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+}
-static inline struct bch_page_state *page_state(struct page *page)
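+/*
+ * Called when a range is fallocated: upgrade sector states to reserved,
+ * dropping the i_blocks contribution of formerly dirty sectors since the
+ * on-disk reservation now accounts for them:
+ */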
+static void mark_pagecache_reserved(struct bch_inode_info *inode,
+ u64 start, u64 end)
{
- struct bch_page_state *s = (void *) &page->private;
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+ struct folio_batch fbatch;
+ s64 i_sectors_delta = 0;
+ unsigned i, j;
+
+ if (end <= start)
+ return;
- BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+ struct bch_folio *s;
+
+ BUG_ON(end <= folio_start);
+
+ folio_lock(folio);
+ s = bch2_folio(folio);
+
+ if (s) {
+ spin_lock(&s->lock);
+ for (j = folio_offset; j < folio_offset + folio_len; j++) {
+ i_sectors_delta -= s->s[j].state == SECTOR_dirty;
+ folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
+ }
+ spin_unlock(&s->lock);
+ }
- if (!PagePrivate(page))
- SetPagePrivate(page);
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
- return s;
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
}
-static void bch2_put_page_reservation(struct bch_fs *c, struct page *page)
+static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
- struct disk_reservation res = { .sectors = PAGE_SECTORS };
- struct bch_page_state s;
-
- s = page_state_cmpxchg(page_state(page), s, {
- if (!s.reserved)
- return;
- s.reserved = 0;
- });
+ /* XXX: this should not be open coded */
+ return inode->ei_inode.bi_data_replicas
+ ? inode->ei_inode.bi_data_replicas - 1
+ : c->opts.data_replicas;
+}
- bch2_disk_reservation_put(c, &res);
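+/* Replicas of this sector that still need disk space reserved: */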
+static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
+ unsigned nr_replicas)
+{
+ return max(0, (int) nr_replicas -
+ s->nr_replicas -
+ s->replicas_reserved);
}
-static int bch2_get_page_reservation(struct bch_fs *c, struct page *page,
- bool check_enospc)
+static int bch2_get_folio_disk_reservation(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio, bool check_enospc)
{
- struct bch_page_state *s = page_state(page), new;
- struct disk_reservation res;
- int ret = 0;
+ struct bch_folio *s = bch2_folio_create(folio, 0);
+ unsigned nr_replicas = inode_nr_replicas(c, inode);
+ struct disk_reservation disk_res = { 0 };
+ unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
+ int ret;
+
+ if (!s)
+ return -ENOMEM;
- BUG_ON(s->allocated && s->sectors != PAGE_SECTORS);
+ for (i = 0; i < sectors; i++)
+ disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
- if (s->allocated || s->reserved)
+ if (!disk_res_sectors)
return 0;
- ret = bch2_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc
- ? BCH_DISK_RESERVATION_NOFAIL : 0);
- if (ret)
+ ret = bch2_disk_reservation_get(c, &disk_res,
+ disk_res_sectors, 1,
+ !check_enospc
+ ? BCH_DISK_RESERVATION_NOFAIL
+ : 0);
+ if (unlikely(ret))
return ret;
- page_state_cmpxchg(s, new, {
- if (new.reserved) {
- bch2_disk_reservation_put(c, &res);
- return 0;
- }
- new.reserved = 1;
- new.nr_replicas = res.nr_replicas;
- });
+ for (i = 0; i < sectors; i++)
+ s->s[i].replicas_reserved +=
+ sectors_to_reserve(&s->s[i], nr_replicas);
return 0;
}
-static void bch2_clear_page_bits(struct page *page)
-{
- struct inode *inode = page->mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
- struct disk_reservation res = { .sectors = PAGE_SECTORS };
- struct bch_page_state s;
-
- if (!PagePrivate(page))
- return;
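+/*
+ * A buffered write needs two reservations: disk space for the data itself,
+ * and quota for sectors that weren't previously allocated:
+ */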
+struct bch2_folio_reservation {
+ struct disk_reservation disk;
+ struct quota_res quota;
+};
- s = xchg(page_state(page), (struct bch_page_state) { .v = 0 });
- ClearPagePrivate(page);
+static void bch2_folio_reservation_init(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct bch2_folio_reservation *res)
+{
+ memset(res, 0, sizeof(*res));
- if (s.dirty_sectors) {
- spin_lock(&inode->i_lock);
- inode->i_blocks -= s.dirty_sectors;
- spin_unlock(&inode->i_lock);
- }
+ res->disk.nr_replicas = inode_nr_replicas(c, inode);
+}
- if (s.reserved)
- bch2_disk_reservation_put(c, &res);
+static void bch2_folio_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct bch2_folio_reservation *res)
+{
+ bch2_disk_reservation_put(c, &res->disk);
+ bch2_quota_reservation_put(c, inode, &res->quota);
}
-int bch2_set_page_dirty(struct page *page)
+static int bch2_folio_reservation_get(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio,
+ struct bch2_folio_reservation *res,
+ unsigned offset, unsigned len)
{
- struct bch_page_state old, new;
+ struct bch_folio *s = bch2_folio_create(folio, 0);
+ unsigned i, disk_sectors = 0, quota_sectors = 0;
+ int ret;
- old = page_state_cmpxchg(page_state(page), new,
- new.dirty_sectors = PAGE_SECTORS - new.sectors;
- );
+ if (!s)
+ return -ENOMEM;
- if (old.dirty_sectors != new.dirty_sectors) {
- struct inode *inode = page->mapping->host;
+ BUG_ON(!s->uptodate);
- spin_lock(&inode->i_lock);
- inode->i_blocks += new.dirty_sectors - old.dirty_sectors;
- spin_unlock(&inode->i_lock);
+ for (i = round_down(offset, block_bytes(c)) >> 9;
+ i < round_up(offset + len, block_bytes(c)) >> 9;
+ i++) {
+ disk_sectors += sectors_to_reserve(&s->s[i],
+ res->disk.nr_replicas);
+ quota_sectors += s->s[i].state == SECTOR_unallocated;
}
- return __set_page_dirty_nobuffers(page);
-}
+ if (disk_sectors) {
+ ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
+ if (unlikely(ret))
+ return ret;
+ }
-/* readpages/writepages: */
+ if (quota_sectors) {
+ ret = bch2_quota_reservation_add(c, inode, &res->quota,
+ quota_sectors, true);
+ if (unlikely(ret)) {
+ struct disk_reservation tmp = {
+ .sectors = disk_sectors
+ };
-static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
-{
- sector_t offset = (sector_t) page->index << (PAGE_SHIFT - 9);
+ bch2_disk_reservation_put(c, &tmp);
+ res->disk.sectors -= disk_sectors;
+ return ret;
+ }
+ }
- return bio->bi_vcnt < bio->bi_max_vecs &&
- bio_end_sector(bio) == offset;
+ return 0;
}
-static void __bio_add_page(struct bio *bio, struct page *page)
+static void bch2_clear_folio_bits(struct folio *folio)
{
- bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
- .bv_page = page,
- .bv_len = PAGE_SIZE,
- .bv_offset = 0,
- };
+ struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_folio *s = bch2_folio(folio);
+ struct disk_reservation disk_res = { 0 };
+ int i, sectors = folio_sectors(folio), dirty_sectors = 0;
- bio->bi_iter.bi_size += PAGE_SIZE;
-}
+ if (!s)
+ return;
-static int bio_add_page_contig(struct bio *bio, struct page *page)
-{
- sector_t offset = (sector_t) page->index << (PAGE_SHIFT - 9);
+ EBUG_ON(!folio_test_locked(folio));
+ EBUG_ON(folio_test_writeback(folio));
+
+ for (i = 0; i < sectors; i++) {
+ disk_res.sectors += s->s[i].replicas_reserved;
+ s->s[i].replicas_reserved = 0;
- BUG_ON(!bio->bi_max_vecs);
+ dirty_sectors -= s->s[i].state == SECTOR_dirty;
+ folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
+ }
- if (!bio->bi_vcnt)
- bio->bi_iter.bi_sector = offset;
- else if (!bio_can_add_page_contig(bio, page))
- return -1;
+ bch2_disk_reservation_put(c, &disk_res);
- __bio_add_page(bio, page);
- return 0;
+ i_sectors_acct(c, inode, NULL, dirty_sectors);
+
+ bch2_folio_release(folio);
}
-static void bch2_readpages_end_io(struct bio *bio)
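+/*
+ * Transfer the disk and quota reservations from @res to the folio and mark
+ * the range dirty; i_blocks is updated for sectors going unallocated -> dirty:
+ */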
+static void bch2_set_folio_dirty(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio,
+ struct bch2_folio_reservation *res,
+ unsigned offset, unsigned len)
{
- struct bio_vec *bv;
- int i;
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, dirty_sectors = 0;
- bio_for_each_segment_all(bv, bio, i) {
- struct page *page = bv->bv_page;
+ WARN_ON((u64) folio_pos(folio) + offset + len >
+ round_up((u64) i_size_read(&inode->v), block_bytes(c)));
- if (!bio->bi_error) {
- SetPageUptodate(page);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- unlock_page(page);
- }
+ BUG_ON(!s->uptodate);
- bio_put(bio);
-}
+ spin_lock(&s->lock);
-struct readpages_iter {
- struct address_space *mapping;
- struct list_head pages;
- unsigned nr_pages;
-};
+ for (i = round_down(offset, block_bytes(c)) >> 9;
+ i < round_up(offset + len, block_bytes(c)) >> 9;
+ i++) {
+ unsigned sectors = sectors_to_reserve(&s->s[i],
+ res->disk.nr_replicas);
+
+ /*
+ * This can happen if we race with the error path in
+ * bch2_writepage_io_done():
+ */
+ sectors = min_t(unsigned, sectors, res->disk.sectors);
+
+ s->s[i].replicas_reserved += sectors;
+ res->disk.sectors -= sectors;
-static int readpage_add_page(struct readpages_iter *iter, struct page *page)
+ dirty_sectors += s->s[i].state == SECTOR_unallocated;
+
+ folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
+ }
+
+ spin_unlock(&s->lock);
+
+ i_sectors_acct(c, inode, &res->quota, dirty_sectors);
+
+ if (!folio_test_dirty(folio))
+ filemap_dirty_folio(inode->v.i_mapping, folio);
+}
+
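+/*
+ * If a write faults in its own userspace buffer and that buffer is mmapped
+ * from the same file, taking the pagecache locks again here would deadlock;
+ * the write path sets faults_disabled_mapping() so we can detect this and
+ * back off:
+ */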
+vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
- struct bch_page_state *s = page_state(page);
+ struct file *file = vmf->vma->vm_file;
+ struct address_space *mapping = file->f_mapping;
+ struct address_space *fdm = faults_disabled_mapping();
+ struct bch_inode_info *inode = file_bch_inode(file);
int ret;
- BUG_ON(s->reserved);
- s->allocated = 1;
- s->sectors = 0;
+ if (fdm == mapping)
+ return VM_FAULT_SIGBUS;
+
+ /* Lock ordering: */
+ if (fdm > mapping) {
+ struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
+
+ if (bch2_pagecache_add_tryget(inode))
+ goto got_lock;
+
+ bch2_pagecache_block_put(fdm_host);
+
+ bch2_pagecache_add_get(inode);
+ bch2_pagecache_add_put(inode);
+
+ bch2_pagecache_block_get(fdm_host);
+
+ /* Signal that lock has been dropped: */
+ set_fdm_dropped_locks();
+ return VM_FAULT_SIGBUS;
+ }
+
+ bch2_pagecache_add_get(inode);
+got_lock:
+ ret = filemap_fault(vmf);
+ bch2_pagecache_add_put(inode);
- prefetchw(&page->flags);
- ret = add_to_page_cache_lru(page, iter->mapping,
- page->index, GFP_NOFS);
- put_page(page);
return ret;
}
-static inline struct page *readpage_iter_next(struct readpages_iter *iter)
+vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
- while (iter->nr_pages) {
- struct page *page =
- list_last_entry(&iter->pages, struct page, lru);
+ struct folio *folio = page_folio(vmf->page);
+ struct file *file = vmf->vma->vm_file;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct address_space *mapping = file->f_mapping;
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation res;
+ unsigned len;
+ loff_t isize;
+ int ret;
+
+ bch2_folio_reservation_init(c, inode, &res);
+
+ sb_start_pagefault(inode->v.i_sb);
+ file_update_time(file);
+
+ /*
+ * Not strictly necessary, but helps avoid dio writes livelocking in
+ * write_invalidate_inode_pages_range() - can drop this if/when we get
+ * a write_invalidate_inode_pages_range() that works without dropping
+ * page lock before invalidating page
+ */
+ bch2_pagecache_add_get(inode);
+
+ folio_lock(folio);
+ isize = i_size_read(&inode->v);
+
+ if (folio->mapping != mapping || folio_pos(folio) >= isize) {
+ folio_unlock(folio);
+ ret = VM_FAULT_NOPAGE;
+ goto out;
+ }
+
+ len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
- prefetchw(&page->flags);
- list_del(&page->lru);
- iter->nr_pages--;
+ if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
+ if (bch2_folio_set(c, inode_inum(inode), &folio, 1)) {
+ folio_unlock(folio);
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
+ }
- if (!readpage_add_page(iter, page))
- return page;
+ if (bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
+ folio_unlock(folio);
+ ret = VM_FAULT_SIGBUS;
+ goto out;
}
- return NULL;
+ bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
+ bch2_folio_reservation_put(c, inode, &res);
+
+ folio_wait_stable(folio);
+ ret = VM_FAULT_LOCKED;
+out:
+ bch2_pagecache_add_put(inode);
+ sb_end_pagefault(inode->v.i_sb);
+
+ return ret;
}
-#define for_each_readpage_page(_iter, _page) \
- for (; \
- ((_page) = __readpage_next_page(&(_iter)));) \
+void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+{
+ if (offset || length < folio_size(folio))
+ return;
+
+ bch2_clear_folio_bits(folio);
+}
-static void bch2_mark_pages_unalloc(struct bio *bio)
+bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
- struct bvec_iter iter;
- struct bio_vec bv;
+ if (folio_test_dirty(folio) || folio_test_writeback(folio))
+ return false;
- bio_for_each_segment(bv, bio, iter)
- page_state(bv.bv_page)->allocated = 0;
+ bch2_clear_folio_bits(folio);
+ return true;
}
-static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
+/* readpage(s): */
+
+static void bch2_readpages_end_io(struct bio *bio)
{
- struct bvec_iter iter;
- struct bio_vec bv;
+ struct bvec_iter_all iter;
+ struct folio_vec fv;
+
+ bio_for_each_folio_all(fv, bio, iter) {
+ if (!bio->bi_status) {
+ folio_mark_uptodate(fv.fv_folio);
+ } else {
+ folio_clear_uptodate(fv.fv_folio);
+ folio_set_error(fv.fv_folio);
+ }
+ folio_unlock(fv.fv_folio);
+ }
+
+ bio_put(bio);
+}
+
+struct readpages_iter {
+ struct address_space *mapping;
+ unsigned idx;
+ folios folios;
+};
- bio_for_each_segment(bv, bio, iter) {
- struct bch_page_state *s = page_state(bv.bv_page);
+static int readpages_iter_init(struct readpages_iter *iter,
+ struct readahead_control *ractl)
+{
+ struct folio **fi;
+ int ret;
- /* sectors in @k from the start of this page: */
- unsigned k_sectors = k.k->size - (iter.bi_sector - k.k->p.offset);
+ memset(iter, 0, sizeof(*iter));
- unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);
+ iter->mapping = ractl->mapping;
- if (!s->sectors)
- s->nr_replicas = bch2_extent_nr_dirty_ptrs(k);
- else
- s->nr_replicas = min_t(unsigned, s->nr_replicas,
- bch2_extent_nr_dirty_ptrs(k));
+ ret = filemap_get_contig_folios_d(iter->mapping,
+ ractl->_index << PAGE_SHIFT,
+ (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
+ 0, mapping_gfp_mask(iter->mapping),
+ &iter->folios);
+ if (ret)
+ return ret;
- BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
- s->sectors += page_sectors;
+ darray_for_each(iter->folios, fi) {
+ ractl->_nr_pages -= 1U << folio_order(*fi);
+ __bch2_folio_create(*fi, __GFP_NOFAIL);
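+ /*
+ * Each folio holds two refs at this point: one from
+ * __filemap_get_folio(), and one taken by the readahead code that
+ * would normally be dropped by readahead_folio(), which we don't use:
+ */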
+ folio_put(*fi);
+ folio_put(*fi);
}
+
+ return 0;
+}
+
+static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
+{
+ if (iter->idx >= iter->folios.nr)
+ return NULL;
+ return iter->folios.data[iter->idx];
+}
+
+static inline void readpage_iter_advance(struct readpages_iter *iter)
+{
+ iter->idx++;
+}
+
+static bool extent_partial_reads_expensive(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct bch_extent_crc_unpacked crc;
+ const union bch_extent_entry *i;
+
+ bkey_for_each_crc(k.k, ptrs, crc, i)
+ if (crc.csum_type || crc.compression_type)
+ return true;
+ return false;
}
static void readpage_bio_extend(struct readpages_iter *iter,
- struct bio *bio, u64 offset,
+ struct bio *bio,
+ unsigned sectors_this_extent,
bool get_more)
{
- struct page *page;
- pgoff_t page_offset;
- int ret;
-
- while (bio_end_sector(bio) < offset &&
+ while (bio_sectors(bio) < sectors_this_extent &&
bio->bi_vcnt < bio->bi_max_vecs) {
- page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
+ struct folio *folio = readpage_iter_peek(iter);
+ int ret;
+
+ if (folio) {
+ readpage_iter_advance(iter);
+ } else {
+ pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
+
+ if (!get_more)
+ break;
- if (iter->nr_pages) {
- page = list_last_entry(&iter->pages, struct page, lru);
- if (page->index != page_offset)
+ folio = xa_load(&iter->mapping->i_pages, folio_offset);
+ if (folio && !xa_is_value(folio))
break;
- list_del(&page->lru);
- iter->nr_pages--;
- } else if (get_more) {
- rcu_read_lock();
- page = radix_tree_lookup(&iter->mapping->page_tree, page_offset);
- rcu_read_unlock();
+ folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
+ if (!folio)
+ break;
- if (page && !radix_tree_exceptional_entry(page))
+ if (!__bch2_folio_create(folio, 0)) {
+ folio_put(folio);
break;
+ }
- page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
- if (!page)
+ ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_NOFS);
+ if (ret) {
+ __bch2_folio_release(folio);
+ folio_put(folio);
break;
+ }
- page->index = page_offset;
- ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
- } else {
- break;
+ folio_put(folio);
}
- ret = readpage_add_page(iter, page);
- if (ret)
- break;
+ BUG_ON(folio_sector(folio) != bio_end_sector(bio));
- __bio_add_page(bio, page);
+ BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
}
-
- if (!iter->nr_pages)
- SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
}
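+/*
+ * Core read path: walk the extents btree, issuing a read for each extent
+ * overlapping the bio, resolving reflink indirection and retrying on
+ * transaction restart:
+ */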
-static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
- struct bch_read_bio *rbio, u64 inode,
+static void bchfs_read(struct btree_trans *trans,
+ struct bch_read_bio *rbio,
+ subvol_inum inum,
struct readpages_iter *readpages_iter)
{
- struct bio *bio = &rbio->bio;
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_buf sk;
int flags = BCH_READ_RETRY_IF_STALE|
- BCH_READ_PROMOTE|
- BCH_READ_MAY_REUSE_BIO;
+ BCH_READ_MAY_PROMOTE;
+ u32 snapshot;
+ int ret = 0;
+
+ rbio->c = c;
+ rbio->start_time = local_clock();
+ rbio->subvol = inum.subvol;
+
+ bch2_bkey_buf_init(&sk);
+retry:
+ bch2_trans_begin(trans);
+ iter = (struct btree_iter) { NULL };
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
+ BTREE_ITER_SLOTS);
while (1) {
- struct extent_pick_ptr pick;
- BKEY_PADDED(k) tmp;
struct bkey_s_c k;
- unsigned bytes;
- bool is_last;
+ unsigned bytes, sectors, offset_into_extent;
+ enum btree_id data_btree = BTREE_ID_extents;
- bch2_btree_iter_set_pos(iter, POS(inode, bio->bi_iter.bi_sector));
+ /*
+ * read_extent -> io_time_reset may cause a transaction restart
+ * without returning an error, we need to check for that here:
+ */
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ break;
- k = bch2_btree_iter_peek_with_holes(iter);
- BUG_ON(!k.k);
+ bch2_btree_iter_set_pos(&iter,
+ POS(inum.inum, rbio->bio.bi_iter.bi_sector));
- if (IS_ERR(k.k)) {
- int ret = bch2_btree_iter_unlock(iter);
- BUG_ON(!ret);
- bcache_io_error(c, bio, "btree IO error %i", ret);
- bio_endio(bio);
- return;
- }
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ break;
- bkey_reassemble(&tmp.k, k);
- bch2_btree_iter_unlock(iter);
- k = bkey_i_to_s_c(&tmp.k);
+ offset_into_extent = iter.pos.offset -
+ bkey_start_offset(k.k);
+ sectors = k.k->size - offset_into_extent;
- bch2_extent_pick_ptr(c, k, &pick);
- if (IS_ERR(pick.ca)) {
- bcache_io_error(c, bio, "no device to read from");
- bio_endio(bio);
- return;
- }
+ bch2_bkey_buf_reassemble(&sk, c, k);
+
+ ret = bch2_read_indirect_extent(trans, &data_btree,
+ &offset_into_extent, &sk);
+ if (ret)
+ break;
+
+ k = bkey_i_to_s_c(sk.k);
+
+ sectors = min(sectors, k.k->size - offset_into_extent);
if (readpages_iter)
- readpage_bio_extend(readpages_iter,
- bio, k.k->p.offset,
- pick.ca &&
- (pick.crc.csum_type ||
- pick.crc.compression_type));
+ readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
+ extent_partial_reads_expensive(k));
- bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
- bio->bi_iter.bi_sector) << 9;
- is_last = bytes == bio->bi_iter.bi_size;
- swap(bio->bi_iter.bi_size, bytes);
+ bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
+ swap(rbio->bio.bi_iter.bi_size, bytes);
- if (bkey_extent_is_allocation(k.k))
- bch2_add_page_sectors(bio, k);
+ if (rbio->bio.bi_iter.bi_size == bytes)
+ flags |= BCH_READ_LAST_FRAGMENT;
- if (!bkey_extent_is_allocation(k.k) ||
- bkey_extent_is_compressed(k))
- bch2_mark_pages_unalloc(bio);
+ bch2_bio_page_state_set(&rbio->bio, k);
- if (is_last)
- flags |= BCH_READ_IS_LAST;
+ bch2_read_extent(trans, rbio, iter.pos,
+ data_btree, k, offset_into_extent, flags);
- if (pick.ca) {
- PTR_BUCKET(pick.ca, &pick.ptr)->prio[READ] =
- c->prio_clock[READ].hand;
+ if (flags & BCH_READ_LAST_FRAGMENT)
+ break;
- bch2_read_extent(c, rbio, k, &pick, flags);
- flags &= ~BCH_READ_MAY_REUSE_BIO;
- } else {
- zero_fill_bio(bio);
+ swap(rbio->bio.bi_iter.bi_size, bytes);
+ bio_advance(&rbio->bio, bytes);
- if (is_last)
- bio_endio(bio);
- }
+ ret = btree_trans_too_many_iters(trans);
+ if (ret)
+ break;
+ }
+err:
+ bch2_trans_iter_exit(trans, &iter);
- if (is_last)
- return;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- swap(bio->bi_iter.bi_size, bytes);
- bio_advance(bio, bytes);
+ if (ret) {
+ bch_err_inum_offset_ratelimited(c,
+ iter.pos.inode,
+ iter.pos.offset << 9,
+ "read error %i from btree lookup", ret);
+ rbio->bio.bi_status = BLK_STS_IOERR;
+ bio_endio(&rbio->bio);
}
+
+ bch2_bkey_buf_exit(&sk, c);
}
-int bch2_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+void bch2_readahead(struct readahead_control *ractl)
{
- struct inode *inode = mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
- struct btree_iter iter;
- struct page *page;
- struct readpages_iter readpages_iter = {
- .mapping = mapping, .nr_pages = nr_pages
- };
+ struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_io_opts opts;
+ struct btree_trans trans;
+ struct folio *folio;
+ struct readpages_iter readpages_iter;
+ int ret;
- bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
- INIT_LIST_HEAD(&readpages_iter.pages);
- list_add(&readpages_iter.pages, pages);
- list_del_init(pages);
+ ret = readpages_iter_init(&readpages_iter, ractl);
+ BUG_ON(ret);
- if (current->pagecache_lock != &mapping->add_lock)
- pagecache_add_get(&mapping->add_lock);
+ bch2_trans_init(&trans, c, 0, 0);
- while ((page = readpage_iter_next(&readpages_iter))) {
- unsigned n = max(min_t(unsigned, readpages_iter.nr_pages + 1,
- BIO_MAX_PAGES),
- BCH_ENCODED_EXTENT_MAX >> PAGE_SECTOR_SHIFT);
+ bch2_pagecache_add_get(inode);
+ while ((folio = readpage_iter_peek(&readpages_iter))) {
+ unsigned n = min_t(unsigned,
+ readpages_iter.folios.nr -
+ readpages_iter.idx,
+ BIO_MAX_VECS);
struct bch_read_bio *rbio =
- container_of(bio_alloc_bioset(GFP_NOFS, n,
- &c->bio_read),
- struct bch_read_bio, bio);
+ rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
+ GFP_NOFS, &c->bio_read),
+ opts);
+
+ readpage_iter_advance(&readpages_iter);
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
rbio->bio.bi_end_io = bch2_readpages_end_io;
- bio_add_page_contig(&rbio->bio, page);
- bchfs_read(c, &iter, rbio, inode->i_ino, &readpages_iter);
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
+
+ bchfs_read(&trans, rbio, inode_inum(inode),
+ &readpages_iter);
}
- if (current->pagecache_lock != &mapping->add_lock)
- pagecache_add_put(&mapping->add_lock);
+ bch2_pagecache_add_put(inode);
- return 0;
+ bch2_trans_exit(&trans);
+ darray_exit(&readpages_iter.folios);
}
-static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
- u64 inode, struct page *page)
+static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
+ subvol_inum inum, struct folio *folio)
{
- struct btree_iter iter;
+ struct btree_trans trans;
- /*
- * Initialize page state:
- * If a page is partly allocated and partly a hole, we want it to be
- * marked BCH_PAGE_UNALLOCATED - so we initially mark all pages
- * allocated and then mark them unallocated as we find holes:
- *
- * Note that the bio hasn't been split yet - it's the only bio that
- * points to these pages. As we walk extents and split @bio, that
- * necessarily be true, the splits won't necessarily be on page
- * boundaries:
- */
- struct bch_page_state *s = page_state(page);
+ bch2_folio_create(folio, __GFP_NOFAIL);
- EBUG_ON(s->reserved);
- s->allocated = 1;
- s->sectors = 0;
+ rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
- bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
- bio_add_page_contig(&rbio->bio, page);
+ bch2_trans_init(&trans, c, 0, 0);
+ bchfs_read(&trans, rbio, inum, NULL);
+ bch2_trans_exit(&trans);
+}
- bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
- bchfs_read(c, &iter, rbio, inode, NULL);
+static void bch2_read_single_folio_end_io(struct bio *bio)
+{
+ complete(bio->bi_private);
}
-int bch2_readpage(struct file *file, struct page *page)
+static int bch2_read_single_folio(struct folio *folio,
+ struct address_space *mapping)
{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
+ struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_read_bio *rbio;
+ struct bch_io_opts opts;
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
+ opts);
+ rbio->bio.bi_private = &done;
+ rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
+
+ __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
+ wait_for_completion(&done);
+
+ ret = blk_status_to_errno(rbio->bio.bi_status);
+ bio_put(&rbio->bio);
- rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1,
- &c->bio_read),
- struct bch_read_bio, bio);
- rbio->bio.bi_end_io = bch2_readpages_end_io;
+ if (ret < 0)
+ return ret;
- __bchfs_readpage(c, rbio, inode->i_ino, page);
+ folio_mark_uptodate(folio);
return 0;
}
+int bch2_read_folio(struct file *file, struct folio *folio)
+{
+ int ret;
+
+ ret = bch2_read_single_folio(folio, folio->mapping);
+ folio_unlock(folio);
+ return bch2_err_class(ret);
+}
+
+/* writepages: */
+
struct bch_writepage_state {
struct bch_writepage_io *io;
+ struct bch_io_opts opts;
+ struct bch_folio_sector *tmp;
+ unsigned tmp_sectors;
};
-static void bch2_writepage_io_free(struct closure *cl)
+static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
+ struct bch_inode_info *inode)
{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
- struct bio *bio = &io->bio.bio;
+ struct bch_writepage_state ret = { 0 };
- bio_put(bio);
+ bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
+ return ret;
}
-static void bch2_writepage_io_done(struct closure *cl)
+static void bch2_writepage_io_done(struct bch_write_op *op)
{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
- struct bch_fs *c = io->op.op.c;
- struct bio *bio = &io->bio.bio;
- struct bio_vec *bvec;
+ struct bch_writepage_io *io =
+ container_of(op, struct bch_writepage_io, op);
+ struct bch_fs *c = io->op.c;
+ struct bio *bio = &io->op.wbio.bio;
+ struct bvec_iter_all iter;
+ struct folio_vec fv;
unsigned i;
- atomic_sub(bio->bi_vcnt, &c->writeback_pages);
- wake_up(&c->writeback_wait);
+ if (io->op.error) {
+ set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
- bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
+ bio_for_each_folio_all(fv, bio, iter) {
+ struct bch_folio *s;
- if (io->op.op.error) {
- SetPageError(page);
- if (page->mapping)
- set_bit(AS_EIO, &page->mapping->flags);
- }
+ folio_set_error(fv.fv_folio);
+ mapping_set_error(fv.fv_folio->mapping, -EIO);
- if (io->op.op.written >= PAGE_SECTORS) {
- struct bch_page_state old, new;
+ s = __bch2_folio(fv.fv_folio);
+ spin_lock(&s->lock);
+ for (i = 0; i < folio_sectors(fv.fv_folio); i++)
+ s->s[i].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
+ }
- old = page_state_cmpxchg(page_state(page), new, {
- new.sectors = PAGE_SECTORS;
- new.dirty_sectors = 0;
- });
+ if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
+ bio_for_each_folio_all(fv, bio, iter) {
+ struct bch_folio *s;
- io->op.sectors_added -= old.dirty_sectors;
- io->op.op.written -= PAGE_SECTORS;
+ s = __bch2_folio(fv.fv_folio);
+ spin_lock(&s->lock);
+ for (i = 0; i < folio_sectors(fv.fv_folio); i++)
+ s->s[i].nr_replicas = 0;
+ spin_unlock(&s->lock);
}
}
/*
* racing with fallocate can cause us to add fewer sectors than
* expected - but we shouldn't add more sectors than expected:
- *
+ */
+ WARN_ON_ONCE(io->op.i_sectors_delta > 0);
+
+ /*
* (error (due to going RO) halfway through a page can screw that up
* slightly)
+ * XXX wtf?
+ BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
*/
- BUG_ON(io->op.sectors_added >= (s64) PAGE_SECTORS);
/*
* PageWriteback is effectively our ref on the inode - fixup i_blocks
* before calling end_page_writeback:
*/
- if (io->op.sectors_added) {
- struct inode *inode = &io->op.ei->vfs_inode;
+ i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
- spin_lock(&inode->i_lock);
- inode->i_blocks += io->op.sectors_added;
- spin_unlock(&inode->i_lock);
- }
+ bio_for_each_folio_all(fv, bio, iter) {
+ struct bch_folio *s = __bch2_folio(fv.fv_folio);
- bio_for_each_segment_all(bvec, bio, i)
- end_page_writeback(bvec->bv_page);
+ if (atomic_dec_and_test(&s->write_count))
+ folio_end_writeback(fv.fv_folio);
+ }
- closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
+ bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
struct bch_writepage_io *io = w->io;
w->io = NULL;
- atomic_add(io->bio.bio.bi_vcnt, &io->op.op.c->writeback_pages);
-
- io->op.op.pos.offset = io->bio.bio.bi_iter.bi_sector;
-
- closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
- continue_at(&io->cl, bch2_writepage_io_done, NULL);
+ closure_call(&io->op.cl, bch2_write, NULL, NULL);
}
/*
* Get a bch_writepage_io and add @page to it - appending to an existing one if
* possible, else allocating a new one:
*/
static void bch2_writepage_io_alloc(struct bch_fs *c,
+ struct writeback_control *wbc,
struct bch_writepage_state *w,
- struct bch_inode_info *ei,
- struct page *page)
-{
- u64 inum = ei->vfs_inode.i_ino;
- unsigned nr_replicas = page_state(page)->nr_replicas;
-
- EBUG_ON(!nr_replicas);
- /* XXX: disk_reservation->gen isn't plumbed through */
-
- if (!w->io) {
-alloc_io:
- w->io = container_of(bio_alloc_bioset(GFP_NOFS,
- BIO_MAX_PAGES,
- bch2_writepage_bioset),
- struct bch_writepage_io, bio.bio);
-
- closure_init(&w->io->cl, NULL);
- w->io->op.ei = ei;
- w->io->op.sectors_added = 0;
- w->io->op.is_dio = false;
- bch2_write_op_init(&w->io->op.op, c, &w->io->bio,
- (struct disk_reservation) {
- .nr_replicas = c->opts.data_replicas,
- },
- foreground_write_point(c, inum),
- POS(inum, 0),
- &ei->journal_seq, 0);
- w->io->op.op.index_update_fn = bchfs_write_index_update;
- }
-
- if (w->io->op.op.res.nr_replicas != nr_replicas ||
- bio_add_page_contig(&w->io->bio.bio, page)) {
- bch2_writepage_do_io(w);
- goto alloc_io;
- }
-
- /*
- * We shouldn't ever be handed pages for multiple inodes in a single
- * pass - right?
- */
- BUG_ON(ei != w->io->op.ei);
-}
-
-static int __bch2_writepage(struct bch_fs *c, struct page *page,
+ struct bch_inode_info *inode,
+ u64 sector,
+ unsigned nr_replicas)
+{
+ struct bch_write_op *op;
+
+ w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
+ REQ_OP_WRITE,
+ GFP_NOFS,
+ &c->writepage_bioset),
+ struct bch_writepage_io, op.wbio.bio);
+
+ w->io->inode = inode;
+ op = &w->io->op;
+ bch2_write_op_init(op, c, w->opts);
+ op->target = w->opts.foreground_target;
+ op->nr_replicas = nr_replicas;
+ op->res.nr_replicas = nr_replicas;
+ op->write_point = writepoint_hashed(inode->ei_last_dirtied);
+ op->subvol = inode->ei_subvol;
+ op->pos = POS(inode->v.i_ino, sector);
+ op->end_io = bch2_writepage_io_done;
+ op->devs_need_flush = &inode->ei_devs_need_flush;
+ op->wbio.bio.bi_iter.bi_sector = sector;
+ op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
+}
+
+static int __bch2_writepage(struct page *_page,
struct writeback_control *wbc,
- struct bch_writepage_state *w)
-{
- struct inode *inode = page->mapping->host;
- struct bch_inode_info *ei = to_bch_ei(inode);
- struct bch_page_state new, old;
- unsigned offset;
- loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_SHIFT;
+ void *data)
+{
+ struct folio *folio = page_folio(_page);
+ struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_writepage_state *w = data;
+ struct bch_folio *s;
+ unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
+ loff_t i_size = i_size_read(&inode->v);
+ int ret;
- EBUG_ON(!PageUptodate(page));
+ EBUG_ON(!folio_test_uptodate(folio));
- /* Is the page fully inside i_size? */
- if (page->index < end_index)
+ /* Is the folio fully inside i_size? */
+ if (folio_end_pos(folio) <= i_size)
goto do_io;
- /* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_SIZE - 1);
- if (page->index > end_index || !offset) {
- unlock_page(page);
+ /* Is the folio fully outside i_size? (truncate in progress) */
+ if (folio_pos(folio) >= i_size) {
+ folio_unlock(folio);
return 0;
}
/*
- * The page straddles i_size. It must be zeroed out on each and every
+ * The folio straddles i_size. It must be zeroed out on each and every
* writepage invocation because it may be mmapped. "A file is mapped
- * in multiples of the page size. For a file that is not a multiple of
- * the page size, the remaining memory is zeroed when mapped, and
+ * in multiples of the folio size. For a file that is not a multiple of
+ * the folio size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio,
+ i_size - folio_pos(folio),
+ folio_size(folio));
do_io:
- bch2_writepage_io_alloc(c, w, ei, page);
-
- /* while page is locked: */
- w->io->op.new_i_size = i_size;
+ f_sectors = folio_sectors(folio);
+ s = bch2_folio_create(folio, __GFP_NOFAIL);
+
+ if (f_sectors > w->tmp_sectors) {
+ kfree(w->tmp);
+ w->tmp = kzalloc(sizeof(struct bch_folio_sector) *
+ f_sectors, __GFP_NOFAIL);
+ w->tmp_sectors = f_sectors;
+ }
- if (wbc->sync_mode == WB_SYNC_ALL)
- w->io->bio.bio.bi_opf |= REQ_SYNC;
+ /*
+ * Things get really hairy with errors during writeback:
+ */
+ ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
+ BUG_ON(ret);
- /* Before unlocking the page, transfer reservation to w->io: */
- old = page_state_cmpxchg(page_state(page), new, {
- EBUG_ON(!new.reserved &&
- (new.sectors != PAGE_SECTORS ||
- !new.allocated));
+ /* Before unlocking the page, get copy of reservations: */
+ spin_lock(&s->lock);
+ memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
- if (new.allocated &&
- w->io->op.op.compression_type != BCH_COMPRESSION_NONE)
- new.allocated = 0;
- else if (!new.reserved)
- goto out;
- new.reserved = 0;
- });
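+ /*
+ * The bio we build needs a single replication factor: take the
+ * minimum of replicas already allocated plus replicas reserved,
+ * across every dirty sector in the folio:
+ */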
+ for (i = 0; i < f_sectors; i++) {
+ if (s->s[i].state < SECTOR_dirty)
+ continue;
- w->io->op.op.res.sectors += PAGE_SECTORS *
- (old.reserved - new.reserved) *
- old.nr_replicas;
-out:
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
- unlock_page(page);
+ nr_replicas_this_write =
+ min_t(unsigned, nr_replicas_this_write,
+ s->s[i].nr_replicas +
+ s->s[i].replicas_reserved);
+ }
- return 0;
-}
+ for (i = 0; i < f_sectors; i++) {
+ if (s->s[i].state < SECTOR_dirty)
+ continue;
-int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
-{
- struct bch_fs *c = mapping->host->i_sb->s_fs_info;
- struct bch_writepage_state w = { NULL };
- struct pagecache_iter iter;
- struct page *page;
- int ret = 0;
- int done = 0;
- pgoff_t uninitialized_var(writeback_index);
- pgoff_t index;
- pgoff_t end; /* Inclusive */
- pgoff_t done_index;
- int cycled;
- int range_whole = 0;
- int tag;
-
- if (wbc->range_cyclic) {
- writeback_index = mapping->writeback_index; /* prev offset */
- index = writeback_index;
- if (index == 0)
- cycled = 1;
- else
- cycled = 0;
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
- cycled = 1; /* ignore range_cyclic tests */
- }
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag = PAGECACHE_TAG_TOWRITE;
- else
- tag = PAGECACHE_TAG_DIRTY;
-retry:
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag_pages_for_writeback(mapping, index, end);
-
- done_index = index;
-get_pages:
- for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
- done_index = page->index;
-
- if (w.io &&
- !bio_can_add_page_contig(&w.io->bio.bio, page))
- bch2_writepage_do_io(&w);
-
- if (!w.io &&
- atomic_read(&c->writeback_pages) >=
- c->writeback_pages_max) {
- /* don't sleep with pages pinned: */
- pagecache_iter_release(&iter);
-
- __wait_event(c->writeback_wait,
- atomic_read(&c->writeback_pages) <
- c->writeback_pages_max);
- goto get_pages;
- }
+ s->s[i].nr_replicas = w->opts.compression
+ ? 0 : nr_replicas_this_write;
- lock_page(page);
+ s->s[i].replicas_reserved = 0;
+ folio_sector_set(folio, s, i, SECTOR_allocated);
+ }
+ spin_unlock(&s->lock);
- /*
- * Page truncated or invalidated. We can freely skip it
- * then, even for data integrity operations: the page
- * has disappeared concurrently, so there could be no
- * real expectation of this data interity operation
- * even if there is now a new, dirty page at the same
- * pagecache address.
- */
- if (unlikely(page->mapping != mapping)) {
-continue_unlock:
- unlock_page(page);
- continue;
- }
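+ /*
+ * write_count is one reference per bio this folio gets added to,
+ * plus an initial reference dropped at the end of this function;
+ * whoever drops the last reference calls folio_end_writeback():
+ */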
+ BUG_ON(atomic_read(&s->write_count));
+ atomic_set(&s->write_count, 1);
- if (!PageDirty(page)) {
- /* someone wrote it for us */
- goto continue_unlock;
- }
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
- if (PageWriteback(page)) {
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
- else
- goto continue_unlock;
- }
+ folio_unlock(folio);
- BUG_ON(PageWriteback(page));
- if (!clear_page_dirty_for_io(page))
- goto continue_unlock;
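+ /*
+ * Walk the folio in runs of contiguous dirty sectors: each run is
+ * appended to the current bio, and we start a new bio whenever the
+ * replication factor changes, the bio fills up, or the run isn't
+ * contiguous with where the bio ends:
+ */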
+ offset = 0;
+ while (1) {
+ unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
+ u64 sector;
- trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
- ret = __bch2_writepage(c, page, wbc, &w);
- if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
- ret = 0;
- } else {
- /*
- * done_index is set past this page,
- * so media errors will not choke
- * background writeout for the entire
- * file. This has consequences for
- * range_cyclic semantics (ie. it may
- * not be suitable for data integrity
- * writeout).
- */
- done_index = page->index + 1;
- done = 1;
- break;
- }
- }
+ while (offset < f_sectors &&
+ w->tmp[offset].state < SECTOR_dirty)
+ offset++;
- /*
- * We stop writing back only if we are not doing
- * integrity sync. In case of integrity sync we have to
- * keep going until we have written all the pages
- * we tagged for writeback prior to entering this loop.
- */
- if (--wbc->nr_to_write <= 0 &&
- wbc->sync_mode == WB_SYNC_NONE) {
- done = 1;
+ if (offset == f_sectors)
break;
+
+ while (offset + sectors < f_sectors &&
+ w->tmp[offset + sectors].state >= SECTOR_dirty) {
+ reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
+ dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
+ sectors++;
}
+ BUG_ON(!sectors);
+
+ sector = folio_sector(folio) + offset;
+
+ if (w->io &&
+ (w->io->op.res.nr_replicas != nr_replicas_this_write ||
+ bio_full(&w->io->op.wbio.bio, sectors << 9) ||
+ w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
+ (BIO_MAX_VECS * PAGE_SIZE) ||
+ bio_end_sector(&w->io->op.wbio.bio) != sector))
+ bch2_writepage_do_io(w);
+
+ if (!w->io)
+ bch2_writepage_io_alloc(c, wbc, w, inode, sector,
+ nr_replicas_this_write);
+
+ atomic_inc(&s->write_count);
+
+ BUG_ON(inode != w->io->inode);
+ BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
+ sectors << 9, offset << 9));
+
+ /* Check for writing past i_size: */
+ WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
+ round_up(i_size, block_bytes(c)) &&
+ !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
+ "writing past i_size: %llu > %llu (unrounded %llu)\n",
+ bio_end_sector(&w->io->op.wbio.bio) << 9,
+ round_up(i_size, block_bytes(c)),
+ i_size);
+
+ w->io->op.res.sectors += reserved_sectors;
+ w->io->op.i_sectors_delta -= dirty_sectors;
+ w->io->op.new_i_size = i_size;
+
+ offset += sectors;
}
- pagecache_iter_release(&iter);
-
- if (w.io)
- bch2_writepage_do_io(&w);
- if (!cycled && !done) {
- /*
- * range_cyclic:
- * We hit the last page and there is more work to be done: wrap
- * back to the start of the file
- */
- cycled = 1;
- index = 0;
- end = writeback_index - 1;
- goto retry;
- }
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = done_index;
+ if (atomic_dec_and_test(&s->write_count))
+ folio_end_writeback(folio);
- return ret;
+ return 0;
}
-int bch2_writepage(struct page *page, struct writeback_control *wbc)
+int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
- struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
- struct bch_writepage_state w = { NULL };
+ struct bch_fs *c = mapping->host->i_sb->s_fs_info;
+ struct bch_writepage_state w =
+ bch_writepage_state_init(c, to_bch_ei(mapping->host));
+ struct blk_plug plug;
int ret;
- ret = __bch2_writepage(c, page, wbc, &w);
+ blk_start_plug(&plug);
+ ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
if (w.io)
bch2_writepage_do_io(&w);
-
- return ret;
+ blk_finish_plug(&plug);
+ kfree(w.tmp);
+ return bch2_err_class(ret);
}
-static void bch2_read_single_page_end_io(struct bio *bio)
-{
- complete(bio->bi_private);
-}
+/* buffered writes: */
-static int bch2_read_single_page(struct page *page,
- struct address_space *mapping)
+int bch2_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct page **pagep, void **fsdata)
{
- struct inode *inode = mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
- struct bch_read_bio *rbio;
- int ret;
- DECLARE_COMPLETION_ONSTACK(done);
+ struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation *res;
+ struct folio *folio;
+ unsigned offset;
+ int ret = -ENOMEM;
- rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1,
- &c->bio_read),
- struct bch_read_bio, bio);
- rbio->bio.bi_private = &done;
- rbio->bio.bi_end_io = bch2_read_single_page_end_io;
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
- __bchfs_readpage(c, rbio, inode->i_ino, page);
- wait_for_completion(&done);
+ bch2_folio_reservation_init(c, inode, res);
+ *fsdata = res;
- ret = rbio->bio.bi_error;
- bio_put(&rbio->bio);
+ bch2_pagecache_add_get(inode);
- if (ret < 0)
- return ret;
-
- SetPageUptodate(page);
- return 0;
-}
-
-int bch2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- struct inode *inode = mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
- pgoff_t index = pos >> PAGE_SHIFT;
- unsigned offset = pos & (PAGE_SIZE - 1);
- struct page *page;
- int ret = -ENOMEM;
-
- BUG_ON(inode_unhashed(mapping->host));
-
- /* Not strictly necessary - same reason as mkwrite(): */
- pagecache_add_get(&mapping->add_lock);
-
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
+ folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
+ FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
+ mapping_gfp_mask(mapping));
+ if (!folio)
goto err_unlock;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
goto out;
- /* If we're writing entire page, don't need to read it in first: */
- if (len == PAGE_SIZE)
+ offset = pos - folio_pos(folio);
+ len = min_t(size_t, len, folio_end_pos(folio) - pos);
+
+ /* If we're writing entire folio, don't need to read it in first: */
+ if (!offset && len == folio_size(folio))
goto out;
- if (!offset && pos + len >= inode->i_size) {
- zero_user_segment(page, len, PAGE_SIZE);
- flush_dcache_page(page);
+ if (!offset && pos + len >= inode->v.i_size) {
+ folio_zero_segment(folio, len, folio_size(folio));
+ flush_dcache_folio(folio);
goto out;
}
- if (index > inode->i_size >> PAGE_SHIFT) {
- zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
- flush_dcache_page(page);
+ if (folio_pos(folio) >= inode->v.i_size) {
+ folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
+ flush_dcache_folio(folio);
goto out;
}
readpage:
- ret = bch2_read_single_page(page, mapping);
+ ret = bch2_read_single_folio(folio, mapping);
if (ret)
goto err;
out:
- ret = bch2_get_page_reservation(c, page, true);
+ if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto err;
+ }
+
+ ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
if (ret) {
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
/*
- * If the page hasn't been read in, we won't know if we
+ * If the folio hasn't been read in, we won't know if we
* actually need a reservation - we don't actually need
- * to read here, we just need to check if the page is
+ * to read here, we just need to check if the folio is
* fully backed by uncompressed data:
*/
goto readpage;
}

goto err;
}
- *pagep = page;
+ *pagep = &folio->page;
return 0;
err:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
*pagep = NULL;
err_unlock:
- pagecache_add_put(&mapping->add_lock);
- return ret;
+ bch2_pagecache_add_put(inode);
+ kfree(res);
+ *fsdata = NULL;
+ return bch2_err_class(ret);
}
-int bch2_write_end(struct file *filp, struct address_space *mapping,
+int bch2_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- struct inode *inode = page->mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
+ struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation *res = fsdata;
+ struct folio *folio = page_folio(page);
+ unsigned offset = pos - folio_pos(folio);
- lockdep_assert_held(&inode->i_rwsem);
+ lockdep_assert_held(&inode->v.i_rwsem);
+ BUG_ON(offset + copied > folio_size(folio));
- if (unlikely(copied < len && !PageUptodate(page))) {
+ if (unlikely(copied < len && !folio_test_uptodate(folio))) {
/*
- * The page needs to be read in, but that would destroy
+ * The folio needs to be read in, but that would destroy
* our partial write - simplest thing is to just force
* userspace to redo the write:
*/
- zero_user(page, 0, PAGE_SIZE);
- flush_dcache_page(page);
+ folio_zero_range(folio, 0, folio_size(folio));
+ flush_dcache_folio(folio);
copied = 0;
}
- if (pos + copied > inode->i_size)
- i_size_write(inode, pos + copied);
+ spin_lock(&inode->v.i_lock);
+ if (pos + copied > inode->v.i_size)
+ i_size_write(&inode->v, pos + copied);
+ spin_unlock(&inode->v.i_lock);
if (copied) {
- if (!PageUptodate(page))
- SetPageUptodate(page);
- if (!PageDirty(page))
- set_page_dirty(page);
- } else {
- bch2_put_page_reservation(c, page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+
+ bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
+
+ inode->ei_last_dirtied = (unsigned long) current;
}
- unlock_page(page);
- put_page(page);
- pagecache_add_put(&mapping->add_lock);
+ folio_unlock(folio);
+ folio_put(folio);
+ bch2_pagecache_add_put(inode);
+
+ bch2_folio_reservation_put(c, inode, res);
+ kfree(res);
return copied;
}
-/* O_DIRECT */
+static noinline void folios_trunc(folios *folios, struct folio **fi)
+{
+ while (folios->data + folios->nr > fi) {
+ struct folio *f = darray_pop(folios);
+
+ folio_unlock(f);
+ folio_put(f);
+ }
+}
+
+static int __bch2_buffered_write(struct bch_inode_info *inode,
+ struct address_space *mapping,
+ struct iov_iter *iter,
+ loff_t pos, unsigned len)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation res;
+ folios folios;
+ struct folio **fi, *f;
+ unsigned copied = 0, f_offset;
+ loff_t end = pos + len, f_pos;
+ loff_t last_folio_pos = inode->v.i_size;
+ int ret = 0;
+
+ BUG_ON(!len);
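+
+ /*
+ * A buffered write in four passes: pin and lock every folio the
+ * write touches (reading in partially overwritten folios at either
+ * end), get disk reservations, copy from the iter, then mark
+ * everything dirty - truncating the write if any step comes up
+ * short:
+ */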
+
+ bch2_folio_reservation_init(c, inode, &res);
+ darray_init(&folios);
+
+ ret = filemap_get_contig_folios_d(mapping, pos, end,
+ FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
+ mapping_gfp_mask(mapping),
+ &folios);
+ if (ret)
+ goto out;
+
+ BUG_ON(!folios.nr);
+
+ f = darray_first(folios);
+ if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
+ ret = bch2_read_single_folio(f, mapping);
+ if (ret)
+ goto out;
+ }
+
+ f = darray_last(folios);
+ end = min(end, folio_end_pos(f));
+ last_folio_pos = folio_pos(f);
+ if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
+ if (end >= inode->v.i_size) {
+ folio_zero_range(f, 0, folio_size(f));
+ } else {
+ ret = bch2_read_single_folio(f, mapping);
+ if (ret)
+ goto out;
+ }
+ }
+
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+
+ if (!bch2_folio_create(f, __GFP_NOFAIL)->uptodate) {
+ ret = bch2_folio_set(c, inode_inum(inode), fi,
+ folios.data + folios.nr - fi);
+ if (ret)
+ goto out;
+ }
+
+ /*
+ * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
+ * supposed to write as much as we have disk space for.
+ *
+ * On failure here we should still write out a partial page if
+ * we aren't completely out of disk space - we don't do that
+ * yet:
+ */
+ ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
+ if (unlikely(ret)) {
+ folios_trunc(&folios, fi);
+ if (!folios.nr)
+ goto out;
+
+ end = min(end, folio_end_pos(darray_last(folios)));
+ break;
+ }
+
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
+ }
+
+ if (mapping_writably_mapped(mapping))
+ darray_for_each(folios, fi)
+ flush_dcache_folio(*fi);
+
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+ unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
+
+ if (!f_copied) {
+ folios_trunc(&folios, fi);
+ break;
+ }
+
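+ /*
+ * If we copied less than a full folio that isn't uptodate and
+ * doesn't extend to EOF, we can't mark it uptodate without
+ * exposing stale data - throw away the partial copy and let the
+ * caller retry:
+ */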
+ if (!folio_test_uptodate(f) &&
+ f_copied != folio_size(f) &&
+ pos + copied + f_copied < inode->v.i_size) {
+ folio_zero_range(f, 0, folio_size(f));
+ folios_trunc(&folios, fi);
+ break;
+ }
+
+ flush_dcache_folio(f);
+ copied += f_copied;
+
+ if (f_copied != f_len) {
+ folios_trunc(&folios, fi + 1);
+ break;
+ }
+
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
+ }
+
+ if (!copied)
+ goto out;
+
+ end = pos + copied;
+
+ spin_lock(&inode->v.i_lock);
+ if (end > inode->v.i_size)
+ i_size_write(&inode->v, end);
+ spin_unlock(&inode->v.i_lock);
+
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(folios));
+ darray_for_each(folios, fi) {
+ struct folio *f = *fi;
+ unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+
+ if (!folio_test_uptodate(f))
+ folio_mark_uptodate(f);
+
+ bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
+
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
+ }
+
+ inode->ei_last_dirtied = (unsigned long) current;
+out:
+ darray_for_each(folios, fi) {
+ folio_unlock(*fi);
+ folio_put(*fi);
+ }
+
+ /*
+ * If the last folio added to the mapping starts beyond current EOF, we
+ * performed a short write but left around at least one post-EOF folio.
+ * Clean up the mapping before we return.
+ */
+ if (last_folio_pos >= inode->v.i_size)
+ truncate_pagecache(&inode->v, inode->v.i_size);
+
+ darray_exit(&folios);
+ bch2_folio_reservation_put(c, inode, &res);
+
+ return copied ?: ret;
+}
+
+static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ loff_t pos = iocb->ki_pos;
+ ssize_t written = 0;
+ int ret = 0;
+
+ bch2_pagecache_add_get(inode);
+
+ do {
+ unsigned offset = pos & (PAGE_SIZE - 1);
+ unsigned bytes = iov_iter_count(iter);
+again:
+ /*
+ * Bring in the user page that we will copy from _first_.
+ * Otherwise there's a nasty deadlock on copying from the
+ * same page as we're writing to, without it being marked
+ * up-to-date.
+ *
+ * Not only is this an optimisation, but it is also required
+ * to check that the address is actually valid, when atomic
+ * usercopies are used, below.
+ */
+ if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
+ bytes = min_t(unsigned long, iov_iter_count(iter),
+ PAGE_SIZE - offset);
+
+ if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+
+ if (unlikely(fatal_signal_pending(current))) {
+ ret = -EINTR;
+ break;
+ }
+
+ ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
+ if (unlikely(ret < 0))
+ break;
+
+ cond_resched();
+
+ if (unlikely(ret == 0)) {
+ /*
+ * If we were unable to copy any data at all, we must
+ * fall back to a single segment length write.
+ *
+ * If we didn't fallback here, we could livelock
+ * because not all segments in the iov can be copied at
+ * once without a pagefault.
+ */
+ bytes = min_t(unsigned long, PAGE_SIZE - offset,
+ iov_iter_single_seg_count(iter));
+ goto again;
+ }
+ pos += ret;
+ written += ret;
+ ret = 0;
+
+ balance_dirty_pages_ratelimited(mapping);
+ } while (iov_iter_count(iter));
+
+ bch2_pagecache_add_put(inode);
+
+ return written ? written : ret;
+}
+
+/* O_DIRECT reads */
+
+static void bio_check_or_release(struct bio *bio, bool check_dirty)
+{
+ if (check_dirty) {
+ bio_check_pages_dirty(bio);
+ } else {
+ bio_release_pages(bio, false);
+ bio_put(bio);
+ }
+}
static void bch2_dio_read_complete(struct closure *cl)
{
struct dio_read *dio = container_of(cl, struct dio_read, cl);
- dio->req->ki_complete(dio->req, dio->ret, 0);
- bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
+ dio->req->ki_complete(dio->req, dio->ret);
+ bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}
static void bch2_direct_IO_read_endio(struct bio *bio)
{
struct dio_read *dio = bio->bi_private;
- if (bio->bi_error)
- dio->ret = bio->bi_error;
+ if (bio->bi_status)
+ dio->ret = blk_status_to_errno(bio->bi_status);
closure_put(&dio->cl);
}
static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
+ struct dio_read *dio = bio->bi_private;
+ bool should_dirty = dio->should_dirty;
+
bch2_direct_IO_read_endio(bio);
- bio_check_pages_dirty(bio); /* transfers ownership */
+ bio_check_or_release(bio, should_dirty);
}
-static int bch2_direct_IO_read(struct bch_fs *c, struct kiocb *req,
- struct file *file, struct inode *inode,
- struct iov_iter *iter, loff_t offset)
+static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
+ struct file *file = req->ki_filp;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_io_opts opts;
struct dio_read *dio;
struct bio *bio;
+ loff_t offset = req->ki_pos;
bool sync = is_sync_kiocb(req);
+ size_t shorten;
ssize_t ret;
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
if ((offset|iter->count) & (block_bytes(c) - 1))
return -EINVAL;
ret = min_t(loff_t, iter->count,
- max_t(loff_t, 0, i_size_read(inode) - offset));
- iov_iter_truncate(iter, round_up(ret, block_bytes(c)));
+ max_t(loff_t, 0, i_size_read(&inode->v) - offset));
if (!ret)
return ret;
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
- bch2_dio_read_bioset);
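+ /*
+ * We clamped the read to i_size above, but the iter must still
+ * cover whole blocks: trim off the unaligned tail now and restore
+ * it once the bios have been set up:
+ */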
+ shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
+ iter->count -= shorten;
+
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
+ &c->dio_read_bioset);
bio->bi_end_io = bch2_direct_IO_read_endio;

dio = container_of(bio, struct dio_read, rbio.bio);
closure_init(&dio->cl, NULL);

/*
 * This is a horrible hack, just to avoid an atomic sub at the end: set
 * up the closure's remaining count so the final closure_put() either
 * completes the kiocb (async) or just drops to zero (sync):
 */
if (!sync) {
set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
atomic_set(&dio->cl.remaining,
CLOSURE_REMAINING_INITIALIZER -
CLOSURE_RUNNING +
CLOSURE_DESTRUCTOR);
} else {
atomic_set(&dio->cl.remaining,
CLOSURE_REMAINING_INITIALIZER + 1);
}

dio->req = req;
dio->ret = ret;
+ /*
+ * This is one of the sketchier things I've encountered: we have to skip
+ * the dirtying of requests that are internal from the kernel (i.e. from
+ * loopback), because we'll deadlock on page_lock.
+ */
+ dio->should_dirty = iter_is_iovec(iter);
goto start;
while (iter->count) {
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
&c->bio_read);
bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
+ bio->bi_opf = REQ_OP_READ|REQ_SYNC;
bio->bi_iter.bi_sector = offset >> 9;
bio->bi_private = dio;
ret = bio_iov_iter_get_pages(bio, iter);
if (ret < 0) {
/* XXX: fault inject this path */
- bio->bi_error = ret;
+ bio->bi_status = BLK_STS_RESOURCE;
bio_endio(bio);
break;
}
offset += bio->bi_iter.bi_size;
- bio_set_pages_dirty(bio);
+
+ if (dio->should_dirty)
+ bio_set_pages_dirty(bio);
if (iter->count)
closure_get(&dio->cl);
- bch2_read(c, container_of(bio,
- struct bch_read_bio, bio),
- inode->i_ino);
+ bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
}
+ iter->count += shorten;
+
if (sync) {
closure_sync(&dio->cl);
closure_debug_destroy(&dio->cl);
ret = dio->ret;
- bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
+ bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
return ret;
} else {
return -EIOCBQUEUED;
}
}
-static long __bch2_dio_write_complete(struct dio_write *dio)
+ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- struct file *file = dio->req->ki_filp;
+ struct file *file = iocb->ki_filp;
+ struct bch_inode_info *inode = file_bch_inode(file);
struct address_space *mapping = file->f_mapping;
- struct inode *inode = file->f_inode;
- long ret = dio->error ?: dio->written;
+ size_t count = iov_iter_count(iter);
+ ssize_t ret;
- bch2_disk_reservation_put(dio->c, &dio->res);
+ if (!count)
+ return 0; /* skip atime */
- __pagecache_block_put(&mapping->add_lock);
- inode_dio_end(inode);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ struct blk_plug plug;
- if (dio->iovec && dio->iovec != dio->inline_vecs)
- kfree(dio->iovec);
+ if (unlikely(mapping->nrpages)) {
+ ret = filemap_write_and_wait_range(mapping,
+ iocb->ki_pos,
+ iocb->ki_pos + count - 1);
+ if (ret < 0)
+ goto out;
+ }
- bio_put(&dio->bio.bio);
- return ret;
-}
+ file_accessed(file);
-static void bch2_dio_write_complete(struct closure *cl)
-{
- struct dio_write *dio = container_of(cl, struct dio_write, cl);
- struct kiocb *req = dio->req;
+ blk_start_plug(&plug);
+ ret = bch2_direct_IO_read(iocb, iter);
+ blk_finish_plug(&plug);
- req->ki_complete(req, __bch2_dio_write_complete(dio), 0);
+ if (ret >= 0)
+ iocb->ki_pos += ret;
+ } else {
+ bch2_pagecache_add_get(inode);
+ ret = generic_file_read_iter(iocb, iter);
+ bch2_pagecache_add_put(inode);
+ }
+out:
+ return bch2_err_class(ret);
}
-static void bch2_dio_write_done(struct dio_write *dio)
+/* O_DIRECT writes */
+
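+/*
+ * Returns true if the whole range is already allocated with at least
+ * nr_replicas replicas and (unless @compressed) no compressed extents -
+ * in which case an O_DIRECT overwrite can proceed without taking a new
+ * disk reservation:
+ */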
+static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
+ u64 offset, u64 size,
+ unsigned nr_replicas, bool compressed)
{
- struct bio_vec *bv;
- int i;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 end = offset + size;
+ u32 snapshot;
+ bool ret = true;
+ int err;
- dio->written += dio->iop.op.written << 9;
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
- if (dio->iop.op.error)
- dio->error = dio->iop.op.error;
+ err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (err)
+ goto err;
- bio_for_each_segment_all(bv, &dio->bio.bio, i)
- put_page(bv->bv_page);
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inum.inum, offset, snapshot),
+ BTREE_ITER_SLOTS, k, err) {
+ if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
+ break;
- if (dio->iter.count)
- bio_reset(&dio->bio.bio);
+ if (k.k->p.snapshot != snapshot ||
+ nr_replicas > bch2_bkey_replicas(c, k) ||
+ (!compressed && bch2_bkey_sectors_compressed(k))) {
+ ret = false;
+ break;
+ }
+ }
+
+ offset = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(err, BCH_ERR_transaction_restart))
+ goto retry;
+ bch2_trans_exit(&trans);
+
+ return err ? false : ret;
}
-static void bch2_do_direct_IO_write(struct dio_write *dio)
+static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
{
- struct file *file = dio->req->ki_filp;
- struct inode *inode = file->f_inode;
- struct bch_inode_info *ei = to_bch_ei(inode);
- struct bio *bio = &dio->bio.bio;
- unsigned flags = 0;
- int ret;
-
- if ((dio->req->ki_flags & IOCB_DSYNC) &&
- !dio->c->opts.journal_flush_disabled)
- flags |= BCH_WRITE_FLUSH;
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
- bio->bi_iter.bi_sector = (dio->offset + dio->written) >> 9;
+ return bch2_check_range_allocated(c, inode_inum(inode),
+ dio->op.pos.offset, bio_sectors(bio),
+ dio->op.opts.data_replicas,
+ dio->op.opts.compression != 0);
+}
- ret = bio_iov_iter_get_pages(bio, &dio->iter);
- if (ret < 0) {
- /*
- * these didn't get initialized, but bch2_dio_write_done() will
- * look at them:
- */
- dio->iop.op.error = 0;
- dio->iop.op.written = 0;
- dio->error = ret;
- return;
- }
+static void bch2_dio_write_loop_async(struct bch_write_op *);
+static __always_inline long bch2_dio_write_done(struct dio_write *dio);
- dio->iop.ei = ei;
- dio->iop.sectors_added = 0;
- dio->iop.is_dio = true;
- dio->iop.new_i_size = U64_MAX;
- bch2_write_op_init(&dio->iop.op, dio->c, &dio->bio,
- dio->res,
- foreground_write_point(dio->c, inode->i_ino),
- POS(inode->i_ino, bio->bi_iter.bi_sector),
- &ei->journal_seq, flags);
- dio->iop.op.index_update_fn = bchfs_write_index_update;
+static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
+{
+ struct iovec *iov = dio->inline_vecs;
- dio->res.sectors -= bio_sectors(bio);
- dio->iop.op.res.sectors = bio_sectors(bio);
+ if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
+ iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
+ GFP_KERNEL);
+ if (unlikely(!iov))
+ return -ENOMEM;
- task_io_account_write(bio->bi_iter.bi_size);
+ dio->free_iov = true;
+ }
- closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
+ memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
+ dio->iter.iov = iov;
+ return 0;
}
-static void bch2_dio_write_loop_async(struct closure *cl)
+static void bch2_dio_write_flush_done(struct closure *cl)
{
- struct dio_write *dio =
- container_of(cl, struct dio_write, cl);
- struct address_space *mapping = dio->req->ki_filp->f_mapping;
+ struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+ struct bch_fs *c = dio->op.c;
+
+ closure_debug_destroy(cl);
+
+ dio->op.error = bch2_journal_error(&c->journal);
bch2_dio_write_done(dio);
+}
- if (dio->iter.count && !dio->error) {
- use_mm(dio->mm);
- pagecache_block_get(&mapping->add_lock);
+static noinline void bch2_dio_write_flush(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_unpacked inode;
+ int ret;
- bch2_do_direct_IO_write(dio);
+ dio->flush = 0;
- pagecache_block_put(&mapping->add_lock);
- unuse_mm(dio->mm);
+ closure_init(&dio->op.cl, NULL);
- continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
+ if (!dio->op.error) {
+ ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
+ if (ret) {
+ dio->op.error = ret;
+ } else {
+ bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
+ bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
+ }
+ }
+
+ if (dio->sync) {
+ closure_sync(&dio->op.cl);
+ closure_debug_destroy(&dio->op.cl);
} else {
-#if 0
- closure_return_with_destructor(cl, bch2_dio_write_complete);
-#else
- closure_debug_destroy(cl);
- bch2_dio_write_complete(cl);
-#endif
+ continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
}
}
-static int bch2_direct_IO_write(struct bch_fs *c, struct kiocb *req,
- struct file *file, struct inode *inode,
- struct iov_iter *iter, loff_t offset)
+static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
- struct address_space *mapping = file->f_mapping;
- struct dio_write *dio;
- struct bio *bio;
- ssize_t ret;
- bool sync = is_sync_kiocb(req);
+ struct kiocb *req = dio->req;
+ struct bch_inode_info *inode = dio->inode;
+ bool sync = dio->sync;
+ long ret;
+
+ if (unlikely(dio->flush)) {
+ bch2_dio_write_flush(dio);
+ if (!sync)
+ return -EIOCBQUEUED;
+ }
- lockdep_assert_held(&inode->i_rwsem);
+ bch2_pagecache_block_put(inode);
- if (unlikely(!iter->count))
- return 0;
+ if (dio->free_iov)
+ kfree(dio->iter.iov);
- if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
- return -EINVAL;
+ ret = dio->op.error ?: ((long) dio->written << 9);
+ bio_put(&dio->op.wbio.bio);
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
- bch2_dio_write_bioset);
- dio = container_of(bio, struct dio_write, bio.bio);
- dio->req = req;
- dio->c = c;
- dio->written = 0;
- dio->error = 0;
- dio->offset = offset;
- dio->iovec = NULL;
- dio->iter = *iter;
- dio->mm = current->mm;
- closure_init(&dio->cl, NULL);
+ /* inode->i_dio_count is our ref on inode and thus bch_fs */
+ inode_dio_end(&inode->v);
- if (offset + iter->count > inode->i_size)
- sync = true;
+ if (ret < 0)
+ ret = bch2_err_class(ret);
- /*
- * XXX: we shouldn't return -ENOSPC if we're overwriting existing data -
- * if getting a reservation fails we should check if we are doing an
- * overwrite.
- *
- * Have to then guard against racing with truncate (deleting data that
- * we would have been overwriting)
- */
- ret = bch2_disk_reservation_get(c, &dio->res, iter->count >> 9, 0);
- if (unlikely(ret)) {
- closure_debug_destroy(&dio->cl);
- bio_put(bio);
- return ret;
+ if (!sync) {
+ req->ki_complete(req, ret);
+ ret = -EIOCBQUEUED;
}
+ return ret;
+}
- inode_dio_begin(inode);
- __pagecache_block_get(&mapping->add_lock);
+static __always_inline void bch2_dio_write_end(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
- if (sync) {
- do {
- bch2_do_direct_IO_write(dio);
+ req->ki_pos += (u64) dio->op.written << 9;
+ dio->written += dio->op.written;
- closure_sync(&dio->cl);
- bch2_dio_write_done(dio);
- } while (dio->iter.count && !dio->error);
+ if (dio->extending) {
+ spin_lock(&inode->v.i_lock);
+ if (req->ki_pos > inode->v.i_size)
+ i_size_write(&inode->v, req->ki_pos);
+ spin_unlock(&inode->v.i_lock);
+ }
- closure_debug_destroy(&dio->cl);
- return __bch2_dio_write_complete(dio);
- } else {
- bch2_do_direct_IO_write(dio);
-
- if (dio->iter.count && !dio->error) {
- if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
- dio->iovec = kmalloc(dio->iter.nr_segs *
- sizeof(struct iovec),
- GFP_KERNEL);
- if (!dio->iovec)
- dio->error = -ENOMEM;
- } else {
- dio->iovec = dio->inline_vecs;
- }
+ if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
+ __bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
- memcpy(dio->iovec,
- dio->iter.iov,
- dio->iter.nr_segs * sizeof(struct iovec));
- dio->iter.iov = dio->iovec;
- }
+ if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
+ struct bvec_iter_all iter;
+ struct folio_vec fv;
- continue_at_noreturn(&dio->cl, bch2_dio_write_loop_async, NULL);
- return -EIOCBQUEUED;
+ bio_for_each_folio_all(fv, bio, iter)
+ folio_put(fv.fv_folio);
}
+
+ if (unlikely(dio->op.error))
+ set_bit(EI_INODE_ERROR, &inode->ei_flags);
}
-ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
+static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
{
- struct file *file = req->ki_filp;
- struct inode *inode = file->f_inode;
- struct bch_fs *c = inode->i_sb->s_fs_info;
- struct blk_plug plug;
- ssize_t ret;
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct address_space *mapping = dio->mapping;
+ struct bch_inode_info *inode = dio->inode;
+ struct bch_io_opts opts;
+ struct bio *bio = &dio->op.wbio.bio;
+ unsigned unaligned, iter_count;
+ bool sync = dio->sync, dropped_locks;
+ long ret;
- blk_start_plug(&plug);
- ret = ((iov_iter_rw(iter) == WRITE)
- ? bch2_direct_IO_write
- : bch2_direct_IO_read)(c, req, file, inode, iter, req->ki_pos);
- blk_finish_plug(&plug);
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
- return ret;
-}
+ while (1) {
+ iter_count = dio->iter.count;
-static ssize_t
-bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_inode;
- struct bch_fs *c = inode->i_sb->s_fs_info;
- struct address_space *mapping = file->f_mapping;
- loff_t pos = iocb->ki_pos;
- ssize_t ret;
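+ /*
+ * Getting the user pages may fault on the very mapping we're
+ * writing to, which would deadlock against the pagecache block
+ * lock we hold; flag the mapping so the fault path can detect the
+ * recursion and back off instead:
+ */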
+ EBUG_ON(current->faults_disabled_mapping);
+ current->faults_disabled_mapping = mapping;
- pagecache_block_get(&mapping->add_lock);
+ ret = bio_iov_iter_get_pages(bio, &dio->iter);
- /* Write and invalidate pagecache range that we're writing to: */
- ret = write_invalidate_inode_pages_range(file->f_mapping, pos,
- pos + iov_iter_count(iter) - 1);
- if (unlikely(ret))
- goto err;
+ dropped_locks = fdm_dropped_locks();
+
+ current->faults_disabled_mapping = NULL;
+
+ /*
+ * If the fault handler returned an error but also signalled
+ * that it dropped & retook ei_pagecache_lock, we just need to
+ * re-shoot down the page cache and retry:
+ */
+ if (dropped_locks && ret)
+ ret = 0;
+
+ if (unlikely(ret < 0))
+ goto err;
+
+ if (unlikely(dropped_locks)) {
+ ret = write_invalidate_inode_pages_range(mapping,
+ req->ki_pos,
+ req->ki_pos + iter_count - 1);
+ if (unlikely(ret))
+ goto err;
+
+ if (!bio->bi_iter.bi_size)
+ continue;
+ }
+
+ unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
+ bio->bi_iter.bi_size -= unaligned;
+ iov_iter_revert(&dio->iter, unaligned);
- ret = bch2_direct_IO_write(c, iocb, file, inode, iter, pos);
+ if (!bio->bi_iter.bi_size) {
+ /*
+ * bio_iov_iter_get_pages was only able to get <
+ * blocksize worth of pages:
+ */
+ ret = -EFAULT;
+ goto err;
+ }
+
+ bch2_write_op_init(&dio->op, c, opts);
+ dio->op.end_io = sync
+ ? NULL
+ : bch2_dio_write_loop_async;
+ dio->op.target = dio->op.opts.foreground_target;
+ dio->op.write_point = writepoint_hashed((unsigned long) current);
+ dio->op.nr_replicas = dio->op.opts.data_replicas;
+ dio->op.subvol = inode->ei_subvol;
+ dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
+ dio->op.devs_need_flush = &inode->ei_devs_need_flush;
+
+ if (sync)
+ dio->op.flags |= BCH_WRITE_SYNC;
+ dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
+
+ ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
+ bio_sectors(bio), true);
+ if (unlikely(ret))
+ goto err;
+
+ ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
+ dio->op.opts.data_replicas, 0);
+ if (unlikely(ret) &&
+ !bch2_dio_write_check_allocated(dio))
+ goto err;
+
+ task_io_account_write(bio->bi_iter.bi_size);
+
+ if (unlikely(dio->iter.count) &&
+ !dio->sync &&
+ !dio->loop &&
+ bch2_dio_write_copy_iov(dio))
+ dio->sync = sync = true;
+
+ dio->loop = true;
+ closure_call(&dio->op.cl, bch2_write, NULL, NULL);
+
+ if (!sync)
+ return -EIOCBQUEUED;
+
+ bch2_dio_write_end(dio);
+
+ if (likely(!dio->iter.count) || dio->op.error)
+ break;
+
+ bio_reset(bio, NULL, REQ_OP_WRITE);
+ }
+out:
+ return bch2_dio_write_done(dio);
err:
- pagecache_block_put(&mapping->add_lock);
+ dio->op.error = ret;
- return ret;
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+ struct bvec_iter_all iter;
+ struct folio_vec fv;
+
+ bio_for_each_folio_all(fv, bio, iter)
+ folio_put(fv.fv_folio);
+ }
+
+ bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ goto out;
}
-static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- ssize_t ret;
+ struct mm_struct *mm = dio->mm;
- /* We can write back this queue in page reclaim */
- current->backing_dev_info = inode_to_bdi(inode);
- ret = file_remove_privs(file);
- if (ret)
- goto out;
+ bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
- ret = file_update_time(file);
- if (ret)
- goto out;
+ if (mm)
+ kthread_use_mm(mm);
+ bch2_dio_write_loop(dio);
+ if (mm)
+ kthread_unuse_mm(mm);
+}
- ret = iocb->ki_flags & IOCB_DIRECT
- ? bch2_direct_write(iocb, from)
- : generic_perform_write(file, from, iocb->ki_pos);
+static void bch2_dio_write_loop_async(struct bch_write_op *op)
+{
+ struct dio_write *dio = container_of(op, struct dio_write, op);
- if (likely(ret > 0))
- iocb->ki_pos += ret;
-out:
- current->backing_dev_info = NULL;
- return ret;
+ bch2_dio_write_end(dio);
+
+ if (likely(!dio->iter.count) || dio->op.error)
+ bch2_dio_write_done(dio);
+ else
+ bch2_dio_write_continue(dio);
}
-ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static noinline
+ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- bool direct = iocb->ki_flags & IOCB_DIRECT;
+ struct file *file = req->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct dio_write *dio;
+ struct bio *bio;
+ bool locked = true, extending;
ssize_t ret;
- inode_lock(inode);
- ret = generic_write_checks(iocb, from);
- if (ret > 0)
- ret = __bch2_write_iter(iocb, from);
- inode_unlock(inode);
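+ /* warm up the cachelines the submission path is about to touch: */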
+ prefetch(&c->opts);
+ prefetch((void *) &c->opts + 64);
+ prefetch(&inode->ei_inode);
+ prefetch((void *) &inode->ei_inode + 64);
- if (ret > 0 && !direct)
- ret = generic_write_sync(iocb, ret);
+ inode_lock(&inode->v);
- return ret;
-}
+ ret = generic_write_checks(req, iter);
+ if (unlikely(ret <= 0))
+ goto err;
-int bch2_page_mkwrite(struct vm_fault *vmf)
-{
- struct page *page = vmf->page;
- struct file *file = vmf->vma->vm_file;
- struct inode *inode = file_inode(file);
- struct address_space *mapping = inode->i_mapping;
- struct bch_fs *c = inode->i_sb->s_fs_info;
- int ret = VM_FAULT_LOCKED;
+ ret = file_remove_privs(file);
+ if (unlikely(ret))
+ goto err;
- sb_start_pagefault(inode->i_sb);
- file_update_time(file);
+ ret = file_update_time(file);
+ if (unlikely(ret))
+ goto err;
- /*
- * Not strictly necessary, but helps avoid dio writes livelocking in
- * write_invalidate_inode_pages_range() - can drop this if/when we get
- * a write_invalidate_inode_pages_range() that works without dropping
- * page lock before invalidating page
- */
- if (current->pagecache_lock != &mapping->add_lock)
- pagecache_add_get(&mapping->add_lock);
+ if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
+ ret = -EINVAL;
+ goto err;
+ }
- lock_page(page);
- if (page->mapping != mapping ||
- page_offset(page) > i_size_read(inode)) {
- unlock_page(page);
- ret = VM_FAULT_NOPAGE;
- goto out;
+ inode_dio_begin(&inode->v);
+ bch2_pagecache_block_get(inode);
+
+ extending = req->ki_pos + iter->count > inode->v.i_size;
+ if (!extending) {
+ inode_unlock(&inode->v);
+ locked = false;
}
- if (bch2_get_page_reservation(c, page, true)) {
- unlock_page(page);
- ret = VM_FAULT_SIGBUS;
- goto out;
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_WRITE,
+ GFP_KERNEL,
+ &c->dio_write_bioset);
+ dio = container_of(bio, struct dio_write, op.wbio.bio);
+ dio->req = req;
+ dio->mapping = mapping;
+ dio->inode = inode;
+ dio->mm = current->mm;
+ dio->loop = false;
+ dio->extending = extending;
+ dio->sync = is_sync_kiocb(req) || extending;
+ dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
+ dio->free_iov = false;
+ dio->quota_res.sectors = 0;
+ dio->written = 0;
+ dio->iter = *iter;
+ dio->op.c = c;
+
+ if (unlikely(mapping->nrpages)) {
+ ret = write_invalidate_inode_pages_range(mapping,
+ req->ki_pos,
+ req->ki_pos + iter->count - 1);
+ if (unlikely(ret))
+ goto err_put_bio;
}
- if (!PageDirty(page))
- set_page_dirty(page);
- wait_for_stable_page(page);
-out:
- if (current->pagecache_lock != &mapping->add_lock)
- pagecache_add_put(&mapping->add_lock);
- sb_end_pagefault(inode->i_sb);
+ ret = bch2_dio_write_loop(dio);
+err:
+ if (locked)
+ inode_unlock(&inode->v);
return ret;
+err_put_bio:
+ bch2_pagecache_block_put(inode);
+ bio_put(bio);
+ inode_dio_end(&inode->v);
+ goto err;
}
-void bch2_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- EBUG_ON(!PageLocked(page));
- EBUG_ON(PageWriteback(page));
+ struct file *file = iocb->ki_filp;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ ssize_t ret;
- if (offset || length < PAGE_SIZE)
- return;
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ret = bch2_direct_write(iocb, from);
+ goto out;
+ }
- bch2_clear_page_bits(page);
-}
+ /* We can write back this queue in page reclaim */
+ current->backing_dev_info = inode_to_bdi(&inode->v);
+ inode_lock(&inode->v);
-int bch2_releasepage(struct page *page, gfp_t gfp_mask)
-{
- EBUG_ON(!PageLocked(page));
- EBUG_ON(PageWriteback(page));
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto unlock;
- if (PageDirty(page))
- return 0;
+ ret = file_remove_privs(file);
+ if (ret)
+ goto unlock;
+
+ ret = file_update_time(file);
+ if (ret)
+ goto unlock;
+
+ ret = bch2_buffered_write(iocb, from);
+ if (likely(ret > 0))
+ iocb->ki_pos += ret;
+unlock:
+ inode_unlock(&inode->v);
+ current->backing_dev_info = NULL;
- bch2_clear_page_bits(page);
- return 1;
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+out:
+ return bch2_err_class(ret);
}
-#ifdef CONFIG_MIGRATION
-int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
+/* fsync: */
+
+/*
+ * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
+ * insert trigger: look up the btree inode instead
+ */
+static int bch2_flush_inode(struct bch_fs *c,
+ struct bch_inode_info *inode)
{
+ struct bch_inode_unpacked u;
int ret;
- ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
- if (ret != MIGRATEPAGE_SUCCESS)
- return ret;
+ if (c->opts.journal_flush_disabled)
+ return 0;
- if (PagePrivate(page)) {
- *page_state(newpage) = *page_state(page);
- ClearPagePrivate(page);
- }
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
+ if (ret)
+ return ret;
- migrate_page_copy(newpage, page);
- return MIGRATEPAGE_SUCCESS;
+ return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+ bch2_inode_flush_nocow_writes(c, inode);
}
-#endif
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file->f_mapping->host;
- struct bch_inode_info *ei = to_bch_ei(inode);
- struct bch_fs *c = inode->i_sb->s_fs_info;
- int ret;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ int ret, ret2, ret3;
+
+ ret = file_write_and_wait_range(file, start, end);
+ ret2 = sync_inode_metadata(&inode->v, 1);
+ ret3 = bch2_flush_inode(c, inode);
+
+ return bch2_err_class(ret ?: ret2 ?: ret3);
+}
+
+/* truncate: */
+
+static inline int range_has_data(struct bch_fs *c, u32 subvol,
+ struct bpos start,
+ struct bpos end)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
if (ret)
- return ret;
+ goto err;
- if (c->opts.journal_flush_disabled)
- return 0;
+ for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
+ if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
+ ret = 1;
+ break;
+ }
+ start = iter.pos;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- return bch2_journal_flush_seq(&c->journal, ei->journal_seq);
+ bch2_trans_exit(&trans);
+ return ret;
}
-static int __bch2_truncate_page(struct address_space *mapping,
- pgoff_t index, loff_t start, loff_t end)
+static int __bch2_truncate_folio(struct bch_inode_info *inode,
+ pgoff_t index, loff_t start, loff_t end)
{
- struct inode *inode = mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct address_space *mapping = inode->v.i_mapping;
+ struct bch_folio *s;
unsigned start_offset = start & (PAGE_SIZE - 1);
unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
- struct page *page;
+ unsigned i;
+ struct folio *folio;
+ s64 i_sectors_delta = 0;
int ret = 0;
+ loff_t end_pos;
- /* Page boundary? Nothing to do */
- if (!((index == start >> PAGE_SHIFT && start_offset) ||
- (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
- return 0;
-
- /* Above i_size? */
- if (index << PAGE_SHIFT >= inode->i_size)
- return 0;
-
- page = find_lock_page(mapping, index);
- if (!page) {
- struct btree_iter iter;
- struct bkey_s_c k = bkey_s_c_null;
-
+ folio = filemap_lock_folio(mapping, index);
+ if (!folio) {
/*
* XXX: we're doing two index lookups when we end up reading the
- * page
+ * folio
*/
- for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
- POS(inode->i_ino,
- index << (PAGE_SHIFT - 9)), 0, k) {
- if (bkey_cmp(bkey_start_pos(k.k),
- POS(inode->i_ino,
- (index + 1) << (PAGE_SHIFT - 9))) >= 0)
- break;
-
- if (k.k->type != KEY_TYPE_DISCARD &&
- k.k->type != BCH_RESERVATION) {
- bch2_btree_iter_unlock(&iter);
- goto create;
- }
- }
- bch2_btree_iter_unlock(&iter);
- return 0;
-create:
- page = find_or_create_page(mapping, index, GFP_KERNEL);
- if (unlikely(!page)) {
+ ret = range_has_data(c, inode->ei_subvol,
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
+ if (ret <= 0)
+ return ret;
+
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK|FGP_CREAT, GFP_KERNEL);
+ if (unlikely(!folio)) {
ret = -ENOMEM;
goto out;
}
}
- if (!PageUptodate(page)) {
- ret = bch2_read_single_page(page, mapping);
+ BUG_ON(start >= folio_end_pos(folio));
+ BUG_ON(end <= folio_pos(folio));
+
+ start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
+ end_offset = min(end, folio_end_pos(folio)) - folio_pos(folio);
+
+ /* Folio boundary? Nothing to do */
+ if (start_offset == 0 &&
+ end_offset == folio_size(folio)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ s = bch2_folio_create(folio, 0);
+ if (!s) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ if (!folio_test_uptodate(folio)) {
+ ret = bch2_read_single_folio(folio, mapping);
if (ret)
goto unlock;
}
+ if (!s->uptodate) {
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto unlock;
+ }
+
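+ /*
+ * Blocks fully inside the hole are going away: drop their
+ * allocation information, and count the dirty sectors we're
+ * discarding so i_blocks can be adjusted:
+ */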
+ for (i = round_up(start_offset, block_bytes(c)) >> 9;
+ i < round_down(end_offset, block_bytes(c)) >> 9;
+ i++) {
+ s->s[i].nr_replicas = 0;
+
+ i_sectors_delta -= s->s[i].state == SECTOR_dirty;
+ folio_sector_set(folio, s, i, SECTOR_unallocated);
+ }
+
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
+
+ /*
+ * Caller needs to know whether this folio will be written out by
+ * writeback - doing an i_size update if necessary - or whether it will
+ * be responsible for the i_size update.
+ *
+ * Note that we shouldn't ever see a folio beyond EOF, but check and
+ * warn if so. This has been observed when folios weren't cleaned up
+ * after a short write, and there's still a chance reclaim will fix
+ * things up.
+ */
+ WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
+ end_pos = folio_end_pos(folio);
+ if (inode->v.i_size > folio_pos(folio))
+ end_pos = min(inode->v.i_size, end_pos);
+ ret = s->s[(end_pos - folio_pos(folio) - 1) >> 9].state >= SECTOR_dirty;
+
+ folio_zero_segment(folio, start_offset, end_offset);
+
/*
* Bit of a hack - we don't want truncate to fail due to -ENOSPC.
*
- * XXX: because we aren't currently tracking whether the page has actual
+ * XXX: because we aren't currently tracking whether the folio has actual
* data in it (vs. just 0s, or only partially written) this is wrong. ick.
*/
- ret = bch2_get_page_reservation(c, page, false);
- BUG_ON(ret);
-
- if (index == start >> PAGE_SHIFT &&
- index == end >> PAGE_SHIFT)
- zero_user_segment(page, start_offset, end_offset);
- else if (index == start >> PAGE_SHIFT)
- zero_user_segment(page, start_offset, PAGE_SIZE);
- else if (index == end >> PAGE_SHIFT)
- zero_user_segment(page, 0, end_offset);
+ BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
- if (!PageDirty(page))
- set_page_dirty(page);
+ /*
+ * This removes any writeable userspace mappings; we need to force
+ * .page_mkwrite to be called again before any mmapped writes, to
+ * redirty the full page:
+ */
+ folio_mkclean(folio);
+ filemap_dirty_folio(mapping, folio);
unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
return ret;
}
-static int bch2_truncate_page(struct address_space *mapping, loff_t from)
+static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
+{
+ return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
+ from, ANYSINT_MAX(loff_t));
+}
+
+static int bch2_truncate_folios(struct bch_inode_info *inode,
+ loff_t start, loff_t end)
+{
+ int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
+ start, end);
+
+ if (ret >= 0 &&
+ start >> PAGE_SHIFT != end >> PAGE_SHIFT)
+ ret = __bch2_truncate_folio(inode,
+ (end - 1) >> PAGE_SHIFT,
+ start, end);
+ return ret;
+}
+
+static int bch2_extend(struct user_namespace *mnt_userns,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *inode_u,
+ struct iattr *iattr)
{
- return __bch2_truncate_page(mapping, from >> PAGE_SHIFT,
- from, from + PAGE_SIZE);
+ struct address_space *mapping = inode->v.i_mapping;
+ int ret;
+
+ /*
+ * sync appends:
+ *
+ * this has to be done _before_ extending i_size:
+ */
+ ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
+ if (ret)
+ return ret;
+
+ truncate_setsize(&inode->v, iattr->ia_size);
+
+ return bch2_setattr_nonsize(mnt_userns, inode, iattr);
}
-int bch2_truncate(struct inode *inode, struct iattr *iattr)
+static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ void *p)
{
- struct address_space *mapping = inode->i_mapping;
- struct bch_inode_info *ei = to_bch_ei(inode);
- struct bch_fs *c = inode->i_sb->s_fs_info;
- bool shrink = iattr->ia_size <= inode->i_size;
+ bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
+ return 0;
+}
+
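+/*
+ * Truncate is made crash safe with an inode flag:
+ * bch2_truncate_start_fn sets BCH_INODE_I_SIZE_DIRTY along with the new
+ * size before any extents are deleted, and bch2_truncate_finish_fn
+ * (above) clears it once they're gone - if we crash in between,
+ * recovery sees the flag and finishes the truncate:
+ */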
+static int bch2_truncate_start_fn(struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi, void *p)
+{
+ u64 *new_i_size = p;
+
+ bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
+ bi->bi_size = *new_i_size;
+ return 0;
+}
+
+int bch2_truncate(struct user_namespace *mnt_userns,
+ struct bch_inode_info *inode, struct iattr *iattr)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct address_space *mapping = inode->v.i_mapping;
+ struct bch_inode_unpacked inode_u;
+ u64 new_i_size = iattr->ia_size;
+ s64 i_sectors_delta = 0;
int ret = 0;
- inode_dio_wait(inode);
- pagecache_block_get(&mapping->add_lock);
+ /*
+ * If the truncate call will change the size of the file, the
+ * cmtimes should be updated. If the size will not change, we
+ * do not need to update the cmtimes.
+ */
+ if (iattr->ia_size != inode->v.i_size) {
+ if (!(iattr->ia_valid & ATTR_MTIME))
+ ktime_get_coarse_real_ts64(&iattr->ia_mtime);
+ if (!(iattr->ia_valid & ATTR_CTIME))
+ ktime_get_coarse_real_ts64(&iattr->ia_ctime);
+ iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
+ }
+
+ inode_dio_wait(&inode->v);
+ bch2_pagecache_block_get(inode);
- truncate_setsize(inode, iattr->ia_size);
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
+ if (ret)
+ goto err;
- /* sync appends.. */
- /* XXX what protects ei->i_size? */
- if (iattr->ia_size > ei->i_size)
- ret = filemap_write_and_wait_range(mapping, ei->i_size, S64_MAX);
+ /*
+ * check this before next assertion; on filesystem error our normal
+ * invariants are a bit broken (truncate has to truncate the page cache
+ * before the inode).
+ */
+ ret = bch2_journal_error(&c->journal);
if (ret)
- goto err_put_pagecache;
+ goto err;
- mutex_lock(&ei->update_lock);
- i_size_dirty_get(ei);
- ret = bch2_write_inode_size(c, ei, inode->i_size);
- mutex_unlock(&ei->update_lock);
+ WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+ inode->v.i_size < inode_u.bi_size,
+ "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
+ (u64) inode->v.i_size, inode_u.bi_size);
- if (unlikely(ret))
+ if (iattr->ia_size > inode->v.i_size) {
+ ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
+ goto err;
+ }
+
+ iattr->ia_valid &= ~ATTR_SIZE;
+
+ ret = bch2_truncate_folio(inode, iattr->ia_size);
+ if (unlikely(ret < 0))
goto err;
/*
- * There might be persistent reservations (from fallocate())
- * above i_size, which bch2_inode_truncate() will discard - we're
- * only supposed to discard them if we're doing a real truncate
- * here (new i_size < current i_size):
+ * When extending, we're going to write the new i_size to disk
+ * immediately so we need to flush anything above the current on disk
+ * i_size first:
+ *
+ * Also, when extending we need to flush the page that i_size currently
+ * straddles - if it's mapped to userspace, we need to ensure that
+ * userspace has to redirty it and call .mkwrite -> set_page_dirty
+ * again to allocate the part of the page that was extended.
*/
- if (shrink) {
- struct i_sectors_hook i_sectors_hook;
- int ret;
+ if (iattr->ia_size > inode_u.bi_size)
+ ret = filemap_write_and_wait_range(mapping,
+ inode_u.bi_size,
+ iattr->ia_size - 1);
+ else if (iattr->ia_size & (PAGE_SIZE - 1))
+ ret = filemap_write_and_wait_range(mapping,
+ round_down(iattr->ia_size, PAGE_SIZE),
+ iattr->ia_size - 1);
+ if (ret)
+ goto err;
- ret = i_sectors_dirty_get(ei, &i_sectors_hook);
- if (unlikely(ret))
- goto err;
+ mutex_lock(&inode->ei_update_lock);
+ ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
+ &new_i_size, 0);
+ mutex_unlock(&inode->ei_update_lock);
- ret = bch2_truncate_page(inode->i_mapping, iattr->ia_size);
- if (unlikely(ret)) {
- i_sectors_dirty_put(ei, &i_sectors_hook);
- goto err;
- }
+ if (unlikely(ret))
+ goto err;
- ret = bch2_inode_truncate(c, inode->i_ino,
- round_up(iattr->ia_size, PAGE_SIZE) >> 9,
- &i_sectors_hook.hook,
- &ei->journal_seq);
+ truncate_setsize(&inode->v, iattr->ia_size);
- i_sectors_dirty_put(ei, &i_sectors_hook);
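+ /*
+ * Delete extents from the new (block aligned) i_size to the end of the
+ * file; i_sectors_delta reports the change in on disk sectors, which we
+ * use to keep block accounting in sync:
+ */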
+ ret = bch2_fpunch(c, inode_inum(inode),
+ round_up(iattr->ia_size, block_bytes(c)) >> 9,
+ U64_MAX, &i_sectors_delta);
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
- if (unlikely(ret))
- goto err;
- }
+ bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
+ !bch2_journal_error(&c->journal), c,
+ "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks,
+ inode->ei_inode.bi_sectors);
+ if (unlikely(ret))
+ goto err;
+
+ mutex_lock(&inode->ei_update_lock);
+ ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
+ mutex_unlock(&inode->ei_update_lock);
- mutex_lock(&ei->update_lock);
- setattr_copy(inode, iattr);
- inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
+ ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
+err:
+ bch2_pagecache_block_put(inode);
+ return bch2_err_class(ret);
+}
- /* clear I_SIZE_DIRTY: */
- i_size_dirty_put(ei);
- ret = bch2_write_inode_size(c, ei, inode->i_size);
- mutex_unlock(&ei->update_lock);
+/* fallocate: */
- pagecache_block_put(&mapping->add_lock);
+static int inode_update_times_fn(struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi, void *p)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
return 0;
-err:
- i_size_dirty_put(ei);
-err_put_pagecache:
- pagecache_block_put(&mapping->add_lock);
- return ret;
}
-static long bch2_fpunch(struct inode *inode, loff_t offset, loff_t len)
+static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
- struct address_space *mapping = inode->i_mapping;
- struct bch_inode_info *ei = to_bch_ei(inode);
- struct bch_fs *c = inode->i_sb->s_fs_info;
- u64 ino = inode->i_ino;
- u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
- u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ u64 end = offset + len;
+ u64 block_start = round_up(offset, block_bytes(c));
+ u64 block_end = round_down(end, block_bytes(c));
+ bool truncated_last_page;
int ret = 0;
- inode_lock(inode);
- inode_dio_wait(inode);
- pagecache_block_get(&mapping->add_lock);
-
- ret = __bch2_truncate_page(inode->i_mapping,
- offset >> PAGE_SHIFT,
- offset, offset + len);
- if (unlikely(ret))
- goto out;
-
- if (offset >> PAGE_SHIFT !=
- (offset + len) >> PAGE_SHIFT) {
- ret = __bch2_truncate_page(inode->i_mapping,
- (offset + len) >> PAGE_SHIFT,
- offset, offset + len);
- if (unlikely(ret))
- goto out;
- }
-
- truncate_pagecache_range(inode, offset, offset + len - 1);
-
- if (discard_start < discard_end) {
- struct disk_reservation disk_res;
- struct i_sectors_hook i_sectors_hook;
- int ret;
+ ret = bch2_truncate_folios(inode, offset, end);
+ if (unlikely(ret < 0))
+ goto err;
- BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));
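+ /*
+ * A positive return from bch2_truncate_folios() means the folio
+ * straddling the end of the range was partially zeroed; remember that,
+ * since it affects which inode update we do below:
+ */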
+ truncated_last_page = ret;
- ret = i_sectors_dirty_get(ei, &i_sectors_hook);
- if (unlikely(ret))
- goto out;
+ truncate_pagecache_range(&inode->v, offset, end - 1);
- ret = bch2_discard(c,
- POS(ino, discard_start),
- POS(ino, discard_end),
- ZERO_VERSION,
- &disk_res,
- &i_sectors_hook.hook,
- &ei->journal_seq);
+ if (block_start < block_end) {
+ s64 i_sectors_delta = 0;
- i_sectors_dirty_put(ei, &i_sectors_hook);
- bch2_disk_reservation_put(c, &disk_res);
+ ret = bch2_fpunch(c, inode_inum(inode),
+ block_start >> 9, block_end >> 9,
+ &i_sectors_delta);
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
}
-out:
- pagecache_block_put(&mapping->add_lock);
- inode_unlock(inode);
+ mutex_lock(&inode->ei_update_lock);
+ if (end >= inode->v.i_size && !truncated_last_page) {
+ ret = bch2_write_inode_size(c, inode, inode->v.i_size,
+ ATTR_MTIME|ATTR_CTIME);
+ } else {
+ ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+ ATTR_MTIME|ATTR_CTIME);
+ }
+ mutex_unlock(&inode->ei_update_lock);
+err:
return ret;
}
-static long bch2_fcollapse(struct inode *inode, loff_t offset, loff_t len)
+static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
+ loff_t offset, loff_t len,
+ bool insert)
{
- struct address_space *mapping = inode->i_mapping;
- struct bch_inode_info *ei = to_bch_ei(inode);
- struct bch_fs *c = inode->i_sb->s_fs_info;
- struct btree_iter src;
- struct btree_iter dst;
- BKEY_PADDED(k) copy;
- struct bkey_s_c k;
- struct i_sectors_hook i_sectors_hook;
- loff_t new_size;
- int ret;
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct address_space *mapping = inode->v.i_mapping;
+ struct bkey_buf copy;
+ struct btree_trans trans;
+ struct btree_iter src, dst, del;
+ loff_t shift, new_size;
+ u64 src_start;
+ int ret = 0;
- if ((offset | len) & (PAGE_SIZE - 1))
+ if ((offset | len) & (block_bytes(c) - 1))
return -EINVAL;
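+
+ /*
+ * insert: shift every extent from @offset to EOF right by @len, walking
+ * backwards from the last extent (src_start == U64_MAX); collapse:
+ * punch out [offset, offset + len) and shift everything after it left:
+ */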
- bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS,
- POS(inode->i_ino, offset >> 9),
- BTREE_ITER_INTENT);
- /* position will be set from dst iter's position: */
- bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN, 0);
- bch2_btree_iter_link(&src, &dst);
-
- /*
- * We need i_mutex to keep the page cache consistent with the extents
- * btree, and the btree consistent with i_size - we don't need outside
- * locking for the extents btree itself, because we're using linked
- * iterators
- */
- inode_lock(inode);
- inode_dio_wait(inode);
- pagecache_block_get(&mapping->add_lock);
+ if (insert) {
+ if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
+ return -EFBIG;
- ret = -EINVAL;
- if (offset + len >= inode->i_size)
- goto err;
+ if (offset >= inode->v.i_size)
+ return -EINVAL;
- if (inode->i_size < len)
- goto err;
+ src_start = U64_MAX;
+ shift = len;
+ } else {
+ if (offset + len >= inode->v.i_size)
+ return -EINVAL;
- new_size = inode->i_size - len;
+ src_start = offset + len;
+ shift = -len;
+ }
- ret = write_invalidate_inode_pages_range(inode->i_mapping,
- offset, LLONG_MAX);
- if (ret)
- goto err;
+ new_size = inode->v.i_size + shift;
- ret = i_sectors_dirty_get(ei, &i_sectors_hook);
+ ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
if (ret)
- goto err;
+ return ret;
- while (bkey_cmp(dst.pos,
- POS(inode->i_ino,
- round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
- struct disk_reservation disk_res;
+ if (insert) {
+ i_size_write(&inode->v, new_size);
+ mutex_lock(&inode->ei_update_lock);
+ ret = bch2_write_inode_size(c, inode, new_size,
+ ATTR_MTIME|ATTR_CTIME);
+ mutex_unlock(&inode->ei_update_lock);
+ } else {
+ s64 i_sectors_delta = 0;
- bch2_btree_iter_set_pos(&src,
- POS(dst.pos.inode, dst.pos.offset + (len >> 9)));
+ ret = bch2_fpunch(c, inode_inum(inode),
+ offset >> 9, (offset + len) >> 9,
+ &i_sectors_delta);
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
- ret = bch2_btree_iter_traverse(&dst);
if (ret)
- goto btree_iter_err;
+ return ret;
+ }
- k = bch2_btree_iter_peek_with_holes(&src);
- if ((ret = btree_iter_err(k)))
- goto btree_iter_err;
+ bch2_bkey_buf_init(©);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+ bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
+ POS(inode->v.i_ino, src_start >> 9),
+ BTREE_ITER_INTENT);
+ bch2_trans_copy_iter(&dst, &src);
+ bch2_trans_copy_iter(&del, &src);
+
+ while (ret == 0 ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ struct disk_reservation disk_res =
+ bch2_disk_reservation_init(c, 0);
+ struct bkey_i delete;
+ struct bkey_s_c k;
+ struct bpos next_pos;
+ struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
+ struct bpos atomic_end;
+ unsigned trigger_flags = 0;
+ u32 snapshot;
- bkey_reassemble(©.k, k);
+ bch2_trans_begin(&trans);
- if (bkey_deleted(©.k.k))
- copy.k.k.type = KEY_TYPE_DISCARD;
+ ret = bch2_subvolume_get_snapshot(&trans,
+ inode->ei_subvol, &snapshot);
+ if (ret)
+ continue;
- bch2_cut_front(src.pos, ©.k);
- copy.k.k.p.offset -= len >> 9;
+ bch2_btree_iter_set_snapshot(&src, snapshot);
+ bch2_btree_iter_set_snapshot(&dst, snapshot);
+ bch2_btree_iter_set_snapshot(&del, snapshot);
- BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(©.k.k)));
- ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
- BCH_DISK_RESERVATION_NOFAIL);
- BUG_ON(ret);
+ k = insert
+ ? bch2_btree_iter_peek_prev(&src)
+ : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
+ if ((ret = bkey_err(k)))
+ continue;
- ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
- &ei->journal_seq,
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOFAIL,
- BTREE_INSERT_ENTRY(&dst, ©.k));
- bch2_disk_reservation_put(c, &disk_res);
-btree_iter_err:
- if (ret < 0 && ret != -EINTR)
- goto err_unwind;
+ if (!k.k || k.k->p.inode != inode->v.i_ino)
+ break;
- bch2_btree_iter_cond_resched(&src);
- }
+ if (insert &&
+ bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
+ break;
+reassemble:
+ bch2_bkey_buf_reassemble(©, c, k);
- bch2_btree_iter_unlock(&src);
- bch2_btree_iter_unlock(&dst);
+ if (insert &&
+ bkey_lt(bkey_start_pos(k.k), move_pos))
+ bch2_cut_front(move_pos, copy.k);
- ret = bch2_inode_truncate(c, inode->i_ino,
- round_up(new_size, PAGE_SIZE) >> 9,
- &i_sectors_hook.hook,
- &ei->journal_seq);
- if (ret)
- goto err_unwind;
+ copy.k->k.p.offset += shift >> 9;
+ bch2_btree_iter_set_pos(&dst, bkey_start_pos(©.k->k));
- i_sectors_dirty_put(ei, &i_sectors_hook);
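+ /*
+ * bch2_extent_atomic_end() gives us the largest portion of the copied
+ * key we can insert in one atomic update; if it has to be split, redo
+ * the copy from the split point (insert) or trim the copy (collapse):
+ */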
+ ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
+ if (ret)
+ continue;
- mutex_lock(&ei->update_lock);
- i_size_write(inode, new_size);
- ret = bch2_write_inode_size(c, ei, inode->i_size);
- mutex_unlock(&ei->update_lock);
+ if (!bkey_eq(atomic_end, copy.k->k.p)) {
+ if (insert) {
+ move_pos = atomic_end;
+ move_pos.offset -= shift >> 9;
+ goto reassemble;
+ } else {
+ bch2_cut_back(atomic_end, copy.k);
+ }
+ }
- pagecache_block_put(&mapping->add_lock);
- inode_unlock(inode);
+ bkey_init(&delete.k);
+ delete.k.p = copy.k->k.p;
+ delete.k.size = copy.k->k.size;
+ delete.k.p.offset -= shift >> 9;
+ bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
- return ret;
-err_unwind:
- /*
- * XXX: we've left data with multiple pointers... which isn't a _super_
- * serious problem...
- */
- i_sectors_dirty_put(ei, &i_sectors_hook);
-err:
- bch2_btree_iter_unlock(&src);
- bch2_btree_iter_unlock(&dst);
- pagecache_block_put(&mapping->add_lock);
- inode_unlock(inode);
- return ret;
-}
+ next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
-static long bch2_fallocate(struct inode *inode, int mode,
- loff_t offset, loff_t len)
-{
- struct address_space *mapping = inode->i_mapping;
- struct bch_inode_info *ei = to_bch_ei(inode);
- struct bch_fs *c = inode->i_sb->s_fs_info;
- struct i_sectors_hook i_sectors_hook;
- struct btree_iter iter;
- struct bpos end;
- loff_t block_start, block_end;
- loff_t new_size = offset + len;
- unsigned sectors;
- unsigned replicas = READ_ONCE(c->opts.data_replicas);
- int ret;
+ if (copy.k->k.size != k.k->size) {
+ /* We might end up splitting compressed extents: */
+ unsigned nr_ptrs =
+ bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
- bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
- BTREE_ITER_INTENT);
+ ret = bch2_disk_reservation_get(c, &disk_res,
+ copy.k->k.size, nr_ptrs,
+ BCH_DISK_RESERVATION_NOFAIL);
+ BUG_ON(ret);
+ }
- inode_lock(inode);
- inode_dio_wait(inode);
- pagecache_block_get(&mapping->add_lock);
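+ /*
+ * Delete the extent at its old position and insert the shifted copy at
+ * its new position in a single transaction commit, so the shift is
+ * atomic on disk:
+ */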
+ ret = bch2_btree_iter_traverse(&del) ?:
+ bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
+ bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
+ bch2_trans_commit(&trans, &disk_res, NULL,
+ BTREE_INSERT_NOFAIL);
+ bch2_disk_reservation_put(c, &disk_res);
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- new_size > inode->i_size) {
- ret = inode_newsize_ok(inode, new_size);
- if (ret)
- goto err;
+ if (!ret)
+ bch2_btree_iter_set_pos(&src, next_pos);
}
+ bch2_trans_iter_exit(&trans, &del);
+ bch2_trans_iter_exit(&trans, &dst);
+ bch2_trans_iter_exit(&trans, &src);
+ bch2_trans_exit(&trans);
+ bch2_bkey_buf_exit(©, c);
- if (mode & FALLOC_FL_ZERO_RANGE) {
- ret = __bch2_truncate_page(inode->i_mapping,
- offset >> PAGE_SHIFT,
- offset, offset + len);
-
- if (!ret &&
- offset >> PAGE_SHIFT !=
- (offset + len) >> PAGE_SHIFT)
- ret = __bch2_truncate_page(inode->i_mapping,
- (offset + len) >> PAGE_SHIFT,
- offset, offset + len);
-
- if (unlikely(ret))
- goto err;
-
- truncate_pagecache_range(inode, offset, offset + len - 1);
+ if (ret)
+ return ret;
- block_start = round_up(offset, PAGE_SIZE);
- block_end = round_down(offset + len, PAGE_SIZE);
+ mutex_lock(&inode->ei_update_lock);
+ if (!insert) {
+ i_size_write(&inode->v, new_size);
+ ret = bch2_write_inode_size(c, inode, new_size,
+ ATTR_MTIME|ATTR_CTIME);
} else {
- block_start = round_down(offset, PAGE_SIZE);
- block_end = round_up(offset + len, PAGE_SIZE);
+ /* We need an inode update to update bi_journal_seq for fsync: */
+ ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+ ATTR_MTIME|ATTR_CTIME);
}
+ mutex_unlock(&inode->ei_update_lock);
+ return ret;
+}
+
+static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
+ u64 start_sector, u64 end_sector)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bpos end_pos = POS(inode->v.i_ino, end_sector);
+ struct bch_io_opts opts;
+ int ret = 0;
- bch2_btree_iter_set_pos(&iter, POS(inode->i_ino, block_start >> 9));
- end = POS(inode->i_ino, block_end >> 9);
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
- ret = i_sectors_dirty_get(ei, &i_sectors_hook);
- if (unlikely(ret))
- goto err;
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+ POS(inode->v.i_ino, start_sector),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- while (bkey_cmp(iter.pos, end) < 0) {
- struct disk_reservation disk_res = { 0 };
- struct bkey_i_reservation reservation;
+ while (!ret && bkey_lt(iter.pos, end_pos)) {
+ s64 i_sectors_delta = 0;
+ struct quota_res quota_res = { 0 };
struct bkey_s_c k;
+ unsigned sectors;
+ u32 snapshot;
+
+ bch2_trans_begin(&trans);
- k = bch2_btree_iter_peek_with_holes(&iter);
- if ((ret = btree_iter_err(k)))
- goto btree_iter_err;
+ ret = bch2_subvolume_get_snapshot(&trans,
+ inode->ei_subvol, &snapshot);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ if ((ret = bkey_err(k)))
+ goto bkey_err;
/* already reserved */
- if (k.k->type == BCH_RESERVATION &&
- bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
- bch2_btree_iter_advance_pos(&iter);
+ if (bkey_extent_is_reservation(k) &&
+ bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
+ bch2_btree_iter_advance(&iter);
continue;
}
- if (bkey_extent_is_data(k.k)) {
- if (!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_advance_pos(&iter);
- continue;
- }
+ if (bkey_extent_is_data(k.k) &&
+ !(mode & FALLOC_FL_ZERO_RANGE)) {
+ bch2_btree_iter_advance(&iter);
+ continue;
}
- bkey_reservation_init(&reservation.k_i);
- reservation.k.type = BCH_RESERVATION;
- reservation.k.p = k.k->p;
- reservation.k.size = k.k->size;
-
- bch2_cut_front(iter.pos, &reservation.k_i);
- bch2_cut_back(end, &reservation.k);
-
- sectors = reservation.k.size;
- reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);
+ /*
+ * XXX: for nocow mode, we should promote shared extents to
+ * unshared here
+ */
- if (reservation.v.nr_replicas < replicas ||
- bkey_extent_is_compressed(k)) {
- ret = bch2_disk_reservation_get(c, &disk_res,
- sectors, 0);
- if (ret)
- goto err_put_sectors_dirty;
+ sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
- reservation.v.nr_replicas = disk_res.nr_replicas;
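+ /*
+ * Extents that aren't yet allocations (holes, unwritten space) need a
+ * new quota reservation; already allocated extents were charged when
+ * they were first written:
+ */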
+ if (!bkey_extent_is_allocation(k.k)) {
+ ret = bch2_quota_reservation_add(c, inode,
+ "a_res,
+ sectors, true);
+ if (unlikely(ret))
+ goto bkey_err;
}
- ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
- &ei->journal_seq,
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOFAIL,
- BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
- bch2_disk_reservation_put(c, &disk_res);
-btree_iter_err:
- if (ret < 0 && ret != -EINTR)
- goto err_put_sectors_dirty;
+ ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
+ sectors, opts, &i_sectors_delta,
+ writepoint_hashed((unsigned long) current));
+ if (ret)
+ goto bkey_err;
+ i_sectors_acct(c, inode, "a_res, i_sectors_delta);
+bkey_err:
+ bch2_quota_reservation_put(c, inode, "a_res);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ ret = 0;
}
- bch2_btree_iter_unlock(&iter);
- i_sectors_dirty_put(ei, &i_sectors_hook);
+ bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
+ mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- new_size > inode->i_size) {
- i_size_write(inode, new_size);
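+ /*
+ * If we ran out of space partway through a ZERO_RANGE, punch out the
+ * rest of the range: it will then read back as zeroes (a hole), we
+ * just won't have space reserved for future writes:
+ */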
+ if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
+ struct quota_res quota_res = { 0 };
+ s64 i_sectors_delta = 0;
- mutex_lock(&ei->update_lock);
- ret = bch2_write_inode_size(c, ei, inode->i_size);
- mutex_unlock(&ei->update_lock);
+ bch2_fpunch_at(&trans, &iter, inode_inum(inode),
+ end_sector, &i_sectors_delta);
+ i_sectors_acct(c, inode, "a_res, i_sectors_delta);
+ bch2_quota_reservation_put(c, inode, "a_res);
}
- /* blech */
- if ((mode & FALLOC_FL_KEEP_SIZE) &&
- (mode & FALLOC_FL_ZERO_RANGE) &&
- ei->i_size != inode->i_size) {
- /* sync appends.. */
- ret = filemap_write_and_wait_range(mapping, ei->i_size, S64_MAX);
+ bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_exit(&trans);
+ return ret;
+}
+
+static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
+ loff_t offset, loff_t len)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ u64 end = offset + len;
+ u64 block_start = round_down(offset, block_bytes(c));
+ u64 block_end = round_up(end, block_bytes(c));
+ bool truncated_last_page = false;
+ int ret, ret2 = 0;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
+ ret = inode_newsize_ok(&inode->v, end);
if (ret)
- goto err;
+ return ret;
+ }
- if (ei->i_size != inode->i_size) {
- mutex_lock(&ei->update_lock);
- ret = bch2_write_inode_size(c, ei, inode->i_size);
- mutex_unlock(&ei->update_lock);
- }
+ if (mode & FALLOC_FL_ZERO_RANGE) {
+ ret = bch2_truncate_folios(inode, offset, end);
+ if (unlikely(ret < 0))
+ return ret;
+
+ truncated_last_page = ret;
+
+ truncate_pagecache_range(&inode->v, offset, end - 1);
+
+ block_start = round_up(offset, block_bytes(c));
+ block_end = round_down(end, block_bytes(c));
}
- pagecache_block_put(&mapping->add_lock);
- inode_unlock(inode);
+ ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
- return 0;
-err_put_sectors_dirty:
- i_sectors_dirty_put(ei, &i_sectors_hook);
-err:
- bch2_btree_iter_unlock(&iter);
- pagecache_block_put(&mapping->add_lock);
- inode_unlock(inode);
- return ret;
+ /*
+ * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
+ * so that the VFS cache i_size is consistent with the btree i_size:
+ */
+ if (ret &&
+ !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
+ return ret;
+
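+ /*
+ * Update the in-memory and btree i_size if we extended the file, or if
+ * ZERO_RANGE needs the on disk i_size rewritten because the folio
+ * straddling i_size wasn't handled by bch2_truncate_folios():
+ */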
+ if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
+ end = inode->v.i_size;
+
+ if (end >= inode->v.i_size &&
+ (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
+ !(mode & FALLOC_FL_KEEP_SIZE))) {
+ spin_lock(&inode->v.i_lock);
+ i_size_write(&inode->v, end);
+ spin_unlock(&inode->v.i_lock);
+
+ mutex_lock(&inode->ei_update_lock);
+ ret2 = bch2_write_inode_size(c, inode, end, 0);
+ mutex_unlock(&inode->ei_update_lock);
+ }
+
+ return ret ?: ret2;
}
long bch2_fallocate_dispatch(struct file *file, int mode,
loff_t offset, loff_t len)
{
- struct inode *inode = file_inode(file);
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ long ret;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
+ return -EROFS;
+
+ inode_lock(&inode->v);
+ inode_dio_wait(&inode->v);
+ bch2_pagecache_block_get(inode);
+
+ ret = file_modified(file);
+ if (ret)
+ goto err;
if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
- return bch2_fallocate(inode, mode, offset, len);
+ ret = bchfs_fallocate(inode, mode, offset, len);
+ else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
+ ret = bchfs_fpunch(inode, offset, len);
+ else if (mode == FALLOC_FL_INSERT_RANGE)
+ ret = bchfs_fcollapse_finsert(inode, offset, len, true);
+ else if (mode == FALLOC_FL_COLLAPSE_RANGE)
+ ret = bchfs_fcollapse_finsert(inode, offset, len, false);
+ else
+ ret = -EOPNOTSUPP;
+err:
+ bch2_pagecache_block_put(inode);
+ inode_unlock(&inode->v);
+ bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
+
+ return bch2_err_class(ret);
+}
+
+/*
+ * Take a quota reservation for unallocated blocks in a given file range.
+ * Does not check the pagecache.
+ */
+static int quota_reserve_range(struct bch_inode_info *inode,
+ struct quota_res *res,
+ u64 start, u64 end)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 snapshot;
+ u64 sectors = end - start;
+ u64 pos = start;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, pos, snapshot), 0);
+
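+ /*
+ * Start by assuming the whole range is unallocated, then subtract any
+ * extents that are already allocated:
+ */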
+ while (!(ret = btree_trans_too_many_iters(&trans)) &&
+ (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
+ !(ret = bkey_err(k))) {
+ if (bkey_extent_is_allocation(k.k)) {
+ u64 s = min(end, k.k->p.offset) -
+ max(start, bkey_start_offset(k.k));
+ BUG_ON(s > sectors);
+ sectors -= s;
+ }
+ bch2_btree_iter_advance(&iter);
+ }
+ pos = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
- return bch2_fpunch(inode, offset, len);
+ bch2_trans_exit(&trans);
- if (mode == FALLOC_FL_COLLAPSE_RANGE)
- return bch2_fcollapse(inode, offset, len);
+ if (ret)
+ return ret;
- return -EOPNOTSUPP;
+ return bch2_quota_reservation_add(c, inode, res, sectors, true);
}
-static bool page_is_data(struct page *page)
+loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
+ struct file *file_dst, loff_t pos_dst,
+ loff_t len, unsigned remap_flags)
{
- /* XXX: should only have to check PageDirty */
- return PagePrivate(page) &&
- (page_state(page)->sectors ||
- page_state(page)->dirty_sectors);
+ struct bch_inode_info *src = file_bch_inode(file_src);
+ struct bch_inode_info *dst = file_bch_inode(file_dst);
+ struct bch_fs *c = src->v.i_sb->s_fs_info;
+ struct quota_res quota_res = { 0 };
+ s64 i_sectors_delta = 0;
+ u64 aligned_len;
+ loff_t ret = 0;
+
+ if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
+ return -EINVAL;
+
+ if (remap_flags & REMAP_FILE_DEDUP)
+ return -EOPNOTSUPP;
+
+ if ((pos_src & (block_bytes(c) - 1)) ||
+ (pos_dst & (block_bytes(c) - 1)))
+ return -EINVAL;
+
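+ /* remapping overlapping ranges within the same file isn't supported: */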
+ if (src == dst &&
+ abs(pos_src - pos_dst) < len)
+ return -EINVAL;
+
+ bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
+
+ inode_dio_wait(&src->v);
+ inode_dio_wait(&dst->v);
+
+ ret = generic_remap_file_range_prep(file_src, pos_src,
+ file_dst, pos_dst,
+ &len, remap_flags);
+ if (ret < 0 || len == 0)
+ goto err;
+
+ aligned_len = round_up((u64) len, block_bytes(c));
+
+ ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
+ pos_dst, pos_dst + len - 1);
+ if (ret)
+ goto err;
+
+ ret = quota_reserve_range(dst, "a_res, pos_dst >> 9,
+ (pos_dst + aligned_len) >> 9);
+ if (ret)
+ goto err;
+
+ file_update_time(file_dst);
+
+ mark_pagecache_unallocated(src, pos_src >> 9,
+ (pos_src + aligned_len) >> 9);
+
+ ret = bch2_remap_range(c,
+ inode_inum(dst), pos_dst >> 9,
+ inode_inum(src), pos_src >> 9,
+ aligned_len >> 9,
+ pos_dst + len, &i_sectors_delta);
+ if (ret < 0)
+ goto err;
+
+ /*
+ * due to alignment, we might have remapped slightly more than requested
+ */
+ ret = min((u64) ret << 9, (u64) len);
+
+ i_sectors_acct(c, dst, "a_res, i_sectors_delta);
+
+ spin_lock(&dst->v.i_lock);
+ if (pos_dst + ret > dst->v.i_size)
+ i_size_write(&dst->v, pos_dst + ret);
+ spin_unlock(&dst->v.i_lock);
+
+ if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
+ IS_SYNC(file_inode(file_dst)))
+ ret = bch2_flush_inode(c, dst);
+err:
+ bch2_quota_reservation_put(c, dst, "a_res);
+ bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
+
+ return bch2_err_class(ret);
}
-static loff_t bch2_next_pagecache_data(struct inode *inode,
- loff_t start_offset,
- loff_t end_offset)
+/* fseek: */
+
+static int folio_data_offset(struct folio *folio, unsigned offset)
{
- struct address_space *mapping = inode->i_mapping;
- struct page *page;
- pgoff_t index;
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, sectors = folio_sectors(folio);
- for (index = start_offset >> PAGE_SHIFT;
- index < end_offset >> PAGE_SHIFT;
- index++) {
- if (find_get_pages(mapping, index, 1, &page)) {
- lock_page(page);
- index = page->index;
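+ /*
+ * Sectors in state SECTOR_dirty or above count as data here; anything
+ * below (unallocated, reserved) is a hole for SEEK_DATA purposes:
+ */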
+ if (s)
+ for (i = offset >> 9; i < sectors; i++)
+ if (s->s[i].state >= SECTOR_dirty)
+ return i << 9;
- if (page_is_data(page))
- end_offset =
- min(end_offset,
- max(start_offset,
- ((loff_t) index) << PAGE_SHIFT));
- unlock_page(page);
- put_page(page);
- } else {
- break;
+ return -1;
+}
+
+static loff_t bch2_seek_pagecache_data(struct inode *vinode,
+ loff_t start_offset,
+ loff_t end_offset)
+{
+ struct folio_batch fbatch;
+ pgoff_t start_index = start_offset >> PAGE_SHIFT;
+ pgoff_t end_index = end_offset >> PAGE_SHIFT;
+ pgoff_t index = start_index;
+ unsigned i;
+ loff_t ret;
+ int offset;
+
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(vinode->i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ folio_lock(folio);
+ offset = folio_data_offset(folio,
+ max(folio_pos(folio), start_offset) -
+ folio_pos(folio));
+ if (offset >= 0) {
+ ret = clamp(folio_pos(folio) + offset,
+ start_offset, end_offset);
+ folio_unlock(folio);
+ folio_batch_release(&fbatch);
+ return ret;
+ }
+ folio_unlock(folio);
}
+ folio_batch_release(&fbatch);
+ cond_resched();
}
return end_offset;
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
- struct inode *inode = file->f_mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
+ subvol_inum inum = inode_inum(inode);
u64 isize, next_data = MAX_LFS_FILESIZE;
+ u32 snapshot;
int ret;
- isize = i_size_read(inode);
+ isize = i_size_read(&inode->v);
if (offset >= isize)
return -ENXIO;
- for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
- POS(inode->i_ino, offset >> 9), 0, k) {
- if (k.k->p.inode != inode->i_ino) {
- break;
- } else if (bkey_extent_is_data(k.k)) {
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, offset >> 9, snapshot),
+ POS(inode->v.i_ino, U64_MAX),
+ 0, k, ret) {
+ if (bkey_extent_is_data(k.k)) {
next_data = max(offset, bkey_start_offset(k.k) << 9);
break;
} else if (k.k->p.offset >> 9 > isize)
break;
}
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- ret = bch2_btree_iter_unlock(&iter);
+ bch2_trans_exit(&trans);
if (ret)
return ret;
if (next_data > offset)
- next_data = bch2_next_pagecache_data(inode, offset, next_data);
+ next_data = bch2_seek_pagecache_data(&inode->v,
+ offset, next_data);
- if (next_data > isize)
+ if (next_data >= isize)
return -ENXIO;
return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
-static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
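+/*
+ * Returns true if a hole starts at or after *offset within this folio (or if
+ * there's no folio cached here at all), leaving *offset pointing at the hole;
+ * returns false, with *offset advanced past the folio, if it's entirely data:
+ */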
+static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
{
- struct page *page;
- bool ret;
+ struct folio *folio;
+ struct bch_folio *s;
+ unsigned i, sectors, f_offset;
+ bool ret = true;
- page = find_lock_entry(mapping, index);
- if (!page || radix_tree_exception(page))
- return false;
+ folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
+ if (!folio)
+ return true;
+
+ s = bch2_folio(folio);
+ if (!s)
+ goto unlock;
- ret = page_is_data(page);
- unlock_page(page);
+ sectors = folio_sectors(folio);
+ f_offset = *offset - folio_pos(folio);
+ for (i = f_offset >> 9; i < sectors; i++)
+ if (s->s[i].state < SECTOR_dirty) {
+ *offset = max(*offset, folio_pos(folio) + (i << 9));
+ goto unlock;
+ }
+
+ *offset = folio_end_pos(folio);
+ ret = false;
+unlock:
+ folio_unlock(folio);
return ret;
}
-static loff_t bch2_next_pagecache_hole(struct inode *inode,
- loff_t start_offset,
- loff_t end_offset)
+static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
+ loff_t start_offset,
+ loff_t end_offset)
{
- struct address_space *mapping = inode->i_mapping;
- pgoff_t index;
+ struct address_space *mapping = vinode->i_mapping;
+ loff_t offset = start_offset;
- for (index = start_offset >> PAGE_SHIFT;
- index < end_offset >> PAGE_SHIFT;
- index++)
- if (!page_slot_is_data(mapping, index))
- end_offset = max(start_offset,
- ((loff_t) index) << PAGE_SHIFT);
+ while (offset < end_offset &&
+ !folio_hole_offset(mapping, &offset))
+ ;
- return end_offset;
+ return min(offset, end_offset);
}
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
- struct inode *inode = file->f_mapping->host;
- struct bch_fs *c = inode->i_sb->s_fs_info;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
+ subvol_inum inum = inode_inum(inode);
u64 isize, next_hole = MAX_LFS_FILESIZE;
+ u32 snapshot;
int ret;
- isize = i_size_read(inode);
+ isize = i_size_read(&inode->v);
if (offset >= isize)
return -ENXIO;
- for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
- POS(inode->i_ino, offset >> 9),
- BTREE_ITER_WITH_HOLES, k) {
- if (k.k->p.inode != inode->i_ino) {
- next_hole = bch2_next_pagecache_hole(inode,
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, offset >> 9, snapshot),
+ BTREE_ITER_SLOTS, k, ret) {
+ if (k.k->p.inode != inode->v.i_ino) {
+ next_hole = bch2_seek_pagecache_hole(&inode->v,
offset, MAX_LFS_FILESIZE);
break;
} else if (!bkey_extent_is_data(k.k)) {
- next_hole = bch2_next_pagecache_hole(inode,
+ next_hole = bch2_seek_pagecache_hole(&inode->v,
max(offset, bkey_start_offset(k.k) << 9),
k.k->p.offset << 9);
offset = max(offset, bkey_start_offset(k.k) << 9);
}
}
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- ret = bch2_btree_iter_unlock(&iter);
+ bch2_trans_exit(&trans);
if (ret)
return ret;
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
+ loff_t ret;
+
switch (whence) {
case SEEK_SET:
case SEEK_CUR:
case SEEK_END:
- return generic_file_llseek(file, offset, whence);
+ ret = generic_file_llseek(file, offset, whence);
+ break;
case SEEK_DATA:
- return bch2_seek_data(file, offset);
+ ret = bch2_seek_data(file, offset);
+ break;
case SEEK_HOLE:
- return bch2_seek_hole(file, offset);
+ ret = bch2_seek_hole(file, offset);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EINVAL;
+ return bch2_err_class(ret);
+}
+
+void bch2_fs_fsio_exit(struct bch_fs *c)
+{
+ bioset_exit(&c->nocow_flush_bioset);
+ bioset_exit(&c->dio_write_bioset);
+ bioset_exit(&c->dio_read_bioset);
+ bioset_exit(&c->writepage_bioset);
+}
+
+int bch2_fs_fsio_init(struct bch_fs *c)
+{
+ int ret = 0;
+
+ pr_verbose_init(c->opts, "");
+
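+ /*
+ * Small bio pools - just enough to guarantee forward progress when bio
+ * allocation would otherwise fail under memory pressure:
+ */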
+ if (bioset_init(&c->writepage_bioset,
+ 4, offsetof(struct bch_writepage_io, op.wbio.bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_writepage_bioset_init;
+
+ if (bioset_init(&c->dio_read_bioset,
+ 4, offsetof(struct dio_read, rbio.bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_dio_read_bioset_init;
+
+ if (bioset_init(&c->dio_write_bioset,
+ 4, offsetof(struct dio_write, op.wbio.bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_dio_write_bioset_init;
+
+ if (bioset_init(&c->nocow_flush_bioset,
+ 1, offsetof(struct nocow_flush, bio), 0))
+ return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
+
+ pr_verbose_init(c->opts, "ret %i", ret);
+ return ret;
}
+
+#endif /* NO_BCACHEFS_FS */