X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Ffs-io.c;h=1eb69ed38b10bd2cbd56695f2f106bc40d91d976;hb=d2a118d921dfdf43adfa37aed1d9df62925bda66;hp=298e3592ed6c8c707577094701a5fe95d12ff28d;hpb=ea83a3985d28372d56ec7cea6e73907551869f63;p=bcachefs-tools-debian diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c index 298e359..1eb69ed 100644 --- a/libbcachefs/fs-io.c +++ b/libbcachefs/fs-io.c @@ -1,10 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 #ifndef NO_BCACHEFS_FS #include "bcachefs.h" +#include "alloc_foreground.h" +#include "bkey_on_stack.h" #include "btree_update.h" #include "buckets.h" #include "clock.h" #include "error.h" +#include "extents.h" +#include "extent_update.h" #include "fs.h" #include "fs-io.h" #include "fsck.h" @@ -12,6 +17,8 @@ #include "journal.h" #include "io.h" #include "keylist.h" +#include "quota.h" +#include "reflink.h" #include #include @@ -19,6 +26,8 @@ #include #include #include +#include +#include #include #include #include @@ -26,59 +35,33 @@ #include #include -struct i_sectors_hook { - struct extent_insert_hook hook; - s64 sectors; - struct bch_inode_info *inode; -}; - -struct bchfs_write_op { - struct bch_inode_info *inode; - s64 sectors_added; - bool is_dio; - bool unalloc; - u64 new_i_size; - - /* must be last: */ - struct bch_write_op op; +struct quota_res { + u64 sectors; }; -static inline void bch2_fswrite_op_init(struct bchfs_write_op *op, - struct bch_inode_info *inode, - bool is_dio) -{ - op->inode = inode; - op->sectors_added = 0; - op->is_dio = is_dio; - op->unalloc = false; - op->new_i_size = U64_MAX; -} - struct bch_writepage_io { struct closure cl; + struct bch_inode_info *inode; /* must be last: */ - struct bchfs_write_op op; + struct bch_write_op op; }; struct dio_write { - struct closure cl; + struct completion done; struct kiocb *req; - struct bch_fs *c; - long written; - long error; - loff_t offset; - - struct disk_reservation res; + struct mm_struct *mm; + unsigned loop:1, + sync:1, + free_iov:1; + struct quota_res quota_res; + u64 written; - struct iovec *iovec; - struct iovec inline_vecs[UIO_FASTIOV]; struct iov_iter iter; - - struct task_struct *task; + struct iovec inline_vecs[2]; /* must be last: */ - struct bchfs_write_op iop; + struct bch_write_op op; }; struct dio_read { @@ -118,513 +101,514 @@ static int write_invalidate_inode_pages_range(struct address_space *mapping, return ret; } -/* i_size updates: */ +/* quotas */ -static int inode_set_size(struct bch_inode_info *inode, - struct bch_inode_unpacked *bi, - void *p) -{ - loff_t *new_i_size = p; +#ifdef CONFIG_BCACHEFS_QUOTA - lockdep_assert_held(&inode->ei_update_lock); +static void bch2_quota_reservation_put(struct bch_fs *c, + struct bch_inode_info *inode, + struct quota_res *res) +{ + if (!res->sectors) + return; - bi->bi_size = *new_i_size; + mutex_lock(&inode->ei_quota_lock); + BUG_ON(res->sectors > inode->ei_quota_reserved); - if (atomic_long_read(&inode->ei_size_dirty_count)) - bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY; - else - bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY; + bch2_quota_acct(c, inode->ei_qid, Q_SPC, + -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC); + inode->ei_quota_reserved -= res->sectors; + mutex_unlock(&inode->ei_quota_lock); - return 0; + res->sectors = 0; } -static int __must_check bch2_write_inode_size(struct bch_fs *c, - struct bch_inode_info *inode, - loff_t new_size) +static int bch2_quota_reservation_add(struct bch_fs *c, + struct bch_inode_info *inode, + struct quota_res *res, + unsigned sectors, + bool check_enospc) { - return 
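/*
 * A minimal userspace sketch of the quota_res pattern added above:
 * bch2_quota_reservation_add() accounts sectors up front under
 * ei_quota_lock and records them in a struct quota_res, so that exactly
 * the unused amount can be released later. All names below are
 * illustrative stand-ins, not the bcachefs API.
 */
#include <pthread.h>
#include <stdint.h>

struct quota_res_model { uint64_t sectors; };

struct quota_inode_model {
	pthread_mutex_t	lock;		/* models ei_quota_lock */
	uint64_t	reserved;	/* models ei_quota_reserved */
	uint64_t	limit;		/* assumed per-inode quota limit */
};

static int quota_reservation_add_model(struct quota_inode_model *q,
				       struct quota_res_model *res,
				       uint64_t sectors)
{
	int ret = 0;

	pthread_mutex_lock(&q->lock);
	if (q->reserved + sectors > q->limit) {	/* models the ENOSPC check */
		ret = -1;
	} else {
		q->reserved	+= sectors;
		res->sectors	+= sectors;
	}
	pthread_mutex_unlock(&q->lock);
	return ret;
}

static void quota_reservation_put_model(struct quota_inode_model *q,
					struct quota_res_model *res)
{
	if (!res->sectors)
		return;

	pthread_mutex_lock(&q->lock);
	q->reserved -= res->sectors;	/* release only what we still hold */
	pthread_mutex_unlock(&q->lock);
	res->sectors = 0;
}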
__bch2_write_inode(c, inode, inode_set_size, &new_size); -} + int ret; -static inline void i_size_dirty_put(struct bch_inode_info *inode) -{ - atomic_long_dec_bug(&inode->ei_size_dirty_count); + mutex_lock(&inode->ei_quota_lock); + ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, + check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK); + if (likely(!ret)) { + inode->ei_quota_reserved += sectors; + res->sectors += sectors; + } + mutex_unlock(&inode->ei_quota_lock); + + return ret; } -static inline void i_size_dirty_get(struct bch_inode_info *inode) -{ - lockdep_assert_held(&inode->v.i_rwsem); +#else - atomic_long_inc(&inode->ei_size_dirty_count); +static void bch2_quota_reservation_put(struct bch_fs *c, + struct bch_inode_info *inode, + struct quota_res *res) +{ } -/* i_sectors accounting: */ - -static enum btree_insert_ret -i_sectors_hook_fn(struct extent_insert_hook *hook, - struct bpos committed_pos, - struct bpos next_pos, - struct bkey_s_c k, - const struct bkey_i *insert) +static int bch2_quota_reservation_add(struct bch_fs *c, + struct bch_inode_info *inode, + struct quota_res *res, + unsigned sectors, + bool check_enospc) { - struct i_sectors_hook *h = container_of(hook, - struct i_sectors_hook, hook); - s64 sectors = next_pos.offset - committed_pos.offset; - int sign = bkey_extent_is_allocation(&insert->k) - - (k.k && bkey_extent_is_allocation(k.k)); + return 0; +} - EBUG_ON(!(h->inode->ei_flags & BCH_INODE_I_SECTORS_DIRTY)); - EBUG_ON(!atomic_long_read(&h->inode->ei_sectors_dirty_count)); +#endif - h->sectors += sectors * sign; +/* i_size updates: */ - return BTREE_INSERT_OK; -} +struct inode_new_size { + loff_t new_size; + u64 now; + unsigned fields; +}; -static int inode_set_i_sectors_dirty(struct bch_inode_info *inode, - struct bch_inode_unpacked *bi, void *p) +static int inode_set_size(struct bch_inode_info *inode, + struct bch_inode_unpacked *bi, + void *p) { - BUG_ON(bi->bi_flags & BCH_INODE_I_SECTORS_DIRTY); + struct inode_new_size *s = p; + + bi->bi_size = s->new_size; + if (s->fields & ATTR_ATIME) + bi->bi_atime = s->now; + if (s->fields & ATTR_MTIME) + bi->bi_mtime = s->now; + if (s->fields & ATTR_CTIME) + bi->bi_ctime = s->now; - bi->bi_flags |= BCH_INODE_I_SECTORS_DIRTY; return 0; } -static int inode_clear_i_sectors_dirty(struct bch_inode_info *inode, - struct bch_inode_unpacked *bi, - void *p) +int __must_check bch2_write_inode_size(struct bch_fs *c, + struct bch_inode_info *inode, + loff_t new_size, unsigned fields) { - BUG_ON(!(bi->bi_flags & BCH_INODE_I_SECTORS_DIRTY)); + struct inode_new_size s = { + .new_size = new_size, + .now = bch2_current_time(c), + .fields = fields, + }; - bi->bi_sectors = atomic64_read(&inode->ei_sectors); - bi->bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY; - return 0; + return bch2_write_inode(c, inode, inode_set_size, &s, fields); } -static void i_sectors_dirty_put(struct bch_fs *c, - struct bch_inode_info *inode, - struct i_sectors_hook *h) +static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, + struct quota_res *quota_res, s64 sectors) { - if (h->sectors) { - spin_lock(&inode->v.i_lock); - inode->v.i_blocks += h->sectors; - spin_unlock(&inode->v.i_lock); + if (!sectors) + return; + + mutex_lock(&inode->ei_quota_lock); +#ifdef CONFIG_BCACHEFS_QUOTA + if (quota_res && sectors > 0) { + BUG_ON(sectors > quota_res->sectors); + BUG_ON(sectors > inode->ei_quota_reserved); - atomic64_add(h->sectors, &inode->ei_sectors); - EBUG_ON(atomic64_read(&inode->ei_sectors) < 0); + quota_res->sectors -= sectors; + 
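/*
 * A compilable model of what i_sectors_acct() does: a positive delta
 * that was prepaid by a quota_res just consumes the reservation, any
 * other delta is accounted directly, and i_blocks always moves by the
 * same amount. Names are illustrative stand-ins.
 */
#include <stdint.h>

struct sectors_acct_model {
	int64_t	i_blocks;
	int64_t	quota_reserved;
	int64_t	quota_used;
};

static void sectors_acct_model_update(struct sectors_acct_model *a,
				      int64_t *res_sectors, /* NULL if none */
				      int64_t sectors)
{
	if (res_sectors && sectors > 0) {
		/* prepaid at reservation time: just consume it */
		*res_sectors		-= sectors;
		a->quota_reserved	-= sectors;
	} else {
		/* unreserved dirtying, or a negative delta (freeing) */
		a->quota_used		+= sectors;
	}
	a->i_blocks += sectors;
}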
inode->ei_quota_reserved -= sectors; + } else { + bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN); } +#endif + inode->v.i_blocks += sectors; + mutex_unlock(&inode->ei_quota_lock); +} - EBUG_ON(atomic_long_read(&inode->ei_sectors_dirty_count) <= 0); +/* page state: */ - mutex_lock(&inode->ei_update_lock); +/* stored in page->private: */ - if (atomic_long_dec_and_test(&inode->ei_sectors_dirty_count)) { - int ret = __bch2_write_inode(c, inode, - inode_clear_i_sectors_dirty, NULL); +struct bch_page_sector { + /* Uncompressed, fully allocated replicas: */ + unsigned nr_replicas:3; - ret = ret; - } + /* Owns PAGE_SECTORS * replicas_reserved sized reservation: */ + unsigned replicas_reserved:3; - mutex_unlock(&inode->ei_update_lock); -} + /* i_sectors: */ + enum { + SECTOR_UNALLOCATED, + SECTOR_RESERVED, + SECTOR_DIRTY, + SECTOR_ALLOCATED, + } state:2; +}; -static int __must_check i_sectors_dirty_get(struct bch_fs *c, - struct bch_inode_info *inode, - struct i_sectors_hook *h) +struct bch_page_state { + spinlock_t lock; + atomic_t write_count; + struct bch_page_sector s[PAGE_SECTORS]; +}; + +static inline struct bch_page_state *__bch2_page_state(struct page *page) { - int ret = 0; + return page_has_private(page) + ? (struct bch_page_state *) page_private(page) + : NULL; +} - h->hook.fn = i_sectors_hook_fn; - h->sectors = 0; -#ifdef CONFIG_BCACHEFS_DEBUG - h->inode = inode; -#endif +static inline struct bch_page_state *bch2_page_state(struct page *page) +{ + EBUG_ON(!PageLocked(page)); - if (atomic_long_inc_not_zero(&inode->ei_sectors_dirty_count)) - return 0; + return __bch2_page_state(page); +} - mutex_lock(&inode->ei_update_lock); +/* for newly allocated pages: */ +static void __bch2_page_state_release(struct page *page) +{ + kfree(detach_page_private(page)); +} - if (!(inode->ei_flags & BCH_INODE_I_SECTORS_DIRTY)) - ret = __bch2_write_inode(c, inode, inode_set_i_sectors_dirty, - NULL); +static void bch2_page_state_release(struct page *page) +{ + EBUG_ON(!PageLocked(page)); + __bch2_page_state_release(page); +} - if (!ret) - atomic_long_inc(&inode->ei_sectors_dirty_count); +/* for newly allocated pages: */ +static struct bch_page_state *__bch2_page_state_create(struct page *page, + gfp_t gfp) +{ + struct bch_page_state *s; - mutex_unlock(&inode->ei_update_lock); + s = kzalloc(sizeof(*s), GFP_NOFS|gfp); + if (!s) + return NULL; - return ret; + spin_lock_init(&s->lock); + attach_page_private(page, s); + return s; } -struct bchfs_extent_trans_hook { - struct bchfs_write_op *op; - struct extent_insert_hook hook; +static struct bch_page_state *bch2_page_state_create(struct page *page, + gfp_t gfp) +{ + return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp); +} - struct bch_inode_unpacked inode_u; - struct bkey_inode_buf inode_p; +static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode) +{ + /* XXX: this should not be open coded */ + return inode->ei_inode.bi_data_replicas + ? 
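/*
 * The per-sector state machine above (UNALLOCATED -> RESERVED/DIRTY ->
 * ALLOCATED) is what keeps i_blocks exact: only an UNALLOCATED sector
 * that becomes dirty consumes new space. A standalone model, assuming
 * 4K pages of 512-byte sectors (PAGE_SECTORS == 8); names are
 * illustrative stand-ins.
 */
#include <stdio.h>

#define MODEL_PAGE_SECTORS 8

enum model_sector_state {
	MS_UNALLOCATED,
	MS_RESERVED,
	MS_DIRTY,
	MS_ALLOCATED,
};

static unsigned mark_range_dirty(enum model_sector_state s[MODEL_PAGE_SECTORS],
				 unsigned first, unsigned nr)
{
	unsigned i, newly_dirty = 0;

	for (i = first; i < first + nr; i++) {
		if (s[i] == MS_UNALLOCATED)
			newly_dirty++;		/* now consumes space */
		if (s[i] < MS_DIRTY)
			s[i] = MS_DIRTY;	/* never demote ALLOCATED */
	}
	return newly_dirty;	/* caller adds this to i_blocks */
}

int main(void)
{
	enum model_sector_state s[MODEL_PAGE_SECTORS] =
		{ MS_ALLOCATED, MS_ALLOCATED };	/* rest UNALLOCATED */

	/* dirtying the whole page allocates 6 of its 8 sectors */
	printf("%u\n", mark_range_dirty(s, 0, MODEL_PAGE_SECTORS));
	return 0;
}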
inode->ei_inode.bi_data_replicas - 1 + : c->opts.data_replicas; +} - bool need_inode_update; -}; +static inline unsigned sectors_to_reserve(struct bch_page_sector *s, + unsigned nr_replicas) +{ + return max(0, (int) nr_replicas - + s->nr_replicas - + s->replicas_reserved); +} -static enum btree_insert_ret -bchfs_extent_update_hook(struct extent_insert_hook *hook, - struct bpos committed_pos, - struct bpos next_pos, - struct bkey_s_c k, - const struct bkey_i *insert) -{ - struct bchfs_extent_trans_hook *h = container_of(hook, - struct bchfs_extent_trans_hook, hook); - struct bch_inode_info *inode = h->op->inode; - int sign = bkey_extent_is_allocation(&insert->k) - - (k.k && bkey_extent_is_allocation(k.k)); - s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign; - u64 offset = min(next_pos.offset << 9, h->op->new_i_size); - bool do_pack = false; - - if (h->op->unalloc && - !bch2_extent_is_fully_allocated(k)) - return BTREE_INSERT_ENOSPC; - - BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE)); - - /* XXX: inode->i_size locking */ - if (offset > inode->ei_size) { - BUG_ON(inode->ei_flags & BCH_INODE_I_SIZE_DIRTY); - - if (!h->need_inode_update) { - h->need_inode_update = true; - return BTREE_INSERT_NEED_TRAVERSE; - } +static int bch2_get_page_disk_reservation(struct bch_fs *c, + struct bch_inode_info *inode, + struct page *page, bool check_enospc) +{ + struct bch_page_state *s = bch2_page_state_create(page, 0); + unsigned nr_replicas = inode_nr_replicas(c, inode); + struct disk_reservation disk_res = { 0 }; + unsigned i, disk_res_sectors = 0; + int ret; - h->inode_u.bi_size = offset; - do_pack = true; + if (!s) + return -ENOMEM; - inode->ei_size = offset; + for (i = 0; i < ARRAY_SIZE(s->s); i++) + disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas); - if (h->op->is_dio) - i_size_write(&inode->v, offset); - } + if (!disk_res_sectors) + return 0; - if (sectors) { - if (!h->need_inode_update) { - h->need_inode_update = true; - return BTREE_INSERT_NEED_TRAVERSE; - } + ret = bch2_disk_reservation_get(c, &disk_res, + disk_res_sectors, 1, + !check_enospc + ? 
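/*
 * sectors_to_reserve() above is pure shortfall arithmetic: reserve only
 * what on-disk replicas plus an existing reservation don't already
 * cover, clamped at zero. A self-contained restatement:
 */
#include <assert.h>

static unsigned replica_shortfall(unsigned want, unsigned on_disk,
				  unsigned reserved)
{
	int n = (int) want - (int) on_disk - (int) reserved;

	return n > 0 ? (unsigned) n : 0;
}

static void replica_shortfall_demo(void)
{
	assert(replica_shortfall(2, 0, 0) == 2);	/* hole: both copies */
	assert(replica_shortfall(2, 1, 0) == 1);	/* one replica exists */
	assert(replica_shortfall(2, 1, 1) == 0);	/* fully covered */
	assert(replica_shortfall(1, 2, 0) == 0);	/* clamp, don't go negative */
}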
BCH_DISK_RESERVATION_NOFAIL + : 0); + if (unlikely(ret)) + return ret; - h->inode_u.bi_sectors += sectors; - do_pack = true; + for (i = 0; i < ARRAY_SIZE(s->s); i++) + s->s[i].replicas_reserved += + sectors_to_reserve(&s->s[i], nr_replicas); - atomic64_add(sectors, &inode->ei_sectors); + return 0; +} - h->op->sectors_added += sectors; +struct bch2_page_reservation { + struct disk_reservation disk; + struct quota_res quota; +}; - if (h->op->is_dio) { - spin_lock(&inode->v.i_lock); - inode->v.i_blocks += sectors; - spin_unlock(&inode->v.i_lock); - } - } +static void bch2_page_reservation_init(struct bch_fs *c, + struct bch_inode_info *inode, + struct bch2_page_reservation *res) +{ + memset(res, 0, sizeof(*res)); - if (do_pack) - bch2_inode_pack(&h->inode_p, &h->inode_u); + res->disk.nr_replicas = inode_nr_replicas(c, inode); +} - return BTREE_INSERT_OK; +static void bch2_page_reservation_put(struct bch_fs *c, + struct bch_inode_info *inode, + struct bch2_page_reservation *res) +{ + bch2_disk_reservation_put(c, &res->disk); + bch2_quota_reservation_put(c, inode, &res->quota); } -static int bchfs_write_index_update(struct bch_write_op *wop) +static int bch2_page_reservation_get(struct bch_fs *c, + struct bch_inode_info *inode, struct page *page, + struct bch2_page_reservation *res, + unsigned offset, unsigned len, bool check_enospc) { - struct bchfs_write_op *op = container_of(wop, - struct bchfs_write_op, op); - struct keylist *keys = &op->op.insert_keys; - struct btree_iter extent_iter, inode_iter; - struct bchfs_extent_trans_hook hook; - struct bkey_i *k = bch2_keylist_front(keys); + struct bch_page_state *s = bch2_page_state_create(page, 0); + unsigned i, disk_sectors = 0, quota_sectors = 0; int ret; - BUG_ON(k->k.p.inode != op->inode->v.i_ino); - - bch2_btree_iter_init(&extent_iter, wop->c, BTREE_ID_EXTENTS, - bkey_start_pos(&bch2_keylist_front(keys)->k), - BTREE_ITER_INTENT); - bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES, - POS(extent_iter.pos.inode, 0), - BTREE_ITER_INTENT); + if (!s) + return -ENOMEM; - hook.op = op; - hook.hook.fn = bchfs_extent_update_hook; - hook.need_inode_update = false; + for (i = round_down(offset, block_bytes(c)) >> 9; + i < round_up(offset + len, block_bytes(c)) >> 9; + i++) { + disk_sectors += sectors_to_reserve(&s->s[i], + res->disk.nr_replicas); + quota_sectors += s->s[i].state == SECTOR_UNALLOCATED; + } - do { - ret = bch2_btree_iter_traverse(&extent_iter); - if (ret) - goto err; + if (disk_sectors) { + ret = bch2_disk_reservation_add(c, &res->disk, + disk_sectors, + !check_enospc + ? 
BCH_DISK_RESERVATION_NOFAIL + : 0); + if (unlikely(ret)) + return ret; + } - /* XXX: inode->i_size locking */ - k = bch2_keylist_front(keys); - if (min(k->k.p.offset << 9, op->new_i_size) > op->inode->ei_size) - hook.need_inode_update = true; + if (quota_sectors) { + ret = bch2_quota_reservation_add(c, inode, &res->quota, + quota_sectors, + check_enospc); + if (unlikely(ret)) { + struct disk_reservation tmp = { + .sectors = disk_sectors + }; - if (hook.need_inode_update) { - struct bkey_s_c inode; + bch2_disk_reservation_put(c, &tmp); + res->disk.sectors -= disk_sectors; + return ret; + } + } - if (!btree_iter_linked(&inode_iter)) - bch2_btree_iter_link(&extent_iter, &inode_iter); + return 0; +} - inode = bch2_btree_iter_peek_with_holes(&inode_iter); - if ((ret = btree_iter_err(inode))) - goto err; +static void bch2_clear_page_bits(struct page *page) +{ + struct bch_inode_info *inode = to_bch_ei(page->mapping->host); + struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct bch_page_state *s = bch2_page_state(page); + struct disk_reservation disk_res = { 0 }; + int i, dirty_sectors = 0; - if (WARN_ONCE(inode.k->type != BCH_INODE_FS, - "inode %llu not found when updating", - extent_iter.pos.inode)) { - ret = -ENOENT; - break; - } + if (!s) + return; - if (WARN_ONCE(bkey_bytes(inode.k) > - sizeof(hook.inode_p), - "inode %llu too big (%zu bytes, buf %zu)", - extent_iter.pos.inode, - bkey_bytes(inode.k), - sizeof(hook.inode_p))) { - ret = -ENOENT; - break; - } + EBUG_ON(!PageLocked(page)); + EBUG_ON(PageWriteback(page)); - bkey_reassemble(&hook.inode_p.inode.k_i, inode); - ret = bch2_inode_unpack(bkey_s_c_to_inode(inode), - &hook.inode_u); - if (WARN_ONCE(ret, - "error %i unpacking inode %llu", - ret, extent_iter.pos.inode)) { - ret = -ENOENT; - break; - } + for (i = 0; i < ARRAY_SIZE(s->s); i++) { + disk_res.sectors += s->s[i].replicas_reserved; + s->s[i].replicas_reserved = 0; - ret = bch2_btree_insert_at(wop->c, &wop->res, - &hook.hook, op_journal_seq(wop), - BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC, - BTREE_INSERT_ENTRY(&extent_iter, k), - BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter, - &hook.inode_p.inode.k_i, 2)); - } else { - ret = bch2_btree_insert_at(wop->c, &wop->res, - &hook.hook, op_journal_seq(wop), - BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC, - BTREE_INSERT_ENTRY(&extent_iter, k)); + if (s->s[i].state == SECTOR_DIRTY) { + dirty_sectors++; + s->s[i].state = SECTOR_UNALLOCATED; } + } - BUG_ON(bkey_cmp(extent_iter.pos, bkey_start_pos(&k->k))); - BUG_ON(!ret != !k->k.size); -err: - if (ret == -EINTR) - continue; - if (ret) - break; - - BUG_ON(bkey_cmp(extent_iter.pos, k->k.p) < 0); - bch2_keylist_pop_front(keys); - } while (!bch2_keylist_empty(keys)); + bch2_disk_reservation_put(c, &disk_res); - bch2_btree_iter_unlock(&extent_iter); - bch2_btree_iter_unlock(&inode_iter); + if (dirty_sectors) + i_sectors_acct(c, inode, NULL, -dirty_sectors); - return ret; + bch2_page_state_release(page); } -/* page state: */ - -/* stored in page->private: */ - -/* - * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could - * almost protected it with the page lock, except that bch2_writepage_io_done has - * to update the sector counts (and from interrupt/bottom half context). 
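/*
 * bch2_page_reservation_get() above is a two-phase get: disk sectors
 * first, then quota; on quota failure it must hand back only the disk
 * sectors added by *this* call, because res->disk may still hold
 * sectors from earlier calls. A generic sketch of that unwind, with
 * stand-in primitives backed by simple counters:
 */
#include <stdint.h>

static uint64_t model_disk_free = 1024, model_quota_free = 1024;

static int model_disk_add(uint64_t s)
{
	if (s > model_disk_free)
		return -1;
	model_disk_free -= s;
	return 0;
}

static void model_disk_sub(uint64_t s)	{ model_disk_free += s; }

static int model_quota_add(uint64_t s)
{
	if (s > model_quota_free)
		return -1;
	model_quota_free -= s;
	return 0;
}

struct two_phase_res { uint64_t disk, quota; };

static int two_phase_get(struct two_phase_res *res,
			 uint64_t disk_sectors, uint64_t quota_sectors)
{
	if (disk_sectors) {
		if (model_disk_add(disk_sectors))
			return -1;
		res->disk += disk_sectors;
	}

	if (quota_sectors && model_quota_add(quota_sectors)) {
		/* undo only this call's disk sectors, not all of res->disk */
		model_disk_sub(disk_sectors);
		res->disk -= disk_sectors;
		return -1;
	}
	res->quota += quota_sectors;
	return 0;
}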
- */ -struct bch_page_state { -union { struct { - /* - * page is _fully_ written on disk, and not compressed - which means to - * write this page we don't have to reserve space (the new write will - * never take up more space on disk than what it's overwriting) - */ - unsigned allocated:1; +static void bch2_set_page_dirty(struct bch_fs *c, + struct bch_inode_info *inode, struct page *page, + struct bch2_page_reservation *res, + unsigned offset, unsigned len) +{ + struct bch_page_state *s = bch2_page_state(page); + unsigned i, dirty_sectors = 0; - /* Owns PAGE_SECTORS sized reservation: */ - unsigned reserved:1; - unsigned nr_replicas:4; + WARN_ON((u64) page_offset(page) + offset + len > + round_up((u64) i_size_read(&inode->v), block_bytes(c))); - /* - * Number of sectors on disk - for i_blocks - * Uncompressed size, not compressed size: - */ - u8 sectors; - u8 dirty_sectors; -}; - /* for cmpxchg: */ - unsigned long v; -}; -}; + spin_lock(&s->lock); -#define page_state_cmpxchg(_ptr, _new, _expr) \ -({ \ - unsigned long _v = READ_ONCE((_ptr)->v); \ - struct bch_page_state _old; \ - \ - do { \ - _old.v = _new.v = _v; \ - _expr; \ - \ - EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\ - } while (_old.v != _new.v && \ - (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v); \ - \ - _old; \ -}) + for (i = round_down(offset, block_bytes(c)) >> 9; + i < round_up(offset + len, block_bytes(c)) >> 9; + i++) { + unsigned sectors = sectors_to_reserve(&s->s[i], + res->disk.nr_replicas); -static inline struct bch_page_state *page_state(struct page *page) -{ - struct bch_page_state *s = (void *) &page->private; + /* + * This can happen if we race with the error path in + * bch2_writepage_io_done(): + */ + sectors = min_t(unsigned, sectors, res->disk.sectors); - BUILD_BUG_ON(sizeof(*s) > sizeof(page->private)); + s->s[i].replicas_reserved += sectors; + res->disk.sectors -= sectors; - if (!PagePrivate(page)) - SetPagePrivate(page); + if (s->s[i].state == SECTOR_UNALLOCATED) + dirty_sectors++; - return s; -} + s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY); + } -static void bch2_put_page_reservation(struct bch_fs *c, struct page *page) -{ - struct disk_reservation res = { .sectors = PAGE_SECTORS }; - struct bch_page_state s; + spin_unlock(&s->lock); - s = page_state_cmpxchg(page_state(page), s, { - if (!s.reserved) - return; - s.reserved = 0; - }); + if (dirty_sectors) + i_sectors_acct(c, inode, &res->quota, dirty_sectors); - bch2_disk_reservation_put(c, &res); + if (!PageDirty(page)) + __set_page_dirty_nobuffers(page); } -static int bch2_get_page_reservation(struct bch_fs *c, struct page *page, - bool check_enospc) +vm_fault_t bch2_page_fault(struct vm_fault *vmf) { - struct bch_page_state *s = page_state(page), new; - struct disk_reservation res; - int ret = 0; - - BUG_ON(s->allocated && s->sectors != PAGE_SECTORS); - - if (s->allocated || s->reserved) - return 0; - - ret = bch2_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc - ? 
BCH_DISK_RESERVATION_NOFAIL : 0); - if (ret) - return ret; + struct file *file = vmf->vma->vm_file; + struct bch_inode_info *inode = file_bch_inode(file); + int ret; - page_state_cmpxchg(s, new, { - if (new.reserved) { - bch2_disk_reservation_put(c, &res); - return 0; - } - new.reserved = 1; - new.nr_replicas = res.nr_replicas; - }); + bch2_pagecache_add_get(&inode->ei_pagecache_lock); + ret = filemap_fault(vmf); + bch2_pagecache_add_put(&inode->ei_pagecache_lock); - return 0; + return ret; } -static void bch2_clear_page_bits(struct page *page) +vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf) { - struct bch_inode_info *inode = to_bch_ei(page->mapping->host); + struct page *page = vmf->page; + struct file *file = vmf->vma->vm_file; + struct bch_inode_info *inode = file_bch_inode(file); + struct address_space *mapping = file->f_mapping; struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct disk_reservation res = { .sectors = PAGE_SECTORS }; - struct bch_page_state s; - - if (!PagePrivate(page)) - return; + struct bch2_page_reservation res; + unsigned len; + loff_t isize; + int ret = VM_FAULT_LOCKED; - s = xchg(page_state(page), (struct bch_page_state) { .v = 0 }); - ClearPagePrivate(page); + bch2_page_reservation_init(c, inode, &res); - if (s.dirty_sectors) { - spin_lock(&inode->v.i_lock); - inode->v.i_blocks -= s.dirty_sectors; - spin_unlock(&inode->v.i_lock); - } + sb_start_pagefault(inode->v.i_sb); + file_update_time(file); - if (s.reserved) - bch2_disk_reservation_put(c, &res); -} + /* + * Not strictly necessary, but helps avoid dio writes livelocking in + * write_invalidate_inode_pages_range() - can drop this if/when we get + * a write_invalidate_inode_pages_range() that works without dropping + * page lock before invalidating page + */ + bch2_pagecache_add_get(&inode->ei_pagecache_lock); -int bch2_set_page_dirty(struct page *page) -{ - struct bch_page_state old, new; + lock_page(page); + isize = i_size_read(&inode->v); - old = page_state_cmpxchg(page_state(page), new, - new.dirty_sectors = PAGE_SECTORS - new.sectors; - ); + if (page->mapping != mapping || page_offset(page) >= isize) { + unlock_page(page); + ret = VM_FAULT_NOPAGE; + goto out; + } - if (old.dirty_sectors != new.dirty_sectors) { - struct bch_inode_info *inode = to_bch_ei(page->mapping->host); + len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page)); - spin_lock(&inode->v.i_lock); - inode->v.i_blocks += new.dirty_sectors - old.dirty_sectors; - spin_unlock(&inode->v.i_lock); + if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) { + unlock_page(page); + ret = VM_FAULT_SIGBUS; + goto out; } - return __set_page_dirty_nobuffers(page); -} + bch2_set_page_dirty(c, inode, page, &res, 0, len); + bch2_page_reservation_put(c, inode, &res); + + wait_for_stable_page(page); +out: + bch2_pagecache_add_put(&inode->ei_pagecache_lock); + sb_end_pagefault(inode->v.i_sb); -/* readpages/writepages: */ + return ret; +} -static bool bio_can_add_page_contig(struct bio *bio, struct page *page) +void bch2_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { - sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT; + if (offset || length < PAGE_SIZE) + return; - return bio->bi_vcnt < bio->bi_max_vecs && - bio_end_sector(bio) == offset; + bch2_clear_page_bits(page); } -static void __bio_add_page(struct bio *bio, struct page *page) +int bch2_releasepage(struct page *page, gfp_t gfp_mask) { - bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) { - .bv_page = page, - .bv_len = PAGE_SIZE, - .bv_offset 
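/*
 * The helpers removed in this hunk built bios page by page; the key
 * test was bio_can_add_page_contig(): a page may join a bio only if a
 * bvec slot is free and the page begins exactly at the bio's current
 * end sector. A userspace model, assuming 4K pages and 512-byte
 * sectors; names are stand-ins.
 */
#include <stdint.h>
#include <stdbool.h>

#define MODEL_PAGE_SECTORS 8	/* PAGE_SIZE >> 9 */

struct bio_model {
	uint64_t	start_sector;
	unsigned	vcnt;
	unsigned	max_vecs;
};

static uint64_t bio_end_sector_model(const struct bio_model *b)
{
	return b->start_sector + (uint64_t) b->vcnt * MODEL_PAGE_SECTORS;
}

static bool can_add_page_contig(const struct bio_model *b,
				uint64_t page_index)
{
	return b->vcnt < b->max_vecs &&
	       bio_end_sector_model(b) == page_index * MODEL_PAGE_SECTORS;
}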
= 0, - }; + if (PageDirty(page)) + return 0; - bio->bi_iter.bi_size += PAGE_SIZE; + bch2_clear_page_bits(page); + return 1; } -static int bio_add_page_contig(struct bio *bio, struct page *page) +#ifdef CONFIG_MIGRATION +int bch2_migrate_page(struct address_space *mapping, struct page *newpage, + struct page *page, enum migrate_mode mode) { - sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT; + int ret; + + EBUG_ON(!PageLocked(page)); + EBUG_ON(!PageLocked(newpage)); - BUG_ON(!bio->bi_max_vecs); + ret = migrate_page_move_mapping(mapping, newpage, page, 0); + if (ret != MIGRATEPAGE_SUCCESS) + return ret; - if (!bio->bi_vcnt) - bio->bi_iter.bi_sector = offset; - else if (!bio_can_add_page_contig(bio, page)) - return -1; + if (PagePrivate(page)) + attach_page_private(newpage, detach_page_private(page)); - __bio_add_page(bio, page); - return 0; + if (mode != MIGRATE_SYNC_NO_COPY) + migrate_page_copy(newpage, page); + else + migrate_page_states(newpage, page); + return MIGRATEPAGE_SUCCESS; } +#endif + +/* readpage(s): */ static void bch2_readpages_end_io(struct bio *bio) { + struct bvec_iter_all iter; struct bio_vec *bv; - int i; - bio_for_each_segment_all(bv, bio, i) { + bio_for_each_segment_all(bv, bio, iter) { struct page *page = bv->bv_page; - if (!bio->bi_error) { + if (!bio->bi_status) { SetPageUptodate(page); } else { ClearPageUptodate(page); @@ -638,360 +622,407 @@ static void bch2_readpages_end_io(struct bio *bio) struct readpages_iter { struct address_space *mapping; - struct list_head pages; + struct page **pages; unsigned nr_pages; + unsigned idx; + pgoff_t offset; }; -static int readpage_add_page(struct readpages_iter *iter, struct page *page) +static int readpages_iter_init(struct readpages_iter *iter, + struct readahead_control *ractl) { - struct bch_page_state *s = page_state(page); - int ret; - - BUG_ON(s->reserved); - s->allocated = 1; - s->sectors = 0; + unsigned i, nr_pages = readahead_count(ractl); - prefetchw(&page->flags); - ret = add_to_page_cache_lru(page, iter->mapping, - page->index, GFP_NOFS); - put_page(page); - return ret; -} + memset(iter, 0, sizeof(*iter)); -static inline struct page *readpage_iter_next(struct readpages_iter *iter) -{ - while (iter->nr_pages) { - struct page *page = - list_last_entry(&iter->pages, struct page, lru); + iter->mapping = ractl->mapping; + iter->offset = readahead_index(ractl); + iter->nr_pages = nr_pages; - prefetchw(&page->flags); - list_del(&page->lru); - iter->nr_pages--; + iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS); + if (!iter->pages) + return -ENOMEM; - if (!readpage_add_page(iter, page)) - return page; + __readahead_batch(ractl, iter->pages, nr_pages); + for (i = 0; i < nr_pages; i++) { + __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL); + put_page(iter->pages[i]); } - return NULL; + return 0; } -#define for_each_readpage_page(_iter, _page) \ - for (; \ - ((_page) = __readpage_next_page(&(_iter)));) \ - -static void bch2_mark_pages_unalloc(struct bio *bio) +static inline struct page *readpage_iter_next(struct readpages_iter *iter) { - struct bvec_iter iter; - struct bio_vec bv; + if (iter->idx >= iter->nr_pages) + return NULL; + + EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx); - bio_for_each_segment(bv, bio, iter) - page_state(bv.bv_page)->allocated = 0; + return iter->pages[iter->idx]; } static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k) { struct bvec_iter iter; struct bio_vec bv; + unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v + ? 
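/*
 * readpages_iter above replaces the old list-based readahead walk with
 * a snapshot: every page in the readahead batch is captured into an
 * array once, then walked by index, so pages[idx]->index is always
 * iter->offset + idx. The shape of that iterator, with stand-in types:
 */
#include <stddef.h>

struct ra_iter_model {
	void		**pages;	/* snapshot of the readahead batch */
	unsigned	nr_pages;
	unsigned	idx;
	unsigned long	offset;		/* file index of pages[0] */
};

static void *ra_iter_peek(struct ra_iter_model *it)
{
	return it->idx < it->nr_pages ? it->pages[it->idx] : NULL;
}

static unsigned long ra_iter_pos(const struct ra_iter_model *it)
{
	return it->offset + it->idx;	/* the invariant the EBUG_ON checks */
}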
0 : bch2_bkey_nr_ptrs_fully_allocated(k); + unsigned state = k.k->type == KEY_TYPE_reservation + ? SECTOR_RESERVED + : SECTOR_ALLOCATED; bio_for_each_segment(bv, bio, iter) { - struct bch_page_state *s = page_state(bv.bv_page); - - /* sectors in @k from the start of this page: */ - unsigned k_sectors = k.k->size - (iter.bi_sector - k.k->p.offset); - - unsigned page_sectors = min(bv.bv_len >> 9, k_sectors); + struct bch_page_state *s = bch2_page_state(bv.bv_page); + unsigned i; + + for (i = bv.bv_offset >> 9; + i < (bv.bv_offset + bv.bv_len) >> 9; + i++) { + s->s[i].nr_replicas = nr_ptrs; + s->s[i].state = state; + } + } +} - if (!s->sectors) - s->nr_replicas = bch2_extent_nr_dirty_ptrs(k); - else - s->nr_replicas = min_t(unsigned, s->nr_replicas, - bch2_extent_nr_dirty_ptrs(k)); +static bool extent_partial_reads_expensive(struct bkey_s_c k) +{ + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + struct bch_extent_crc_unpacked crc; + const union bch_extent_entry *i; - BUG_ON(s->sectors + page_sectors > PAGE_SECTORS); - s->sectors += page_sectors; - } + bkey_for_each_crc(k.k, ptrs, crc, i) + if (crc.csum_type || crc.compression_type) + return true; + return false; } static void readpage_bio_extend(struct readpages_iter *iter, - struct bio *bio, u64 offset, + struct bio *bio, + unsigned sectors_this_extent, bool get_more) { - struct page *page; - pgoff_t page_offset; - int ret; - - while (bio_end_sector(bio) < offset && + while (bio_sectors(bio) < sectors_this_extent && bio->bi_vcnt < bio->bi_max_vecs) { - page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT; + pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT; + struct page *page = readpage_iter_next(iter); + int ret; - if (iter->nr_pages) { - page = list_last_entry(&iter->pages, struct page, lru); - if (page->index != page_offset) + if (page) { + if (iter->offset + iter->idx != page_offset) break; - list_del(&page->lru); - iter->nr_pages--; - } else if (get_more) { - rcu_read_lock(); - page = radix_tree_lookup(&iter->mapping->page_tree, page_offset); - rcu_read_unlock(); + iter->idx++; + } else { + if (!get_more) + break; - if (page && !radix_tree_exceptional_entry(page)) + page = xa_load(&iter->mapping->i_pages, page_offset); + if (page && !xa_is_value(page)) break; page = __page_cache_alloc(readahead_gfp_mask(iter->mapping)); if (!page) break; - page->index = page_offset; - ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page); - } else { - break; - } + if (!__bch2_page_state_create(page, 0)) { + put_page(page); + break; + } - ret = readpage_add_page(iter, page); - if (ret) - break; + ret = add_to_page_cache_lru(page, iter->mapping, + page_offset, GFP_NOFS); + if (ret) { + __bch2_page_state_release(page); + put_page(page); + break; + } - __bio_add_page(bio, page); - } + put_page(page); + } - if (!iter->nr_pages) - SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page); + BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0)); + } } -static void bchfs_read(struct bch_fs *c, struct btree_iter *iter, +static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter, struct bch_read_bio *rbio, u64 inum, struct readpages_iter *readpages_iter) { - struct bio *bio = &rbio->bio; + struct bch_fs *c = trans->c; + struct bkey_on_stack sk; int flags = BCH_READ_RETRY_IF_STALE| BCH_READ_MAY_PROMOTE; + int ret = 0; + rbio->c = c; + rbio->start_time = local_clock(); + + bkey_on_stack_init(&sk); +retry: while (1) { - struct extent_pick_ptr pick; - BKEY_PADDED(k) tmp; struct bkey_s_c k; - unsigned bytes; - bool is_last; + unsigned 
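/*
 * bch2_add_page_sectors() above turns each bvec's byte range into
 * per-page sector indices by shifting by 9 (512-byte sectors). A
 * self-contained check of that index math:
 */
#include <assert.h>

static void bvec_to_sectors(unsigned bv_offset, unsigned bv_len,
			    unsigned *first, unsigned *end)
{
	*first	= bv_offset >> 9;		/* first sector touched */
	*end	= (bv_offset + bv_len) >> 9;	/* one past the last */
}

static void bvec_to_sectors_demo(void)
{
	unsigned first, end;

	/* a 1K bvec starting 512 bytes into its page covers sectors 1..2 */
	bvec_to_sectors(512, 1024, &first, &end);
	assert(first == 1 && end == 3);
}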
bytes, sectors, offset_into_extent; - bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector)); + bch2_btree_iter_set_pos(iter, + POS(inum, rbio->bio.bi_iter.bi_sector)); - k = bch2_btree_iter_peek_with_holes(iter); - BUG_ON(!k.k); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) + break; - if (IS_ERR(k.k)) { - int ret = bch2_btree_iter_unlock(iter); - BUG_ON(!ret); - bcache_io_error(c, bio, "btree IO error %i", ret); - bio_endio(bio); - return; - } + offset_into_extent = iter->pos.offset - + bkey_start_offset(k.k); + sectors = k.k->size - offset_into_extent; - bkey_reassemble(&tmp.k, k); - bch2_btree_iter_unlock(iter); - k = bkey_i_to_s_c(&tmp.k); + bkey_on_stack_reassemble(&sk, c, k); - bch2_extent_pick_ptr(c, k, NULL, &pick); - if (IS_ERR(pick.ca)) { - bcache_io_error(c, bio, "no device to read from"); - bio_endio(bio); - return; - } + ret = bch2_read_indirect_extent(trans, + &offset_into_extent, &sk); + if (ret) + break; + + k = bkey_i_to_s_c(sk.k); + + sectors = min(sectors, k.k->size - offset_into_extent); + + bch2_trans_unlock(trans); if (readpages_iter) - readpage_bio_extend(readpages_iter, - bio, k.k->p.offset, - pick.ca && - (pick.crc.csum_type || - pick.crc.compression_type)); + readpage_bio_extend(readpages_iter, &rbio->bio, sectors, + extent_partial_reads_expensive(k)); - bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) - - bio->bi_iter.bi_sector) << 9; - is_last = bytes == bio->bi_iter.bi_size; - swap(bio->bi_iter.bi_size, bytes); + bytes = min(sectors, bio_sectors(&rbio->bio)) << 9; + swap(rbio->bio.bi_iter.bi_size, bytes); - if (bkey_extent_is_allocation(k.k)) - bch2_add_page_sectors(bio, k); + if (rbio->bio.bi_iter.bi_size == bytes) + flags |= BCH_READ_LAST_FRAGMENT; - if (!bch2_extent_is_fully_allocated(k)) - bch2_mark_pages_unalloc(bio); + if (bkey_extent_is_allocation(k.k)) + bch2_add_page_sectors(&rbio->bio, k); - if (pick.ca) { - if (!is_last) { - bio_inc_remaining(&rbio->bio); - flags |= BCH_READ_MUST_CLONE; - trace_read_split(&rbio->bio); - } + bch2_read_extent(trans, rbio, k, offset_into_extent, flags); - bch2_read_extent(c, rbio, bkey_s_c_to_extent(k), - &pick, flags); - } else { - zero_fill_bio(bio); + if (flags & BCH_READ_LAST_FRAGMENT) + break; - if (is_last) - bio_endio(bio); - } + swap(rbio->bio.bi_iter.bi_size, bytes); + bio_advance(&rbio->bio, bytes); + } - if (is_last) - return; + if (ret == -EINTR) + goto retry; - swap(bio->bi_iter.bi_size, bytes); - bio_advance(bio, bytes); + if (ret) { + bcache_io_error(c, &rbio->bio, "btree IO error %i", ret); + bio_endio(&rbio->bio); } + + bkey_on_stack_exit(&sk, c); } -int bch2_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +void bch2_readahead(struct readahead_control *ractl) { - struct bch_inode_info *inode = to_bch_ei(mapping->host); + struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_iter iter; + struct bch_io_opts opts = io_opts(c, &inode->ei_inode); + struct btree_trans trans; + struct btree_iter *iter; struct page *page; - struct readpages_iter readpages_iter = { - .mapping = mapping, .nr_pages = nr_pages - }; + struct readpages_iter readpages_iter; + int ret; - bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0); + ret = readpages_iter_init(&readpages_iter, ractl); + BUG_ON(ret); - INIT_LIST_HEAD(&readpages_iter.pages); - list_add(&readpages_iter.pages, pages); - list_del_init(pages); + bch2_trans_init(&trans, c, 0, 0); - if 
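/*
 * bchfs_read() above is written as a restartable transaction: any step
 * may return -EINTR when btree locks had to be dropped, and the whole
 * loop then restarts from the current position rather than failing.
 * The skeleton of that pattern, with a stand-in step function:
 */
#include <errno.h>

static int model_attempts;

static int trans_step_model(void)
{
	/* pretend the first two attempts lost their locks */
	return model_attempts++ < 2 ? -EINTR : 0;
}

static int trans_do_model(void)
{
	int ret;
retry:
	ret = trans_step_model();
	if (ret == -EINTR)
		goto retry;	/* restart; don't report the restart as failure */
	return ret;
}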
(current->pagecache_lock != &mapping->add_lock) - pagecache_add_get(&mapping->add_lock); + iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, + BTREE_ITER_SLOTS); - while ((page = readpage_iter_next(&readpages_iter))) { - unsigned n = max_t(unsigned, - min_t(unsigned, readpages_iter.nr_pages + 1, - BIO_MAX_PAGES), - c->sb.encoded_extent_max >> PAGE_SECTOR_SHIFT); + bch2_pagecache_add_get(&inode->ei_pagecache_lock); + while ((page = readpage_iter_next(&readpages_iter))) { + pgoff_t index = readpages_iter.offset + readpages_iter.idx; + unsigned n = min_t(unsigned, + readpages_iter.nr_pages - + readpages_iter.idx, + BIO_MAX_PAGES); struct bch_read_bio *rbio = - to_rbio(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read)); + rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read), + opts); + + readpages_iter.idx++; + bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0); + rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT; rbio->bio.bi_end_io = bch2_readpages_end_io; - bio_add_page_contig(&rbio->bio, page); - bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter); + BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0)); + + bchfs_read(&trans, iter, rbio, inode->v.i_ino, + &readpages_iter); } - if (current->pagecache_lock != &mapping->add_lock) - pagecache_add_put(&mapping->add_lock); + bch2_pagecache_add_put(&inode->ei_pagecache_lock); - return 0; + bch2_trans_exit(&trans); + kfree(readpages_iter.pages); } static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio, u64 inum, struct page *page) { - struct btree_iter iter; - - /* - * Initialize page state: - * If a page is partly allocated and partly a hole, we want it to be - * marked BCH_PAGE_UNALLOCATED - so we initially mark all pages - * allocated and then mark them unallocated as we find holes: - * - * Note that the bio hasn't been split yet - it's the only bio that - * points to these pages. 
As we walk extents and split @bio, that - * necessarily be true, the splits won't necessarily be on page - * boundaries: - */ - struct bch_page_state *s = page_state(page); + struct btree_trans trans; + struct btree_iter *iter; - EBUG_ON(s->reserved); - s->allocated = 1; - s->sectors = 0; + bch2_page_state_create(page, __GFP_NOFAIL); bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC); - bio_add_page_contig(&rbio->bio, page); + rbio->bio.bi_iter.bi_sector = + (sector_t) page->index << PAGE_SECTOR_SHIFT; + BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0)); + + bch2_trans_init(&trans, c, 0, 0); + iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, + BTREE_ITER_SLOTS); + + bchfs_read(&trans, iter, rbio, inum, NULL); - bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0); - bchfs_read(c, &iter, rbio, inum, NULL); + bch2_trans_exit(&trans); } int bch2_readpage(struct file *file, struct page *page) { struct bch_inode_info *inode = to_bch_ei(page->mapping->host); struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct bch_io_opts opts = io_opts(c, &inode->ei_inode); struct bch_read_bio *rbio; - rbio = to_rbio(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read)); + rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts); rbio->bio.bi_end_io = bch2_readpages_end_io; __bchfs_readpage(c, rbio, inode->v.i_ino, page); return 0; } +static void bch2_read_single_page_end_io(struct bio *bio) +{ + complete(bio->bi_private); +} + +static int bch2_read_single_page(struct page *page, + struct address_space *mapping) +{ + struct bch_inode_info *inode = to_bch_ei(mapping->host); + struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct bch_read_bio *rbio; + int ret; + DECLARE_COMPLETION_ONSTACK(done); + + rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), + io_opts(c, &inode->ei_inode)); + rbio->bio.bi_private = &done; + rbio->bio.bi_end_io = bch2_read_single_page_end_io; + + __bchfs_readpage(c, rbio, inode->v.i_ino, page); + wait_for_completion(&done); + + ret = blk_status_to_errno(rbio->bio.bi_status); + bio_put(&rbio->bio); + + if (ret < 0) + return ret; + + SetPageUptodate(page); + return 0; +} + +/* writepages: */ + struct bch_writepage_state { struct bch_writepage_io *io; + struct bch_io_opts opts; }; +static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c, + struct bch_inode_info *inode) +{ + return (struct bch_writepage_state) { + .opts = io_opts(c, &inode->ei_inode) + }; +} + static void bch2_writepage_io_free(struct closure *cl) { struct bch_writepage_io *io = container_of(cl, struct bch_writepage_io, cl); - bio_put(&io->op.op.wbio.bio); + bio_put(&io->op.wbio.bio); } static void bch2_writepage_io_done(struct closure *cl) { struct bch_writepage_io *io = container_of(cl, struct bch_writepage_io, cl); - struct bch_fs *c = io->op.op.c; - struct bio *bio = &io->op.op.wbio.bio; + struct bch_fs *c = io->op.c; + struct bio *bio = &io->op.wbio.bio; + struct bvec_iter_all iter; struct bio_vec *bvec; unsigned i; - atomic_sub(bio->bi_vcnt, &c->writeback_pages); - wake_up(&c->writeback_wait); + if (io->op.error) { + bio_for_each_segment_all(bvec, bio, iter) { + struct bch_page_state *s; - bio_for_each_segment_all(bvec, bio, i) { - struct page *page = bvec->bv_page; + SetPageError(bvec->bv_page); + mapping_set_error(bvec->bv_page->mapping, -EIO); - if (io->op.op.error) { - SetPageError(page); - if (page->mapping) - set_bit(AS_EIO, &page->mapping->flags); + s = __bch2_page_state(bvec->bv_page); + spin_lock(&s->lock); + for (i = 0; i < PAGE_SECTORS; i++) + 
s->s[i].nr_replicas = 0; + spin_unlock(&s->lock); } + } - if (io->op.op.written >= PAGE_SECTORS) { - struct bch_page_state old, new; - - old = page_state_cmpxchg(page_state(page), new, { - new.sectors = PAGE_SECTORS; - new.dirty_sectors = 0; - }); + if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) { + bio_for_each_segment_all(bvec, bio, iter) { + struct bch_page_state *s; - io->op.sectors_added -= old.dirty_sectors; - io->op.op.written -= PAGE_SECTORS; + s = __bch2_page_state(bvec->bv_page); + spin_lock(&s->lock); + for (i = 0; i < PAGE_SECTORS; i++) + s->s[i].nr_replicas = 0; + spin_unlock(&s->lock); } } /* * racing with fallocate can cause us to add fewer sectors than * expected - but we shouldn't add more sectors than expected: - * + */ + BUG_ON(io->op.i_sectors_delta > 0); + + /* * (error (due to going RO) halfway through a page can screw that up * slightly) + * XXX wtf? + BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS); */ - BUG_ON(io->op.sectors_added >= (s64) PAGE_SECTORS); /* * PageWriteback is effectively our ref on the inode - fixup i_blocks * before calling end_page_writeback: */ - if (io->op.sectors_added) { - struct bch_inode_info *inode = io->op.inode; + i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta); - spin_lock(&inode->v.i_lock); - inode->v.i_blocks += io->op.sectors_added; - spin_unlock(&inode->v.i_lock); - } + bio_for_each_segment_all(bvec, bio, iter) { + struct bch_page_state *s = __bch2_page_state(bvec->bv_page); - bio_for_each_segment_all(bvec, bio, i) - end_page_writeback(bvec->bv_page); + if (atomic_dec_and_test(&s->write_count)) + end_page_writeback(bvec->bv_page); + } closure_return_with_destructor(&io->cl, bch2_writepage_io_free); } @@ -999,14 +1030,9 @@ static void bch2_writepage_io_done(struct closure *cl) static void bch2_writepage_do_io(struct bch_writepage_state *w) { struct bch_writepage_io *io = w->io; - struct bio *bio = &io->op.op.wbio.bio; w->io = NULL; - atomic_add(bio->bi_vcnt, &io->op.op.c->writeback_pages); - - io->op.op.pos.offset = bio->bi_iter.bi_sector; - - closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl); + closure_call(&io->op.cl, bch2_write, NULL, &io->cl); continue_at(&io->cl, bch2_writepage_io_done, NULL); } @@ -1015,59 +1041,46 @@ static void bch2_writepage_do_io(struct bch_writepage_state *w) * possible, else allocating a new one: */ static void bch2_writepage_io_alloc(struct bch_fs *c, + struct writeback_control *wbc, struct bch_writepage_state *w, struct bch_inode_info *inode, - struct page *page) + u64 sector, + unsigned nr_replicas) { - u64 inum = inode->v.i_ino; - unsigned nr_replicas = page_state(page)->nr_replicas; - - EBUG_ON(!nr_replicas); - /* XXX: disk_reservation->gen isn't plumbed through */ + struct bch_write_op *op; - if (!w->io) { -alloc_io: - w->io = container_of(bio_alloc_bioset(GFP_NOFS, - BIO_MAX_PAGES, - &c->writepage_bioset), - struct bch_writepage_io, op.op.wbio.bio); + w->io = container_of(bio_alloc_bioset(GFP_NOFS, + BIO_MAX_PAGES, + &c->writepage_bioset), + struct bch_writepage_io, op.wbio.bio); - closure_init(&w->io->cl, NULL); - bch2_fswrite_op_init(&w->io->op, inode, false); - bch2_write_op_init(&w->io->op.op, c, - (struct disk_reservation) { - .nr_replicas = c->opts.data_replicas, - }, - c->fastest_devs, - writepoint_hashed(inode->ei_last_dirtied), - POS(inum, 0), - &inode->ei_journal_seq, - 0); - w->io->op.op.index_update_fn = bchfs_write_index_update; - } - - if (w->io->op.op.res.nr_replicas != nr_replicas || - bio_add_page_contig(&w->io->op.op.wbio.bio, page)) { - bch2_writepage_do_io(w); 
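/*
 * Writeback above may split one page across several bios, so the first
 * completion alone must not clear PageWriteback. write_count is set to
 * 1 by the submitter, incremented once per bio fragment, and every
 * holder drops a reference; whoever reaches zero ends writeback. A
 * C11-atomics model of that protocol:
 */
#include <stdatomic.h>
#include <stdbool.h>

struct wb_page_model { atomic_int write_count; };

static void wb_submit_begin(struct wb_page_model *p)
{
	atomic_store(&p->write_count, 1);	/* the submitter's reference */
}

static void wb_add_fragment(struct wb_page_model *p)
{
	atomic_fetch_add(&p->write_count, 1);	/* one per bio fragment */
}

/* returns true when the caller must end page writeback */
static bool wb_put(struct wb_page_model *p)
{
	return atomic_fetch_sub(&p->write_count, 1) == 1;
}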
- goto alloc_io; - } + closure_init(&w->io->cl, NULL); + w->io->inode = inode; - /* - * We shouldn't ever be handed pages for multiple inodes in a single - * pass - right? - */ - BUG_ON(inode != w->io->op.inode); + op = &w->io->op; + bch2_write_op_init(op, c, w->opts); + op->target = w->opts.foreground_target; + op_journal_seq_set(op, &inode->ei_journal_seq); + op->nr_replicas = nr_replicas; + op->res.nr_replicas = nr_replicas; + op->write_point = writepoint_hashed(inode->ei_last_dirtied); + op->pos = POS(inode->v.i_ino, sector); + op->wbio.bio.bi_iter.bi_sector = sector; + op->wbio.bio.bi_opf = wbc_to_write_flags(wbc); } -static int __bch2_writepage(struct bch_fs *c, struct page *page, +static int __bch2_writepage(struct page *page, struct writeback_control *wbc, - struct bch_writepage_state *w) + void *data) { struct bch_inode_info *inode = to_bch_ei(page->mapping->host); - struct bch_page_state new, old; - unsigned offset; + struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct bch_writepage_state *w = data; + struct bch_page_state *s, orig; + unsigned i, offset, nr_replicas_this_write = U32_MAX; loff_t i_size = i_size_read(&inode->v); pgoff_t end_index = i_size >> PAGE_SHIFT; + int ret; EBUG_ON(!PageUptodate(page)); @@ -1091,246 +1104,158 @@ static int __bch2_writepage(struct bch_fs *c, struct page *page, */ zero_user_segment(page, offset, PAGE_SIZE); do_io: - bch2_writepage_io_alloc(c, w, inode, page); + s = bch2_page_state_create(page, __GFP_NOFAIL); + + ret = bch2_get_page_disk_reservation(c, inode, page, true); + if (ret) { + SetPageError(page); + mapping_set_error(page->mapping, ret); + unlock_page(page); + return 0; + } - /* while page is locked: */ - w->io->op.new_i_size = i_size; + /* Before unlocking the page, get copy of reservations: */ + orig = *s; - if (wbc->sync_mode == WB_SYNC_ALL) - w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC; + for (i = 0; i < PAGE_SECTORS; i++) { + if (s->s[i].state < SECTOR_DIRTY) + continue; - /* Before unlocking the page, transfer reservation to w->io: */ - old = page_state_cmpxchg(page_state(page), new, { - EBUG_ON(!new.reserved && - (new.sectors != PAGE_SECTORS || - !new.allocated)); + nr_replicas_this_write = + min_t(unsigned, nr_replicas_this_write, + s->s[i].nr_replicas + + s->s[i].replicas_reserved); + } - if (new.allocated && - w->io->op.op.compression_type != BCH_COMPRESSION_NONE) - new.allocated = 0; - else if (!new.reserved) - goto out; - new.reserved = 0; - }); + for (i = 0; i < PAGE_SECTORS; i++) { + if (s->s[i].state < SECTOR_DIRTY) + continue; + + s->s[i].nr_replicas = w->opts.compression + ? 
0 : nr_replicas_this_write; + + s->s[i].replicas_reserved = 0; + s->s[i].state = SECTOR_ALLOCATED; + } + + BUG_ON(atomic_read(&s->write_count)); + atomic_set(&s->write_count, 1); - w->io->op.op.res.sectors += PAGE_SECTORS * - (old.reserved - new.reserved) * - old.nr_replicas; -out: BUG_ON(PageWriteback(page)); set_page_writeback(page); + unlock_page(page); - return 0; -} + offset = 0; + while (1) { + unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0; + u64 sector; -int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc) -{ - struct bch_fs *c = mapping->host->i_sb->s_fs_info; - struct bch_writepage_state w = { NULL }; - struct pagecache_iter iter; - struct page *page; - int ret = 0; - int done = 0; - pgoff_t uninitialized_var(writeback_index); - pgoff_t index; - pgoff_t end; /* Inclusive */ - pgoff_t done_index; - int cycled; - int range_whole = 0; - int tag; - - if (wbc->range_cyclic) { - writeback_index = mapping->writeback_index; /* prev offset */ - index = writeback_index; - if (index == 0) - cycled = 1; - else - cycled = 0; - end = -1; - } else { - index = wbc->range_start >> PAGE_SHIFT; - end = wbc->range_end >> PAGE_SHIFT; - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) - range_whole = 1; - cycled = 1; /* ignore range_cyclic tests */ - } - if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) - tag = PAGECACHE_TAG_TOWRITE; - else - tag = PAGECACHE_TAG_DIRTY; -retry: - if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) - tag_pages_for_writeback(mapping, index, end); - - done_index = index; -get_pages: - for_each_pagecache_tag(&iter, mapping, tag, index, end, page) { - done_index = page->index; - - if (w.io && - !bio_can_add_page_contig(&w.io->op.op.wbio.bio, page)) - bch2_writepage_do_io(&w); - - if (!w.io && - atomic_read(&c->writeback_pages) >= - c->writeback_pages_max) { - /* don't sleep with pages pinned: */ - pagecache_iter_release(&iter); - - __wait_event(c->writeback_wait, - atomic_read(&c->writeback_pages) < - c->writeback_pages_max); - goto get_pages; - } + while (offset < PAGE_SECTORS && + orig.s[offset].state < SECTOR_DIRTY) + offset++; - lock_page(page); + if (offset == PAGE_SECTORS) + break; - /* - * Page truncated or invalidated. We can freely skip it - * then, even for data integrity operations: the page - * has disappeared concurrently, so there could be no - * real expectation of this data interity operation - * even if there is now a new, dirty page at the same - * pagecache address. 
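/*
 * The writepage loop above scans the page's sector states for maximal
 * runs of sectors >= SECTOR_DIRTY, issuing one bio fragment per run
 * instead of one per sector. A standalone model of the run scan
 * (states ordered as in bch_page_sector, so 2 means dirty):
 */
#include <stdio.h>

#define MODEL_NSEC 8

static void for_each_dirty_run(const unsigned char s[MODEL_NSEC])
{
	unsigned offset = 0;

	while (1) {
		unsigned len = 1;

		while (offset < MODEL_NSEC && s[offset] < 2)
			offset++;		/* skip clean sectors */
		if (offset == MODEL_NSEC)
			break;

		while (offset + len < MODEL_NSEC && s[offset + len] >= 2)
			len++;			/* extend the current run */

		printf("run at sector %u, %u sectors\n", offset, len);
		offset += len;
	}
}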
- */ - if (unlikely(page->mapping != mapping)) { -continue_unlock: - unlock_page(page); - continue; - } + sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset; - if (!PageDirty(page)) { - /* someone wrote it for us */ - goto continue_unlock; - } + while (offset + sectors < PAGE_SECTORS && + orig.s[offset + sectors].state >= SECTOR_DIRTY) + sectors++; - if (PageWriteback(page)) { - if (wbc->sync_mode != WB_SYNC_NONE) - wait_on_page_writeback(page); - else - goto continue_unlock; + for (i = offset; i < offset + sectors; i++) { + reserved_sectors += orig.s[i].replicas_reserved; + dirty_sectors += orig.s[i].state == SECTOR_DIRTY; } - BUG_ON(PageWriteback(page)); - if (!clear_page_dirty_for_io(page)) - goto continue_unlock; + if (w->io && + (w->io->op.res.nr_replicas != nr_replicas_this_write || + bio_full(&w->io->op.wbio.bio, PAGE_SIZE) || + w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >= + (BIO_MAX_PAGES * PAGE_SIZE) || + bio_end_sector(&w->io->op.wbio.bio) != sector)) + bch2_writepage_do_io(w); - trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); - ret = __bch2_writepage(c, page, wbc, &w); - if (unlikely(ret)) { - if (ret == AOP_WRITEPAGE_ACTIVATE) { - unlock_page(page); - ret = 0; - } else { - /* - * done_index is set past this page, - * so media errors will not choke - * background writeout for the entire - * file. This has consequences for - * range_cyclic semantics (ie. it may - * not be suitable for data integrity - * writeout). - */ - done_index = page->index + 1; - done = 1; - break; - } - } + if (!w->io) + bch2_writepage_io_alloc(c, wbc, w, inode, sector, + nr_replicas_this_write); - /* - * We stop writing back only if we are not doing - * integrity sync. In case of integrity sync we have to - * keep going until we have written all the pages - * we tagged for writeback prior to entering this loop. 
- */ - if (--wbc->nr_to_write <= 0 && - wbc->sync_mode == WB_SYNC_NONE) { - done = 1; - break; - } - } - pagecache_iter_release(&iter); + atomic_inc(&s->write_count); - if (w.io) - bch2_writepage_do_io(&w); + BUG_ON(inode != w->io->inode); + BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page, + sectors << 9, offset << 9)); - if (!cycled && !done) { - /* - * range_cyclic: - * We hit the last page and there is more work to be done: wrap - * back to the start of the file - */ - cycled = 1; - index = 0; - end = writeback_index - 1; - goto retry; + /* Check for writing past i_size: */ + WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) > + round_up(i_size, block_bytes(c))); + + w->io->op.res.sectors += reserved_sectors; + w->io->op.i_sectors_delta -= dirty_sectors; + w->io->op.new_i_size = i_size; + + offset += sectors; } - if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) - mapping->writeback_index = done_index; - return ret; + if (atomic_dec_and_test(&s->write_count)) + end_page_writeback(page); + + return 0; } -int bch2_writepage(struct page *page, struct writeback_control *wbc) +int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc) { - struct bch_fs *c = page->mapping->host->i_sb->s_fs_info; - struct bch_writepage_state w = { NULL }; + struct bch_fs *c = mapping->host->i_sb->s_fs_info; + struct bch_writepage_state w = + bch_writepage_state_init(c, to_bch_ei(mapping->host)); + struct blk_plug plug; int ret; - ret = __bch2_writepage(c, page, wbc, &w); + blk_start_plug(&plug); + ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w); if (w.io) bch2_writepage_do_io(&w); - + blk_finish_plug(&plug); return ret; } -static void bch2_read_single_page_end_io(struct bio *bio) -{ - complete(bio->bi_private); -} - -static int bch2_read_single_page(struct page *page, - struct address_space *mapping) +int bch2_writepage(struct page *page, struct writeback_control *wbc) { - struct bch_inode_info *inode = to_bch_ei(mapping->host); - struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct bch_read_bio *rbio; + struct bch_fs *c = page->mapping->host->i_sb->s_fs_info; + struct bch_writepage_state w = + bch_writepage_state_init(c, to_bch_ei(page->mapping->host)); int ret; - DECLARE_COMPLETION_ONSTACK(done); - - rbio = to_rbio(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read)); - rbio->bio.bi_private = &done; - rbio->bio.bi_end_io = bch2_read_single_page_end_io; - - __bchfs_readpage(c, rbio, inode->v.i_ino, page); - wait_for_completion(&done); - - ret = rbio->bio.bi_error; - bio_put(&rbio->bio); - if (ret < 0) - return ret; + ret = __bch2_writepage(page, wbc, &w); + if (w.io) + bch2_writepage_do_io(&w); - SetPageUptodate(page); - return 0; + return ret; } +/* buffered writes: */ + int bch2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct bch_inode_info *inode = to_bch_ei(mapping->host); struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct bch2_page_reservation *res; pgoff_t index = pos >> PAGE_SHIFT; unsigned offset = pos & (PAGE_SIZE - 1); struct page *page; int ret = -ENOMEM; - BUG_ON(inode_unhashed(&inode->v)); + res = kmalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + bch2_page_reservation_init(c, inode, res); + *fsdata = res; - /* Not strictly necessary - same reason as mkwrite(): */ - pagecache_add_get(&mapping->add_lock); + bch2_pagecache_add_get(&inode->ei_pagecache_lock); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -1359,7 
+1284,8 @@ readpage: if (ret) goto err; out: - ret = bch2_get_page_reservation(c, page, true); + ret = bch2_page_reservation_get(c, inode, page, res, + offset, len, true); if (ret) { if (!PageUptodate(page)) { /* @@ -1381,16 +1307,20 @@ err: put_page(page); *pagep = NULL; err_unlock: - pagecache_add_put(&mapping->add_lock); + bch2_pagecache_add_put(&inode->ei_pagecache_lock); + kfree(res); + *fsdata = NULL; return ret; } -int bch2_write_end(struct file *filp, struct address_space *mapping, +int bch2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - struct bch_inode_info *inode = to_bch_ei(page->mapping->host); + struct bch_inode_info *inode = to_bch_ei(mapping->host); struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct bch2_page_reservation *res = fsdata; + unsigned offset = pos & (PAGE_SIZE - 1); lockdep_assert_held(&inode->v.i_rwsem); @@ -1405,60 +1335,280 @@ int bch2_write_end(struct file *filp, struct address_space *mapping, copied = 0; } + spin_lock(&inode->v.i_lock); if (pos + copied > inode->v.i_size) i_size_write(&inode->v, pos + copied); + spin_unlock(&inode->v.i_lock); if (copied) { if (!PageUptodate(page)) SetPageUptodate(page); - if (!PageDirty(page)) - set_page_dirty(page); + + bch2_set_page_dirty(c, inode, page, res, offset, copied); inode->ei_last_dirtied = (unsigned long) current; - } else { - bch2_put_page_reservation(c, page); } unlock_page(page); put_page(page); - pagecache_add_put(&mapping->add_lock); + bch2_pagecache_add_put(&inode->ei_pagecache_lock); + + bch2_page_reservation_put(c, inode, res); + kfree(res); return copied; } -/* O_DIRECT */ +#define WRITE_BATCH_PAGES 32 -static void bch2_dio_read_complete(struct closure *cl) +static int __bch2_buffered_write(struct bch_inode_info *inode, + struct address_space *mapping, + struct iov_iter *iter, + loff_t pos, unsigned len) { - struct dio_read *dio = container_of(cl, struct dio_read, cl); - - dio->req->ki_complete(dio->req, dio->ret, 0); - bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */ -} + struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct page *pages[WRITE_BATCH_PAGES]; + struct bch2_page_reservation res; + unsigned long index = pos >> PAGE_SHIFT; + unsigned offset = pos & (PAGE_SIZE - 1); + unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE); + unsigned i, reserved = 0, set_dirty = 0; + unsigned copied = 0, nr_pages_copied = 0; + int ret = 0; -static void bch2_direct_IO_read_endio(struct bio *bio) -{ - struct dio_read *dio = bio->bi_private; + BUG_ON(!len); + BUG_ON(nr_pages > ARRAY_SIZE(pages)); - if (bio->bi_error) - dio->ret = bio->bi_error; + bch2_page_reservation_init(c, inode, &res); - closure_put(&dio->cl); -} + for (i = 0; i < nr_pages; i++) { + pages[i] = grab_cache_page_write_begin(mapping, index + i, 0); + if (!pages[i]) { + nr_pages = i; + if (!i) { + ret = -ENOMEM; + goto out; + } + len = min_t(unsigned, len, + nr_pages * PAGE_SIZE - offset); + break; + } + } -static void bch2_direct_IO_read_split_endio(struct bio *bio) -{ - bch2_direct_IO_read_endio(bio); - bio_check_pages_dirty(bio); /* transfers ownership */ -} + if (offset && !PageUptodate(pages[0])) { + ret = bch2_read_single_page(pages[0], mapping); + if (ret) + goto out; + } -static int bch2_direct_IO_read(struct bch_fs *c, struct kiocb *req, - struct file *file, struct bch_inode_info *inode, - struct iov_iter *iter, loff_t offset) -{ - struct dio_read *dio; + if ((pos + len) & (PAGE_SIZE - 1) && + !PageUptodate(pages[nr_pages - 
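/*
 * bch2_write_begin()/bch2_write_end() in this hunk pass ownership of a
 * heap-allocated bch2_page_reservation through the generic ->fsdata
 * cookie: begin allocates and charges it, end consumes what was
 * actually written, releases the rest and frees it. The shape of that
 * handoff, with stand-in types:
 */
#include <stdlib.h>

struct fsdata_res_model { unsigned long sectors; };

static int write_begin_model(void **fsdata)
{
	struct fsdata_res_model *res = calloc(1, sizeof(*res));

	if (!res)
		return -1;
	/* ... charge the reservation into *res here ... */
	*fsdata = res;			/* ownership passes to write_end */
	return 0;
}

static void write_end_model(void *fsdata)
{
	struct fsdata_res_model *res = fsdata;

	/* ... consume the written part, release the remainder ... */
	free(res);
}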
1])) { + if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) { + zero_user(pages[nr_pages - 1], 0, PAGE_SIZE); + } else { + ret = bch2_read_single_page(pages[nr_pages - 1], mapping); + if (ret) + goto out; + } + } + + while (reserved < len) { + struct page *page = pages[(offset + reserved) >> PAGE_SHIFT]; + unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1); + unsigned pg_len = min_t(unsigned, len - reserved, + PAGE_SIZE - pg_offset); +retry_reservation: + ret = bch2_page_reservation_get(c, inode, page, &res, + pg_offset, pg_len, true); + + if (ret && !PageUptodate(page)) { + ret = bch2_read_single_page(page, mapping); + if (!ret) + goto retry_reservation; + } + + if (ret) + goto out; + + reserved += pg_len; + } + + if (mapping_writably_mapped(mapping)) + for (i = 0; i < nr_pages; i++) + flush_dcache_page(pages[i]); + + while (copied < len) { + struct page *page = pages[(offset + copied) >> PAGE_SHIFT]; + unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1); + unsigned pg_len = min_t(unsigned, len - copied, + PAGE_SIZE - pg_offset); + unsigned pg_copied = iov_iter_copy_from_user_atomic(page, + iter, pg_offset, pg_len); + + if (!pg_copied) + break; + + if (!PageUptodate(page) && + pg_copied != PAGE_SIZE && + pos + copied + pg_copied < inode->v.i_size) { + zero_user(page, 0, PAGE_SIZE); + break; + } + + flush_dcache_page(page); + iov_iter_advance(iter, pg_copied); + copied += pg_copied; + + if (pg_copied != pg_len) + break; + } + + if (!copied) + goto out; + + spin_lock(&inode->v.i_lock); + if (pos + copied > inode->v.i_size) + i_size_write(&inode->v, pos + copied); + spin_unlock(&inode->v.i_lock); + + while (set_dirty < copied) { + struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT]; + unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1); + unsigned pg_len = min_t(unsigned, copied - set_dirty, + PAGE_SIZE - pg_offset); + + if (!PageUptodate(page)) + SetPageUptodate(page); + + bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len); + unlock_page(page); + put_page(page); + + set_dirty += pg_len; + } + + nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE); + inode->ei_last_dirtied = (unsigned long) current; +out: + for (i = nr_pages_copied; i < nr_pages; i++) { + unlock_page(pages[i]); + put_page(pages[i]); + } + + bch2_page_reservation_put(c, inode, &res); + + return copied ?: ret; +} + +static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct bch_inode_info *inode = file_bch_inode(file); + loff_t pos = iocb->ki_pos; + ssize_t written = 0; + int ret = 0; + + bch2_pagecache_add_get(&inode->ei_pagecache_lock); + + do { + unsigned offset = pos & (PAGE_SIZE - 1); + unsigned bytes = min_t(unsigned long, iov_iter_count(iter), + PAGE_SIZE * WRITE_BATCH_PAGES - offset); +again: + /* + * Bring in the user page that we will copy from _first_. + * Otherwise there's a nasty deadlock on copying from the + * same page as we're writing to, without it being marked + * up-to-date. + * + * Not only is this an optimisation, but it is also required + * to check that the address is actually valid, when atomic + * usercopies are used, below. 
+ */ + if (unlikely(iov_iter_fault_in_readable(iter, bytes))) { + bytes = min_t(unsigned long, iov_iter_count(iter), + PAGE_SIZE - offset); + + if (unlikely(iov_iter_fault_in_readable(iter, bytes))) { + ret = -EFAULT; + break; + } + } + + if (unlikely(fatal_signal_pending(current))) { + ret = -EINTR; + break; + } + + ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes); + if (unlikely(ret < 0)) + break; + + cond_resched(); + + if (unlikely(ret == 0)) { + /* + * If we were unable to copy any data at all, we must + * fall back to a single segment length write. + * + * If we didn't fallback here, we could livelock + * because not all segments in the iov can be copied at + * once without a pagefault. + */ + bytes = min_t(unsigned long, PAGE_SIZE - offset, + iov_iter_single_seg_count(iter)); + goto again; + } + pos += ret; + written += ret; + ret = 0; + + balance_dirty_pages_ratelimited(mapping); + } while (iov_iter_count(iter)); + + bch2_pagecache_add_put(&inode->ei_pagecache_lock); + + return written ? written : ret; +} + +/* O_DIRECT reads */ + +static void bch2_dio_read_complete(struct closure *cl) +{ + struct dio_read *dio = container_of(cl, struct dio_read, cl); + + dio->req->ki_complete(dio->req, dio->ret, 0); + bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */ +} + +static void bch2_direct_IO_read_endio(struct bio *bio) +{ + struct dio_read *dio = bio->bi_private; + + if (bio->bi_status) + dio->ret = blk_status_to_errno(bio->bi_status); + + closure_put(&dio->cl); +} + +static void bch2_direct_IO_read_split_endio(struct bio *bio) +{ + bch2_direct_IO_read_endio(bio); + bio_check_pages_dirty(bio); /* transfers ownership */ +} + +static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter) +{ + struct file *file = req->ki_filp; + struct bch_inode_info *inode = file_bch_inode(file); + struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct bch_io_opts opts = io_opts(c, &inode->ei_inode); + struct dio_read *dio; struct bio *bio; + loff_t offset = req->ki_pos; bool sync = is_sync_kiocb(req); + size_t shorten; ssize_t ret; if ((offset|iter->count) & (block_bytes(c) - 1)) @@ -1466,11 +1616,13 @@ static int bch2_direct_IO_read(struct bch_fs *c, struct kiocb *req, ret = min_t(loff_t, iter->count, max_t(loff_t, 0, i_size_read(&inode->v) - offset)); - iov_iter_truncate(iter, round_up(ret, block_bytes(c))); if (!ret) return ret; + shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c)); + iter->count -= shorten; + bio = bio_alloc_bioset(GFP_KERNEL, iov_iter_npages(iter, BIO_MAX_PAGES), &c->dio_read_bioset); @@ -1512,7 +1664,7 @@ start: ret = bio_iov_iter_get_pages(bio, iter); if (ret < 0) { /* XXX: fault inject this path */ - bio->bi_error = ret; + bio->bi_status = BLK_STS_RESOURCE; bio_endio(bio); break; } @@ -1523,9 +1675,11 @@ start: if (iter->count) closure_get(&dio->cl); - bch2_read(c, to_rbio(bio), inode->v.i_ino); + bch2_read(c, rbio_init(bio, opts), inode->v.i_ino); } + iter->count += shorten; + if (sync) { closure_sync(&dio->cl); closure_debug_destroy(&dio->cl); @@ -1537,441 +1691,371 @@ start: } } -static long __bch2_dio_write_complete(struct dio_write *dio) +ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter) { - struct file *file = dio->req->ki_filp; - struct address_space *mapping = file->f_mapping; + struct file *file = iocb->ki_filp; struct bch_inode_info *inode = file_bch_inode(file); - long ret = dio->error ?: dio->written; + struct address_space *mapping = file->f_mapping; + size_t count = iov_iter_count(iter); + ssize_t ret; - 
bch2_disk_reservation_put(dio->c, &dio->res); + if (!count) + return 0; /* skip atime */ - __pagecache_block_put(&mapping->add_lock); - inode_dio_end(&inode->v); + if (iocb->ki_flags & IOCB_DIRECT) { + struct blk_plug plug; + + ret = filemap_write_and_wait_range(mapping, + iocb->ki_pos, + iocb->ki_pos + count - 1); + if (ret < 0) + return ret; - if (dio->iovec && dio->iovec != dio->inline_vecs) - kfree(dio->iovec); + file_accessed(file); + + blk_start_plug(&plug); + ret = bch2_direct_IO_read(iocb, iter); + blk_finish_plug(&plug); + + if (ret >= 0) + iocb->ki_pos += ret; + } else { + bch2_pagecache_add_get(&inode->ei_pagecache_lock); + ret = generic_file_read_iter(iocb, iter); + bch2_pagecache_add_put(&inode->ei_pagecache_lock); + } - bio_put(&dio->iop.op.wbio.bio); return ret; } -static void bch2_dio_write_complete(struct closure *cl) -{ - struct dio_write *dio = container_of(cl, struct dio_write, cl); - struct kiocb *req = dio->req; +/* O_DIRECT writes */ - req->ki_complete(req, __bch2_dio_write_complete(dio), 0); -} +static void bch2_dio_write_loop_async(struct bch_write_op *); -static void bch2_dio_write_done(struct dio_write *dio) +static long bch2_dio_write_loop(struct dio_write *dio) { + bool kthread = (current->flags & PF_KTHREAD) != 0; + struct kiocb *req = dio->req; + struct address_space *mapping = req->ki_filp->f_mapping; + struct bch_inode_info *inode = file_bch_inode(req->ki_filp); + struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct bio *bio = &dio->op.wbio.bio; + struct bvec_iter_all iter; struct bio_vec *bv; - int i; + unsigned unaligned; + bool sync = dio->sync; + long ret; - dio->written += dio->iop.op.written << 9; + if (dio->loop) + goto loop; - if (dio->iop.op.error) - dio->error = dio->iop.op.error; + while (1) { + if (kthread) + kthread_use_mm(dio->mm); + BUG_ON(current->faults_disabled_mapping); + current->faults_disabled_mapping = mapping; - bio_for_each_segment_all(bv, &dio->iop.op.wbio.bio, i) - put_page(bv->bv_page); + ret = bio_iov_iter_get_pages(bio, &dio->iter); - if (dio->iter.count) - bio_reset(&dio->iop.op.wbio.bio); -} + current->faults_disabled_mapping = NULL; + if (kthread) + kthread_unuse_mm(dio->mm); -static void bch2_do_direct_IO_write(struct dio_write *dio) -{ - struct file *file = dio->req->ki_filp; - struct bch_inode_info *inode = file_bch_inode(file); - struct bio *bio = &dio->iop.op.wbio.bio; - unsigned flags = 0; - int ret; - - if ((dio->req->ki_flags & IOCB_DSYNC) && - !dio->c->opts.journal_flush_disabled) - flags |= BCH_WRITE_FLUSH; + if (unlikely(ret < 0)) + goto err; - ret = bio_iov_iter_get_pages(bio, &dio->iter); - if (ret < 0) { - /* - * these didn't get initialized, but bch2_dio_write_done() will - * look at them: - */ - dio->iop.op.error = 0; - dio->iop.op.written = 0; - dio->error = ret; - return; - } + unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1); + bio->bi_iter.bi_size -= unaligned; + iov_iter_revert(&dio->iter, unaligned); - dio->iop.sectors_added = 0; - bch2_write_op_init(&dio->iop.op, dio->c, dio->res, - dio->c->fastest_devs, - writepoint_hashed((unsigned long) dio->task), - POS(inode->v.i_ino, (dio->offset + dio->written) >> 9), - &inode->ei_journal_seq, - flags); - dio->iop.op.index_update_fn = bchfs_write_index_update; + if (!bio->bi_iter.bi_size) { + /* + * bio_iov_iter_get_pages was only able to get < + * blocksize worth of pages: + */ + bio_for_each_segment_all(bv, bio, iter) + put_page(bv->bv_page); + ret = -EFAULT; + goto err; + } - if (!dio->iop.unalloc) { - dio->res.sectors -= bio_sectors(bio); - 
dio->iop.op.res.sectors = bio_sectors(bio); - } + bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode)); + dio->op.end_io = bch2_dio_write_loop_async; + dio->op.target = dio->op.opts.foreground_target; + op_journal_seq_set(&dio->op, &inode->ei_journal_seq); + dio->op.write_point = writepoint_hashed((unsigned long) current); + dio->op.nr_replicas = dio->op.opts.data_replicas; + dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9); + + if ((req->ki_flags & IOCB_DSYNC) && + !c->opts.journal_flush_disabled) + dio->op.flags |= BCH_WRITE_FLUSH; + + ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio), + dio->op.opts.data_replicas, 0); + if (unlikely(ret) && + !bch2_check_range_allocated(c, dio->op.pos, + bio_sectors(bio), dio->op.opts.data_replicas)) + goto err; - task_io_account_write(bio->bi_iter.bi_size); + task_io_account_write(bio->bi_iter.bi_size); - closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl); -} + if (!dio->sync && !dio->loop && dio->iter.count) { + struct iovec *iov = dio->inline_vecs; -static void bch2_dio_write_loop_async(struct closure *cl) -{ - struct dio_write *dio = - container_of(cl, struct dio_write, cl); - struct address_space *mapping = dio->req->ki_filp->f_mapping; + if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) { + iov = kmalloc(dio->iter.nr_segs * sizeof(*iov), + GFP_KERNEL); + if (unlikely(!iov)) { + dio->sync = sync = true; + goto do_io; + } + + dio->free_iov = true; + } - bch2_dio_write_done(dio); + memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov)); + dio->iter.iov = iov; + } +do_io: + dio->loop = true; + closure_call(&dio->op.cl, bch2_write, NULL, NULL); - if (dio->iter.count && !dio->error) { - use_mm(dio->task->mm); - pagecache_block_get(&mapping->add_lock); + if (sync) + wait_for_completion(&dio->done); + else + return -EIOCBQUEUED; +loop: + i_sectors_acct(c, inode, &dio->quota_res, + dio->op.i_sectors_delta); + req->ki_pos += (u64) dio->op.written << 9; + dio->written += dio->op.written; - bch2_do_direct_IO_write(dio); + spin_lock(&inode->v.i_lock); + if (req->ki_pos > inode->v.i_size) + i_size_write(&inode->v, req->ki_pos); + spin_unlock(&inode->v.i_lock); - pagecache_block_put(&mapping->add_lock); - unuse_mm(dio->task->mm); + bio_for_each_segment_all(bv, bio, iter) + put_page(bv->bv_page); + if (!dio->iter.count || dio->op.error) + break; - continue_at(&dio->cl, bch2_dio_write_loop_async, NULL); - } else { -#if 0 - closure_return_with_destructor(cl, bch2_dio_write_complete); -#else - closure_debug_destroy(cl); - bch2_dio_write_complete(cl); -#endif + bio_reset(bio); + reinit_completion(&dio->done); } -} -static int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, - u64 size) -{ - struct btree_iter iter; - struct bpos end = pos; - struct bkey_s_c k; - int ret = 0; + ret = dio->op.error ?: ((long) dio->written << 9); +err: + bch2_pagecache_block_put(&inode->ei_pagecache_lock); + bch2_quota_reservation_put(c, inode, &dio->quota_res); - end.offset += size; + if (dio->free_iov) + kfree(dio->iter.iov); - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos, - BTREE_ITER_WITH_HOLES, k) { - if (bkey_cmp(bkey_start_pos(k.k), end) >= 0) - break; + bio_put(bio); - if (!bch2_extent_is_fully_allocated(k)) { - ret = -ENOSPC; - break; - } - } - bch2_btree_iter_unlock(&iter); + /* inode->i_dio_count is our ref on inode and thus bch_fs */ + inode_dio_end(&inode->v); + if (!sync) { + req->ki_complete(req, ret, 0); + ret = -EIOCBQUEUED; + } return ret; } -static int bch2_direct_IO_write(struct bch_fs *c, - struct 
kiocb *req, struct file *file, - struct bch_inode_info *inode, - struct iov_iter *iter, loff_t offset) +static void bch2_dio_write_loop_async(struct bch_write_op *op) { + struct dio_write *dio = container_of(op, struct dio_write, op); + + if (dio->sync) + complete(&dio->done); + else + bch2_dio_write_loop(dio); +} + +static noinline +ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter) +{ + struct file *file = req->ki_filp; struct address_space *mapping = file->f_mapping; + struct bch_inode_info *inode = file_bch_inode(file); + struct bch_fs *c = inode->v.i_sb->s_fs_info; struct dio_write *dio; struct bio *bio; + bool locked = true, extending; ssize_t ret; - bool sync = is_sync_kiocb(req); - lockdep_assert_held(&inode->v.i_rwsem); - - if (unlikely(!iter->count)) - return 0; + prefetch(&c->opts); + prefetch((void *) &c->opts + 64); + prefetch(&inode->ei_inode); + prefetch((void *) &inode->ei_inode + 64); - if (unlikely((offset|iter->count) & (block_bytes(c) - 1))) - return -EINVAL; + inode_lock(&inode->v); - bio = bio_alloc_bioset(GFP_KERNEL, - iov_iter_npages(iter, BIO_MAX_PAGES), - &c->dio_write_bioset); - dio = container_of(bio, struct dio_write, iop.op.wbio.bio); - closure_init(&dio->cl, NULL); - dio->req = req; - dio->c = c; - dio->written = 0; - dio->error = 0; - dio->offset = offset; - dio->iovec = NULL; - dio->iter = *iter; - dio->task = current; - bch2_fswrite_op_init(&dio->iop, inode, true); + ret = generic_write_checks(req, iter); + if (unlikely(ret <= 0)) + goto err; - if (offset + iter->count > inode->v.i_size) - sync = true; + ret = file_remove_privs(file); + if (unlikely(ret)) + goto err; - /* - * XXX: we shouldn't return -ENOSPC if we're overwriting existing data - - * if getting a reservation fails we should check if we are doing an - * overwrite. 
- * - * Have to then guard against racing with truncate (deleting data that - * we would have been overwriting) - */ - ret = bch2_disk_reservation_get(c, &dio->res, iter->count >> 9, 0); - if (unlikely(ret)) { - if (bch2_check_range_allocated(c, POS(inode->v.i_ino, - offset >> 9), - iter->count >> 9)) { - closure_debug_destroy(&dio->cl); - bio_put(bio); - return ret; - } + ret = file_update_time(file); + if (unlikely(ret)) + goto err; - dio->iop.unalloc = true; - } + if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) + goto err; inode_dio_begin(&inode->v); - __pagecache_block_get(&mapping->add_lock); + bch2_pagecache_block_get(&inode->ei_pagecache_lock); - if (sync) { - do { - bch2_do_direct_IO_write(dio); - - closure_sync(&dio->cl); - bch2_dio_write_done(dio); - } while (dio->iter.count && !dio->error); - - closure_debug_destroy(&dio->cl); - return __bch2_dio_write_complete(dio); - } else { - bch2_do_direct_IO_write(dio); - - if (dio->iter.count && !dio->error) { - if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) { - dio->iovec = kmalloc(dio->iter.nr_segs * - sizeof(struct iovec), - GFP_KERNEL); - if (!dio->iovec) - dio->error = -ENOMEM; - } else { - dio->iovec = dio->inline_vecs; - } - - memcpy(dio->iovec, - dio->iter.iov, - dio->iter.nr_segs * sizeof(struct iovec)); - dio->iter.iov = dio->iovec; - } - - continue_at_noreturn(&dio->cl, bch2_dio_write_loop_async, NULL); - return -EIOCBQUEUED; + extending = req->ki_pos + iter->count > inode->v.i_size; + if (!extending) { + inode_unlock(&inode->v); + locked = false; } -} -ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter) -{ - struct file *file = req->ki_filp; - struct bch_inode_info *inode = file_bch_inode(file); - struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct blk_plug plug; - ssize_t ret; - - blk_start_plug(&plug); - ret = ((iov_iter_rw(iter) == WRITE) - ? 
bch2_direct_IO_write - : bch2_direct_IO_read)(c, req, file, inode, iter, req->ki_pos); - blk_finish_plug(&plug); - - return ret; -} - -static ssize_t -bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter) -{ - struct file *file = iocb->ki_filp; - struct bch_inode_info *inode = file_bch_inode(file); - struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct address_space *mapping = file->f_mapping; - loff_t pos = iocb->ki_pos; - ssize_t ret; + bio = bio_alloc_bioset(GFP_KERNEL, + iov_iter_npages(iter, BIO_MAX_PAGES), + &c->dio_write_bioset); + dio = container_of(bio, struct dio_write, op.wbio.bio); + init_completion(&dio->done); + dio->req = req; + dio->mm = current->mm; + dio->loop = false; + dio->sync = is_sync_kiocb(req) || extending; + dio->free_iov = false; + dio->quota_res.sectors = 0; + dio->written = 0; + dio->iter = *iter; - pagecache_block_get(&mapping->add_lock); + ret = bch2_quota_reservation_add(c, inode, &dio->quota_res, + iter->count >> 9, true); + if (unlikely(ret)) + goto err_put_bio; - /* Write and invalidate pagecache range that we're writing to: */ - ret = write_invalidate_inode_pages_range(file->f_mapping, pos, - pos + iov_iter_count(iter) - 1); + ret = write_invalidate_inode_pages_range(mapping, + req->ki_pos, + req->ki_pos + iter->count - 1); if (unlikely(ret)) - goto err; + goto err_put_bio; - ret = bch2_direct_IO_write(c, iocb, file, inode, iter, pos); + ret = bch2_dio_write_loop(dio); err: - pagecache_block_put(&mapping->add_lock); - + if (locked) + inode_unlock(&inode->v); return ret; +err_put_bio: + bch2_pagecache_block_put(&inode->ei_pagecache_lock); + bch2_quota_reservation_put(c, inode, &dio->quota_res); + bio_put(bio); + inode_dio_end(&inode->v); + goto err; } -static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from) +ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct bch_inode_info *inode = file_bch_inode(file); - ssize_t ret; + ssize_t ret; + + if (iocb->ki_flags & IOCB_DIRECT) + return bch2_direct_write(iocb, from); /* We can write back this queue in page reclaim */ current->backing_dev_info = inode_to_bdi(&inode->v); + inode_lock(&inode->v); + + ret = generic_write_checks(iocb, from); + if (ret <= 0) + goto unlock; + ret = file_remove_privs(file); if (ret) - goto out; + goto unlock; ret = file_update_time(file); if (ret) - goto out; - - ret = iocb->ki_flags & IOCB_DIRECT - ? 
bch2_direct_write(iocb, from) - : generic_perform_write(file, from, iocb->ki_pos); + goto unlock; + ret = bch2_buffered_write(iocb, from); if (likely(ret > 0)) iocb->ki_pos += ret; -out: +unlock: + inode_unlock(&inode->v); current->backing_dev_info = NULL; - return ret; -} - -ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from) -{ - struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp); - bool direct = iocb->ki_flags & IOCB_DIRECT; - ssize_t ret; - inode_lock(&inode->v); - ret = generic_write_checks(iocb, from); if (ret > 0) - ret = __bch2_write_iter(iocb, from); - inode_unlock(&inode->v); - - if (ret > 0 && !direct) ret = generic_write_sync(iocb, ret); return ret; } -int bch2_page_mkwrite(struct vm_fault *vmf) +/* fsync: */ + +int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - struct page *page = vmf->page; - struct file *file = vmf->vma->vm_file; struct bch_inode_info *inode = file_bch_inode(file); - struct address_space *mapping = inode->v.i_mapping; struct bch_fs *c = inode->v.i_sb->s_fs_info; - int ret = VM_FAULT_LOCKED; - - sb_start_pagefault(inode->v.i_sb); - file_update_time(file); + int ret, ret2; - /* - * Not strictly necessary, but helps avoid dio writes livelocking in - * write_invalidate_inode_pages_range() - can drop this if/when we get - * a write_invalidate_inode_pages_range() that works without dropping - * page lock before invalidating page - */ - if (current->pagecache_lock != &mapping->add_lock) - pagecache_add_get(&mapping->add_lock); + ret = file_write_and_wait_range(file, start, end); + if (ret) + return ret; - lock_page(page); - if (page->mapping != mapping || - page_offset(page) > i_size_read(&inode->v)) { - unlock_page(page); - ret = VM_FAULT_NOPAGE; + if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC)) goto out; - } - if (bch2_get_page_reservation(c, page, true)) { - unlock_page(page); - ret = VM_FAULT_SIGBUS; - goto out; - } - - if (!PageDirty(page)) - set_page_dirty(page); - wait_for_stable_page(page); + ret = sync_inode_metadata(&inode->v, 1); + if (ret) + return ret; out: - if (current->pagecache_lock != &mapping->add_lock) - pagecache_add_put(&mapping->add_lock); - sb_end_pagefault(inode->v.i_sb); - return ret; -} + if (!c->opts.journal_flush_disabled) + ret = bch2_journal_flush_seq(&c->journal, + inode->ei_journal_seq); + ret2 = file_check_and_advance_wb_err(file); -void bch2_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) -{ - EBUG_ON(!PageLocked(page)); - EBUG_ON(PageWriteback(page)); - - if (offset || length < PAGE_SIZE) - return; - - bch2_clear_page_bits(page); + return ret ?: ret2; } -int bch2_releasepage(struct page *page, gfp_t gfp_mask) -{ - EBUG_ON(!PageLocked(page)); - EBUG_ON(PageWriteback(page)); - - if (PageDirty(page)) - return 0; +/* truncate: */ - bch2_clear_page_bits(page); - return 1; -} - -#ifdef CONFIG_MIGRATION -int bch2_migrate_page(struct address_space *mapping, struct page *newpage, - struct page *page, enum migrate_mode mode) +static inline int range_has_data(struct bch_fs *c, + struct bpos start, + struct bpos end) { - int ret; - - ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); - if (ret != MIGRATEPAGE_SUCCESS) - return ret; - - if (PagePrivate(page)) { - *page_state(newpage) = *page_state(page); - ClearPagePrivate(page); - } - - migrate_page_copy(newpage, page); - return MIGRATEPAGE_SUCCESS; -} -#endif + struct btree_trans trans; + struct btree_iter *iter; + struct bkey_s_c k; + int ret = 0; -int bch2_fsync(struct file *file, 
loff_t start, loff_t end, int datasync) -{ - struct bch_inode_info *inode = file_bch_inode(file); - struct bch_fs *c = inode->v.i_sb->s_fs_info; - int ret; + bch2_trans_init(&trans, c, 0, 0); - ret = filemap_write_and_wait_range(inode->v.i_mapping, start, end); - if (ret) - return ret; + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) { + if (bkey_cmp(bkey_start_pos(k.k), end) >= 0) + break; - if (c->opts.journal_flush_disabled) - return 0; + if (bkey_extent_is_data(k.k)) { + ret = 1; + break; + } + } - return bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq); + return bch2_trans_exit(&trans) ?: ret; } -static int __bch2_truncate_page(struct address_space *mapping, +static int __bch2_truncate_page(struct bch_inode_info *inode, pgoff_t index, loff_t start, loff_t end) { - struct bch_inode_info *inode = to_bch_ei(mapping->host); struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct address_space *mapping = inode->v.i_mapping; + struct bch_page_state *s; unsigned start_offset = start & (PAGE_SIZE - 1); unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1; + unsigned i; struct page *page; int ret = 0; @@ -1986,30 +2070,16 @@ static int __bch2_truncate_page(struct address_space *mapping, page = find_lock_page(mapping, index); if (!page) { - struct btree_iter iter; - struct bkey_s_c k = bkey_s_c_null; - /* * XXX: we're doing two index lookups when we end up reading the * page */ - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, - POS(inode->v.i_ino, - index << PAGE_SECTOR_SHIFT), 0, k) { - if (bkey_cmp(bkey_start_pos(k.k), - POS(inode->v.i_ino, - (index + 1) << PAGE_SECTOR_SHIFT)) >= 0) - break; + ret = range_has_data(c, + POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT), + POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT)); + if (ret <= 0) + return ret; - if (k.k->type != KEY_TYPE_DISCARD && - k.k->type != BCH_RESERVATION) { - bch2_btree_iter_unlock(&iter); - goto create; - } - } - bch2_btree_iter_unlock(&iter); - return 0; -create: page = find_or_create_page(mapping, index, GFP_KERNEL); if (unlikely(!page)) { ret = -ENOMEM; @@ -2017,202 +2087,270 @@ create: } } + s = bch2_page_state_create(page, 0); + if (!s) { + ret = -ENOMEM; + goto unlock; + } + if (!PageUptodate(page)) { ret = bch2_read_single_page(page, mapping); if (ret) goto unlock; } + if (index != start >> PAGE_SHIFT) + start_offset = 0; + if (index != end >> PAGE_SHIFT) + end_offset = PAGE_SIZE; + + for (i = round_up(start_offset, block_bytes(c)) >> 9; + i < round_down(end_offset, block_bytes(c)) >> 9; + i++) { + s->s[i].nr_replicas = 0; + s->s[i].state = SECTOR_UNALLOCATED; + } + + zero_user_segment(page, start_offset, end_offset); + /* * Bit of a hack - we don't want truncate to fail due to -ENOSPC. * - * XXX: because we aren't currently tracking whether the page has actual - * data in it (vs. just 0s, or only partially written) this wrong. ick. + * XXX: because we aren't currently tracking whether the page has actual + * data in it (vs. just 0s, or only partially written) this wrong. ick. 
+ */ + ret = bch2_get_page_disk_reservation(c, inode, page, false); + BUG_ON(ret); + + /* + * This removes any writeable userspace mappings; we need to force + * .page_mkwrite to be called again before any mmapped writes, to + * redirty the full page: + */ + page_mkclean(page); + __set_page_dirty_nobuffers(page); +unlock: + unlock_page(page); + put_page(page); +out: + return ret; +} + +static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from) +{ + return __bch2_truncate_page(inode, from >> PAGE_SHIFT, + from, round_up(from, PAGE_SIZE)); +} + +static int bch2_extend(struct bch_inode_info *inode, + struct bch_inode_unpacked *inode_u, + struct iattr *iattr) +{ + struct bch_fs *c = inode->v.i_sb->s_fs_info; + struct address_space *mapping = inode->v.i_mapping; + int ret; + + /* + * sync appends: + * + * this has to be done _before_ extending i_size: */ - ret = bch2_get_page_reservation(c, page, false); - BUG_ON(ret); + ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX); + if (ret) + return ret; - if (index == start >> PAGE_SHIFT && - index == end >> PAGE_SHIFT) - zero_user_segment(page, start_offset, end_offset); - else if (index == start >> PAGE_SHIFT) - zero_user_segment(page, start_offset, PAGE_SIZE); - else if (index == end >> PAGE_SHIFT) - zero_user_segment(page, 0, end_offset); + truncate_setsize(&inode->v, iattr->ia_size); + setattr_copy(&inode->v, iattr); + + mutex_lock(&inode->ei_update_lock); + ret = bch2_write_inode_size(c, inode, inode->v.i_size, + ATTR_MTIME|ATTR_CTIME); + mutex_unlock(&inode->ei_update_lock); - if (!PageDirty(page)) - set_page_dirty(page); -unlock: - unlock_page(page); - put_page(page); -out: return ret; } -static int bch2_truncate_page(struct address_space *mapping, loff_t from) +static int bch2_truncate_finish_fn(struct bch_inode_info *inode, + struct bch_inode_unpacked *bi, + void *p) +{ + struct bch_fs *c = inode->v.i_sb->s_fs_info; + + bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY; + bi->bi_mtime = bi->bi_ctime = bch2_current_time(c); + return 0; +} + +static int bch2_truncate_start_fn(struct bch_inode_info *inode, + struct bch_inode_unpacked *bi, void *p) { - return __bch2_truncate_page(mapping, from >> PAGE_SHIFT, - from, from + PAGE_SIZE); + u64 *new_i_size = p; + + bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY; + bi->bi_size = *new_i_size; + return 0; } int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr) { struct bch_fs *c = inode->v.i_sb->s_fs_info; struct address_space *mapping = inode->v.i_mapping; - bool shrink = iattr->ia_size <= inode->v.i_size; + struct bch_inode_unpacked inode_u; + struct btree_trans trans; + struct btree_iter *iter; + u64 new_i_size = iattr->ia_size; + s64 i_sectors_delta = 0; int ret = 0; inode_dio_wait(&inode->v); - pagecache_block_get(&mapping->add_lock); + bch2_pagecache_block_get(&inode->ei_pagecache_lock); - truncate_setsize(&inode->v, iattr->ia_size); + /* + * fetch current on disk i_size: inode is locked, i_size can only + * increase underneath us: + */ + bch2_trans_init(&trans, c, 0, 0); + iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0); + ret = PTR_ERR_OR_ZERO(iter); + bch2_trans_exit(&trans); - /* sync appends.. */ - /* XXX what protects inode->i_size? 
*/ - if (iattr->ia_size > inode->ei_size) - ret = filemap_write_and_wait_range(mapping, - inode->ei_size, S64_MAX); if (ret) - goto err_put_pagecache; + goto err; - mutex_lock(&inode->ei_update_lock); - i_size_dirty_get(inode); - ret = bch2_write_inode_size(c, inode, inode->v.i_size); - mutex_unlock(&inode->ei_update_lock); + /* + * check this before next assertion; on filesystem error our normal + * invariants are a bit broken (truncate has to truncate the page cache + * before the inode). + */ + ret = bch2_journal_error(&c->journal); + if (ret) + goto err; + + BUG_ON(inode->v.i_size < inode_u.bi_size); + if (iattr->ia_size > inode->v.i_size) { + ret = bch2_extend(inode, &inode_u, iattr); + goto err; + } + + ret = bch2_truncate_page(inode, iattr->ia_size); if (unlikely(ret)) goto err; /* - * There might be persistent reservations (from fallocate()) - * above i_size, which bch2_inode_truncate() will discard - we're - * only supposed to discard them if we're doing a real truncate - * here (new i_size < current i_size): + * When extending, we're going to write the new i_size to disk + * immediately so we need to flush anything above the current on disk + * i_size first: + * + * Also, when extending we need to flush the page that i_size currently + * straddles - if it's mapped to userspace, we need to ensure that + * userspace has to redirty it and call .mkwrite -> set_page_dirty + * again to allocate the part of the page that was extended. */ - if (shrink) { - struct i_sectors_hook i_sectors_hook; - int ret; + if (iattr->ia_size > inode_u.bi_size) + ret = filemap_write_and_wait_range(mapping, + inode_u.bi_size, + iattr->ia_size - 1); + else if (iattr->ia_size & (PAGE_SIZE - 1)) + ret = filemap_write_and_wait_range(mapping, + round_down(iattr->ia_size, PAGE_SIZE), + iattr->ia_size - 1); + if (ret) + goto err; - ret = i_sectors_dirty_get(c, inode, &i_sectors_hook); - if (unlikely(ret)) - goto err; + mutex_lock(&inode->ei_update_lock); + ret = bch2_write_inode(c, inode, bch2_truncate_start_fn, + &new_i_size, 0); + mutex_unlock(&inode->ei_update_lock); - ret = bch2_truncate_page(inode->v.i_mapping, iattr->ia_size); - if (unlikely(ret)) { - i_sectors_dirty_put(c, inode, &i_sectors_hook); - goto err; - } + if (unlikely(ret)) + goto err; - ret = bch2_inode_truncate(c, inode->v.i_ino, - round_up(iattr->ia_size, PAGE_SIZE) >> 9, - &i_sectors_hook.hook, - &inode->ei_journal_seq); + truncate_setsize(&inode->v, iattr->ia_size); - i_sectors_dirty_put(c, inode, &i_sectors_hook); + ret = bch2_fpunch(c, inode->v.i_ino, + round_up(iattr->ia_size, block_bytes(c)) >> 9, + U64_MAX, &inode->ei_journal_seq, &i_sectors_delta); + i_sectors_acct(c, inode, NULL, i_sectors_delta); - if (unlikely(ret)) - goto err; - } + if (unlikely(ret)) + goto err; - mutex_lock(&inode->ei_update_lock); setattr_copy(&inode->v, iattr); - inode->v.i_mtime = inode->v.i_ctime = current_fs_time(inode->v.i_sb); -out: - /* clear I_SIZE_DIRTY: */ - i_size_dirty_put(inode); - ret = bch2_write_inode_size(c, inode, inode->v.i_size); - mutex_unlock(&inode->ei_update_lock); -err_put_pagecache: - pagecache_block_put(&mapping->add_lock); - return ret; -err: mutex_lock(&inode->ei_update_lock); - goto out; + ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, + ATTR_MTIME|ATTR_CTIME); + mutex_unlock(&inode->ei_update_lock); +err: + bch2_pagecache_block_put(&inode->ei_pagecache_lock); + return ret; } -static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len) +/* fallocate: */ + +static long bchfs_fpunch(struct 
bch_inode_info *inode, loff_t offset, loff_t len) { struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct address_space *mapping = inode->v.i_mapping; - u64 ino = inode->v.i_ino; - u64 discard_start = round_up(offset, PAGE_SIZE) >> 9; - u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9; + u64 discard_start = round_up(offset, block_bytes(c)) >> 9; + u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9; int ret = 0; inode_lock(&inode->v); inode_dio_wait(&inode->v); - pagecache_block_get(&mapping->add_lock); + bch2_pagecache_block_get(&inode->ei_pagecache_lock); - ret = __bch2_truncate_page(mapping, + ret = __bch2_truncate_page(inode, offset >> PAGE_SHIFT, offset, offset + len); if (unlikely(ret)) - goto out; + goto err; if (offset >> PAGE_SHIFT != (offset + len) >> PAGE_SHIFT) { - ret = __bch2_truncate_page(mapping, + ret = __bch2_truncate_page(inode, (offset + len) >> PAGE_SHIFT, offset, offset + len); if (unlikely(ret)) - goto out; + goto err; } truncate_pagecache_range(&inode->v, offset, offset + len - 1); if (discard_start < discard_end) { - struct disk_reservation disk_res; - struct i_sectors_hook i_sectors_hook; - int ret; - - BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0)); - - ret = i_sectors_dirty_get(c, inode, &i_sectors_hook); - if (unlikely(ret)) - goto out; - - ret = bch2_btree_delete_range(c, - BTREE_ID_EXTENTS, - POS(ino, discard_start), - POS(ino, discard_end), - ZERO_VERSION, - &disk_res, - &i_sectors_hook.hook, - &inode->ei_journal_seq); + s64 i_sectors_delta = 0; - i_sectors_dirty_put(c, inode, &i_sectors_hook); - bch2_disk_reservation_put(c, &disk_res); + ret = bch2_fpunch(c, inode->v.i_ino, + discard_start, discard_end, + &inode->ei_journal_seq, + &i_sectors_delta); + i_sectors_acct(c, inode, NULL, i_sectors_delta); } -out: - pagecache_block_put(&mapping->add_lock); +err: + bch2_pagecache_block_put(&inode->ei_pagecache_lock); inode_unlock(&inode->v); return ret; } -static long bch2_fcollapse(struct bch_inode_info *inode, - loff_t offset, loff_t len) +static long bchfs_fcollapse_finsert(struct bch_inode_info *inode, + loff_t offset, loff_t len, + bool insert) { struct bch_fs *c = inode->v.i_sb->s_fs_info; struct address_space *mapping = inode->v.i_mapping; - struct btree_iter src; - struct btree_iter dst; - BKEY_PADDED(k) copy; - struct bkey_s_c k; - struct i_sectors_hook i_sectors_hook; - loff_t new_size; + struct bkey_on_stack copy; + struct btree_trans trans; + struct btree_iter *src, *dst; + loff_t shift, new_size; + u64 src_start; int ret; - if ((offset | len) & (PAGE_SIZE - 1)) + if ((offset | len) & (block_bytes(c) - 1)) return -EINVAL; - bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS, - POS(inode->v.i_ino, offset >> 9), - BTREE_ITER_INTENT); - /* position will be set from dst iter's position: */ - bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN, 0); - bch2_btree_iter_link(&src, &dst); + bkey_on_stack_init(©); + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256); /* * We need i_mutex to keep the page cache consistent with the extents @@ -2222,256 +2360,332 @@ static long bch2_fcollapse(struct bch_inode_info *inode, */ inode_lock(&inode->v); inode_dio_wait(&inode->v); - pagecache_block_get(&mapping->add_lock); + bch2_pagecache_block_get(&inode->ei_pagecache_lock); - ret = -EINVAL; - if (offset + len >= inode->v.i_size) - goto err; + if (insert) { + ret = -EFBIG; + if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len) + goto err; - if (inode->v.i_size < len) - goto err; + ret = -EINVAL; + if (offset >= inode->v.i_size) + goto err; - 
new_size = inode->v.i_size - len; + src_start = U64_MAX; + shift = len; + } else { + ret = -EINVAL; + if (offset + len >= inode->v.i_size) + goto err; - ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX); - if (ret) - goto err; + src_start = offset + len; + shift = -len; + } - ret = i_sectors_dirty_get(c, inode, &i_sectors_hook); + new_size = inode->v.i_size + shift; + + ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX); if (ret) goto err; - while (bkey_cmp(dst.pos, - POS(inode->v.i_ino, - round_up(new_size, PAGE_SIZE) >> 9)) < 0) { - struct disk_reservation disk_res; + if (insert) { + i_size_write(&inode->v, new_size); + mutex_lock(&inode->ei_update_lock); + ret = bch2_write_inode_size(c, inode, new_size, + ATTR_MTIME|ATTR_CTIME); + mutex_unlock(&inode->ei_update_lock); + } else { + s64 i_sectors_delta = 0; - bch2_btree_iter_set_pos(&src, - POS(dst.pos.inode, dst.pos.offset + (len >> 9))); + ret = bch2_fpunch(c, inode->v.i_ino, + offset >> 9, (offset + len) >> 9, + &inode->ei_journal_seq, + &i_sectors_delta); + i_sectors_acct(c, inode, NULL, i_sectors_delta); - ret = bch2_btree_iter_traverse(&dst); if (ret) - goto btree_iter_err; + goto err; + } - k = bch2_btree_iter_peek_with_holes(&src); - if ((ret = btree_iter_err(k))) - goto btree_iter_err; + src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, + POS(inode->v.i_ino, src_start >> 9), + BTREE_ITER_INTENT); + BUG_ON(IS_ERR_OR_NULL(src)); - bkey_reassemble(©.k, k); + dst = bch2_trans_copy_iter(&trans, src); + BUG_ON(IS_ERR_OR_NULL(dst)); + + while (1) { + struct disk_reservation disk_res = + bch2_disk_reservation_init(c, 0); + struct bkey_i delete; + struct bkey_s_c k; + struct bpos next_pos; + struct bpos move_pos = POS(inode->v.i_ino, offset >> 9); + struct bpos atomic_end; + unsigned trigger_flags = 0; + + k = insert + ? 
bch2_btree_iter_peek_prev(src) + : bch2_btree_iter_peek(src); + if ((ret = bkey_err(k))) + goto bkey_err; + + if (!k.k || k.k->p.inode != inode->v.i_ino) + break; - if (bkey_deleted(©.k.k)) - copy.k.k.type = KEY_TYPE_DISCARD; + BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k))); - bch2_cut_front(src.pos, ©.k); - copy.k.k.p.offset -= len >> 9; + if (insert && + bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0) + break; +reassemble: + bkey_on_stack_reassemble(©, c, k); - BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(©.k.k))); + if (insert && + bkey_cmp(bkey_start_pos(k.k), move_pos) < 0) + bch2_cut_front(move_pos, copy.k); - ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size, - BCH_DISK_RESERVATION_NOFAIL); - BUG_ON(ret); + copy.k->k.p.offset += shift >> 9; + bch2_btree_iter_set_pos(dst, bkey_start_pos(©.k->k)); - ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook, - &inode->ei_journal_seq, - BTREE_INSERT_ATOMIC| - BTREE_INSERT_NOFAIL, - BTREE_INSERT_ENTRY(&dst, ©.k)); - bch2_disk_reservation_put(c, &disk_res); -btree_iter_err: - if (ret < 0 && ret != -EINTR) - goto err_unwind; + ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end); + if (ret) + goto bkey_err; - bch2_btree_iter_cond_resched(&src); - } + if (bkey_cmp(atomic_end, copy.k->k.p)) { + if (insert) { + move_pos = atomic_end; + move_pos.offset -= shift >> 9; + goto reassemble; + } else { + bch2_cut_back(atomic_end, copy.k); + } + } - bch2_btree_iter_unlock(&src); - bch2_btree_iter_unlock(&dst); + bkey_init(&delete.k); + delete.k.p = copy.k->k.p; + delete.k.size = copy.k->k.size; + delete.k.p.offset -= shift >> 9; - ret = bch2_inode_truncate(c, inode->v.i_ino, - round_up(new_size, PAGE_SIZE) >> 9, - &i_sectors_hook.hook, - &inode->ei_journal_seq); - if (ret) - goto err_unwind; + next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p; - i_sectors_dirty_put(c, inode, &i_sectors_hook); + if (copy.k->k.size == k.k->size) { + /* + * If we're moving the entire extent, we can skip + * running triggers: + */ + trigger_flags |= BTREE_TRIGGER_NORUN; + } else { + /* We might end up splitting compressed extents: */ + unsigned nr_ptrs = + bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k)); - mutex_lock(&inode->ei_update_lock); - i_size_write(&inode->v, new_size); - ret = bch2_write_inode_size(c, inode, inode->v.i_size); - mutex_unlock(&inode->ei_update_lock); + ret = bch2_disk_reservation_get(c, &disk_res, + copy.k->k.size, nr_ptrs, + BCH_DISK_RESERVATION_NOFAIL); + BUG_ON(ret); + } - pagecache_block_put(&mapping->add_lock); - inode_unlock(&inode->v); + bch2_btree_iter_set_pos(src, bkey_start_pos(&delete.k)); - return ret; -err_unwind: - /* - * XXX: we've left data with multiple pointers... which isn't a _super_ - * serious problem... 
- */ - i_sectors_dirty_put(c, inode, &i_sectors_hook); + ret = bch2_trans_update(&trans, src, &delete, trigger_flags) ?: + bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?: + bch2_trans_commit(&trans, &disk_res, + &inode->ei_journal_seq, + BTREE_INSERT_NOFAIL); + bch2_disk_reservation_put(c, &disk_res); +bkey_err: + if (!ret) + bch2_btree_iter_set_pos(src, next_pos); + + if (ret == -EINTR) + ret = 0; + if (ret) + goto err; + + bch2_trans_cond_resched(&trans); + } + bch2_trans_unlock(&trans); + + if (!insert) { + i_size_write(&inode->v, new_size); + mutex_lock(&inode->ei_update_lock); + ret = bch2_write_inode_size(c, inode, new_size, + ATTR_MTIME|ATTR_CTIME); + mutex_unlock(&inode->ei_update_lock); + } err: - bch2_btree_iter_unlock(&src); - bch2_btree_iter_unlock(&dst); - pagecache_block_put(&mapping->add_lock); + bch2_trans_exit(&trans); + bkey_on_stack_exit(©, c); + bch2_pagecache_block_put(&inode->ei_pagecache_lock); inode_unlock(&inode->v); return ret; } -static long bch2_fallocate(struct bch_inode_info *inode, int mode, - loff_t offset, loff_t len) +static long bchfs_fallocate(struct bch_inode_info *inode, int mode, + loff_t offset, loff_t len) { struct address_space *mapping = inode->v.i_mapping; struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct i_sectors_hook i_sectors_hook; - struct btree_iter iter; - struct bpos end; - loff_t block_start, block_end; - loff_t new_size = offset + len; + struct btree_trans trans; + struct btree_iter *iter; + struct bpos end_pos; + loff_t end = offset + len; + loff_t block_start = round_down(offset, block_bytes(c)); + loff_t block_end = round_up(end, block_bytes(c)); unsigned sectors; - unsigned replicas = READ_ONCE(c->opts.data_replicas); + unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas; int ret; - bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, - BTREE_ITER_INTENT); + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); inode_lock(&inode->v); inode_dio_wait(&inode->v); - pagecache_block_get(&mapping->add_lock); + bch2_pagecache_block_get(&inode->ei_pagecache_lock); - if (!(mode & FALLOC_FL_KEEP_SIZE) && - new_size > inode->v.i_size) { - ret = inode_newsize_ok(&inode->v, new_size); + if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) { + ret = inode_newsize_ok(&inode->v, end); if (ret) goto err; } if (mode & FALLOC_FL_ZERO_RANGE) { - ret = __bch2_truncate_page(mapping, + ret = __bch2_truncate_page(inode, offset >> PAGE_SHIFT, - offset, offset + len); + offset, end); if (!ret && - offset >> PAGE_SHIFT != - (offset + len) >> PAGE_SHIFT) - ret = __bch2_truncate_page(mapping, - (offset + len) >> PAGE_SHIFT, - offset, offset + len); + offset >> PAGE_SHIFT != end >> PAGE_SHIFT) + ret = __bch2_truncate_page(inode, + end >> PAGE_SHIFT, + offset, end); if (unlikely(ret)) goto err; - truncate_pagecache_range(&inode->v, offset, offset + len - 1); - - block_start = round_up(offset, PAGE_SIZE); - block_end = round_down(offset + len, PAGE_SIZE); - } else { - block_start = round_down(offset, PAGE_SIZE); - block_end = round_up(offset + len, PAGE_SIZE); + truncate_pagecache_range(&inode->v, offset, end - 1); } - bch2_btree_iter_set_pos(&iter, POS(inode->v.i_ino, block_start >> 9)); - end = POS(inode->v.i_ino, block_end >> 9); - - ret = i_sectors_dirty_get(c, inode, &i_sectors_hook); - if (unlikely(ret)) - goto err; + iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, + POS(inode->v.i_ino, block_start >> 9), + BTREE_ITER_SLOTS|BTREE_ITER_INTENT); + end_pos = POS(inode->v.i_ino, block_end >> 9); - while (bkey_cmp(iter.pos, end) 
< 0) { + while (bkey_cmp(iter->pos, end_pos) < 0) { + s64 i_sectors_delta = 0; struct disk_reservation disk_res = { 0 }; + struct quota_res quota_res = { 0 }; struct bkey_i_reservation reservation; struct bkey_s_c k; - k = bch2_btree_iter_peek_with_holes(&iter); - if ((ret = btree_iter_err(k))) - goto btree_iter_err; + bch2_trans_begin(&trans); + + k = bch2_btree_iter_peek_slot(iter); + if ((ret = bkey_err(k))) + goto bkey_err; /* already reserved */ - if (k.k->type == BCH_RESERVATION && + if (k.k->type == KEY_TYPE_reservation && bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) { - bch2_btree_iter_advance_pos(&iter); + bch2_btree_iter_next_slot(iter); continue; } - if (bkey_extent_is_data(k.k)) { - if (!(mode & FALLOC_FL_ZERO_RANGE)) { - bch2_btree_iter_advance_pos(&iter); - continue; - } + if (bkey_extent_is_data(k.k) && + !(mode & FALLOC_FL_ZERO_RANGE)) { + bch2_btree_iter_next_slot(iter); + continue; } bkey_reservation_init(&reservation.k_i); - reservation.k.type = BCH_RESERVATION; + reservation.k.type = KEY_TYPE_reservation; reservation.k.p = k.k->p; reservation.k.size = k.k->size; - bch2_cut_front(iter.pos, &reservation.k_i); - bch2_cut_back(end, &reservation.k); + bch2_cut_front(iter->pos, &reservation.k_i); + bch2_cut_back(end_pos, &reservation.k_i); sectors = reservation.k.size; - reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k); + reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k); + + if (!bkey_extent_is_allocation(k.k)) { + ret = bch2_quota_reservation_add(c, inode, + "a_res, + sectors, true); + if (unlikely(ret)) + goto bkey_err; + } if (reservation.v.nr_replicas < replicas || - bch2_extent_is_compressed(k)) { - ret = bch2_disk_reservation_get(c, &disk_res, - sectors, 0); - if (ret) - goto err_put_sectors_dirty; + bch2_bkey_sectors_compressed(k)) { + ret = bch2_disk_reservation_get(c, &disk_res, sectors, + replicas, 0); + if (unlikely(ret)) + goto bkey_err; reservation.v.nr_replicas = disk_res.nr_replicas; } - ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook, - &inode->ei_journal_seq, - BTREE_INSERT_ATOMIC| - BTREE_INSERT_NOFAIL, - BTREE_INSERT_ENTRY(&iter, &reservation.k_i)); + ret = bch2_extent_update(&trans, iter, &reservation.k_i, + &disk_res, &inode->ei_journal_seq, + 0, &i_sectors_delta); + i_sectors_acct(c, inode, "a_res, i_sectors_delta); +bkey_err: + bch2_quota_reservation_put(c, inode, "a_res); bch2_disk_reservation_put(c, &disk_res); -btree_iter_err: - if (ret < 0 && ret != -EINTR) - goto err_put_sectors_dirty; - + if (ret == -EINTR) + ret = 0; + if (ret) + goto err; } - bch2_btree_iter_unlock(&iter); - i_sectors_dirty_put(c, inode, &i_sectors_hook); + /* + * Do we need to extend the file? 
+ * + * If we zeroed up to the end of the file, we dropped whatever writes + * were going to write out the current i_size, so we have to extend + * manually even if FL_KEEP_SIZE was set: + */ + if (end >= inode->v.i_size && + (!(mode & FALLOC_FL_KEEP_SIZE) || + (mode & FALLOC_FL_ZERO_RANGE))) { + struct btree_iter *inode_iter; + struct bch_inode_unpacked inode_u; - if (!(mode & FALLOC_FL_KEEP_SIZE) && - new_size > inode->v.i_size) { - i_size_write(&inode->v, new_size); + do { + bch2_trans_begin(&trans); + inode_iter = bch2_inode_peek(&trans, &inode_u, + inode->v.i_ino, 0); + ret = PTR_ERR_OR_ZERO(inode_iter); + } while (ret == -EINTR); - mutex_lock(&inode->ei_update_lock); - ret = bch2_write_inode_size(c, inode, inode->v.i_size); - mutex_unlock(&inode->ei_update_lock); - } + bch2_trans_unlock(&trans); - /* blech */ - if ((mode & FALLOC_FL_KEEP_SIZE) && - (mode & FALLOC_FL_ZERO_RANGE) && - inode->ei_size != inode->v.i_size) { - /* sync appends.. */ - ret = filemap_write_and_wait_range(mapping, - inode->ei_size, S64_MAX); if (ret) goto err; - if (inode->ei_size != inode->v.i_size) { - mutex_lock(&inode->ei_update_lock); - ret = bch2_write_inode_size(c, inode, inode->v.i_size); - mutex_unlock(&inode->ei_update_lock); - } - } + /* + * Sync existing appends before extending i_size, + * as in bch2_extend(): + */ + ret = filemap_write_and_wait_range(mapping, + inode_u.bi_size, S64_MAX); + if (ret) + goto err; - pagecache_block_put(&mapping->add_lock); - inode_unlock(&inode->v); + if (mode & FALLOC_FL_KEEP_SIZE) + end = inode->v.i_size; + else + i_size_write(&inode->v, end); - return 0; -err_put_sectors_dirty: - i_sectors_dirty_put(c, inode, &i_sectors_hook); + mutex_lock(&inode->ei_update_lock); + ret = bch2_write_inode_size(c, inode, end, 0); + mutex_unlock(&inode->ei_update_lock); + } err: - bch2_btree_iter_unlock(&iter); - pagecache_block_put(&mapping->add_lock); + bch2_trans_exit(&trans); + bch2_pagecache_block_put(&inode->ei_pagecache_lock); inode_unlock(&inode->v); return ret; } @@ -2480,47 +2694,186 @@ long bch2_fallocate_dispatch(struct file *file, int mode, loff_t offset, loff_t len) { struct bch_inode_info *inode = file_bch_inode(file); + struct bch_fs *c = inode->v.i_sb->s_fs_info; + long ret; + + if (!percpu_ref_tryget(&c->writes)) + return -EROFS; if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE))) - return bch2_fallocate(inode, mode, offset, len); + ret = bchfs_fallocate(inode, mode, offset, len); + else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE)) + ret = bchfs_fpunch(inode, offset, len); + else if (mode == FALLOC_FL_INSERT_RANGE) + ret = bchfs_fcollapse_finsert(inode, offset, len, true); + else if (mode == FALLOC_FL_COLLAPSE_RANGE) + ret = bchfs_fcollapse_finsert(inode, offset, len, false); + else + ret = -EOPNOTSUPP; + + percpu_ref_put(&c->writes); + + return ret; +} + +static void mark_range_unallocated(struct bch_inode_info *inode, + loff_t start, loff_t end) +{ + pgoff_t index = start >> PAGE_SHIFT; + pgoff_t end_index = (end - 1) >> PAGE_SHIFT; + struct pagevec pvec; + + pagevec_init(&pvec); + + do { + unsigned nr_pages, i, j; + + nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping, + &index, end_index); + if (nr_pages == 0) + break; - if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE)) - return bch2_fpunch(inode, offset, len); + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + struct bch_page_state *s; - if (mode == FALLOC_FL_COLLAPSE_RANGE) - return bch2_fcollapse(inode, offset, len); + lock_page(page); + s = 
bch2_page_state(page); + + if (s) { + spin_lock(&s->lock); + for (j = 0; j < PAGE_SECTORS; j++) + s->s[j].nr_replicas = 0; + spin_unlock(&s->lock); + } + + unlock_page(page); + } + pagevec_release(&pvec); + } while (index <= end_index); +} + +loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, + struct file *file_dst, loff_t pos_dst, + loff_t len, unsigned remap_flags) +{ + struct bch_inode_info *src = file_bch_inode(file_src); + struct bch_inode_info *dst = file_bch_inode(file_dst); + struct bch_fs *c = src->v.i_sb->s_fs_info; + s64 i_sectors_delta = 0; + u64 aligned_len; + loff_t ret = 0; + + if (!c->opts.reflink) + return -EOPNOTSUPP; + + if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY)) + return -EINVAL; + + if (remap_flags & REMAP_FILE_DEDUP) + return -EOPNOTSUPP; + + if ((pos_src & (block_bytes(c) - 1)) || + (pos_dst & (block_bytes(c) - 1))) + return -EINVAL; + + if (src == dst && + abs(pos_src - pos_dst) < len) + return -EINVAL; + + bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst); + + file_update_time(file_dst); + + inode_dio_wait(&src->v); + inode_dio_wait(&dst->v); + + ret = generic_remap_file_range_prep(file_src, pos_src, + file_dst, pos_dst, + &len, remap_flags); + if (ret < 0 || len == 0) + goto err; + + aligned_len = round_up((u64) len, block_bytes(c)); + + ret = write_invalidate_inode_pages_range(dst->v.i_mapping, + pos_dst, pos_dst + len - 1); + if (ret) + goto err; + + mark_range_unallocated(src, pos_src, pos_src + aligned_len); + + ret = bch2_remap_range(c, + POS(dst->v.i_ino, pos_dst >> 9), + POS(src->v.i_ino, pos_src >> 9), + aligned_len >> 9, + &dst->ei_journal_seq, + pos_dst + len, &i_sectors_delta); + if (ret < 0) + goto err; + + /* + * due to alignment, we might have remapped slightly more than requsted + */ + ret = min((u64) ret << 9, (u64) len); + + /* XXX get a quota reservation */ + i_sectors_acct(c, dst, NULL, i_sectors_delta); + + spin_lock(&dst->v.i_lock); + if (pos_dst + ret > dst->v.i_size) + i_size_write(&dst->v, pos_dst + ret); + spin_unlock(&dst->v.i_lock); +err: + bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst); - return -EOPNOTSUPP; + return ret; } -static bool page_is_data(struct page *page) +/* fseek: */ + +static int page_data_offset(struct page *page, unsigned offset) { - /* XXX: should only have to check PageDirty */ - return PagePrivate(page) && - (page_state(page)->sectors || - page_state(page)->dirty_sectors); + struct bch_page_state *s = bch2_page_state(page); + unsigned i; + + if (s) + for (i = offset >> 9; i < PAGE_SECTORS; i++) + if (s->s[i].state >= SECTOR_DIRTY) + return i << 9; + + return -1; } -static loff_t bch2_next_pagecache_data(struct inode *vinode, +static loff_t bch2_seek_pagecache_data(struct inode *vinode, loff_t start_offset, loff_t end_offset) { struct address_space *mapping = vinode->i_mapping; struct page *page; - pgoff_t index; - - for (index = start_offset >> PAGE_SHIFT; - index < end_offset >> PAGE_SHIFT; - index++) { - if (find_get_pages(mapping, index, 1, &page)) { + pgoff_t start_index = start_offset >> PAGE_SHIFT; + pgoff_t end_index = end_offset >> PAGE_SHIFT; + pgoff_t index = start_index; + loff_t ret; + int offset; + + while (index <= end_index) { + if (find_get_pages_range(mapping, &index, end_index, 1, &page)) { lock_page(page); - index = page->index; - if (page_is_data(page)) - end_offset = - min(end_offset, - max(start_offset, - ((loff_t) index) << PAGE_SHIFT)); + offset = page_data_offset(page, + page->index == start_index + ? 
start_offset & (PAGE_SIZE - 1) + : 0); + if (offset >= 0) { + ret = clamp(((loff_t) page->index << PAGE_SHIFT) + + offset, + start_offset, end_offset); + unlock_page(page); + put_page(page); + return ret; + } + unlock_page(page); put_page(page); } else { @@ -2535,7 +2888,8 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) { struct bch_inode_info *inode = file_bch_inode(file); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; u64 isize, next_data = MAX_LFS_FILESIZE; int ret; @@ -2544,8 +2898,10 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) if (offset >= isize) return -ENXIO; - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, - POS(inode->v.i_ino, offset >> 9), 0, k) { + bch2_trans_init(&trans, c, 0, 0); + + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, + POS(inode->v.i_ino, offset >> 9), 0, k, ret) { if (k.k->p.inode != inode->v.i_ino) { break; } else if (bkey_extent_is_data(k.k)) { @@ -2555,48 +2911,70 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) break; } - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_exit(&trans) ?: ret; if (ret) return ret; if (next_data > offset) - next_data = bch2_next_pagecache_data(&inode->v, + next_data = bch2_seek_pagecache_data(&inode->v, offset, next_data); - if (next_data > isize) + if (next_data >= isize) return -ENXIO; return vfs_setpos(file, next_data, MAX_LFS_FILESIZE); } -static bool page_slot_is_data(struct address_space *mapping, pgoff_t index) +static int __page_hole_offset(struct page *page, unsigned offset) +{ + struct bch_page_state *s = bch2_page_state(page); + unsigned i; + + if (!s) + return 0; + + for (i = offset >> 9; i < PAGE_SECTORS; i++) + if (s->s[i].state < SECTOR_DIRTY) + return i << 9; + + return -1; +} + +static loff_t page_hole_offset(struct address_space *mapping, loff_t offset) { + pgoff_t index = offset >> PAGE_SHIFT; struct page *page; - bool ret; + int pg_offset; + loff_t ret = -1; page = find_lock_entry(mapping, index); - if (!page || radix_tree_exception(page)) - return false; + if (!page || xa_is_value(page)) + return offset; + + pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1)); + if (pg_offset >= 0) + ret = ((loff_t) index << PAGE_SHIFT) + pg_offset; - ret = page_is_data(page); unlock_page(page); return ret; } -static loff_t bch2_next_pagecache_hole(struct inode *vinode, +static loff_t bch2_seek_pagecache_hole(struct inode *vinode, loff_t start_offset, loff_t end_offset) { struct address_space *mapping = vinode->i_mapping; - pgoff_t index; + loff_t offset = start_offset, hole; + + while (offset < end_offset) { + hole = page_hole_offset(mapping, offset); + if (hole >= 0 && hole <= end_offset) + return max(start_offset, hole); - for (index = start_offset >> PAGE_SHIFT; - index < end_offset >> PAGE_SHIFT; - index++) - if (!page_slot_is_data(mapping, index)) - end_offset = max(start_offset, - ((loff_t) index) << PAGE_SHIFT); + offset += PAGE_SIZE; + offset &= PAGE_MASK; + } return end_offset; } @@ -2605,7 +2983,8 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) { struct bch_inode_info *inode = file_bch_inode(file); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; u64 isize, next_hole = MAX_LFS_FILESIZE; int ret; @@ -2614,15 +2993,17 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) if (offset >= isize) return -ENXIO; - for_each_btree_key(&iter, 
c, BTREE_ID_EXTENTS, + bch2_trans_init(&trans, c, 0, 0); + + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(inode->v.i_ino, offset >> 9), - BTREE_ITER_WITH_HOLES, k) { + BTREE_ITER_SLOTS, k, ret) { if (k.k->p.inode != inode->v.i_ino) { - next_hole = bch2_next_pagecache_hole(&inode->v, + next_hole = bch2_seek_pagecache_hole(&inode->v, offset, MAX_LFS_FILESIZE); break; } else if (!bkey_extent_is_data(k.k)) { - next_hole = bch2_next_pagecache_hole(&inode->v, + next_hole = bch2_seek_pagecache_hole(&inode->v, max(offset, bkey_start_offset(k.k) << 9), k.k->p.offset << 9); @@ -2633,7 +3014,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) } } - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_exit(&trans) ?: ret; if (ret) return ret; @@ -2668,15 +3049,23 @@ void bch2_fs_fsio_exit(struct bch_fs *c) int bch2_fs_fsio_init(struct bch_fs *c) { + int ret = 0; + + pr_verbose_init(c->opts, ""); + if (bioset_init(&c->writepage_bioset, - 4, offsetof(struct bch_writepage_io, op.op.wbio.bio)) || + 4, offsetof(struct bch_writepage_io, op.wbio.bio), + BIOSET_NEED_BVECS) || bioset_init(&c->dio_read_bioset, - 4, offsetof(struct dio_read, rbio.bio)) || + 4, offsetof(struct dio_read, rbio.bio), + BIOSET_NEED_BVECS) || bioset_init(&c->dio_write_bioset, - 4, offsetof(struct dio_write, iop.op.wbio.bio))) - return -ENOMEM; + 4, offsetof(struct dio_write, op.wbio.bio), + BIOSET_NEED_BVECS)) + ret = -ENOMEM; - return 0; + pr_verbose_init(c->opts, "ret %i", ret); + return ret; } #endif /* NO_BCACHEFS_FS */
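The buffered-write loop above (bch2_buffered_write) retries with a single iovec segment whenever the atomic usercopy makes no progress, as its livelock comment explains. Below is a minimal userspace model of just that control flow; copy_some() is a hypothetical stand-in for __bch2_buffered_write() and is forced to fault once so the fallback path runs.

#include <stddef.h>
#include <stdio.h>

static const size_t seg_len[] = { 300, 200, 100 };	/* a 3-segment iov */

/* Hypothetical stand-in for __bch2_buffered_write(); faults once. */
static size_t copy_some(size_t bytes, size_t seg, int *faulted)
{
	if (!*faulted) {
		*faulted = 1;
		return 0;		/* atomic usercopy made no progress */
	}
	return bytes < seg_len[seg] ? bytes : seg_len[seg];
}

int main(void)
{
	size_t pos = 0, total = 600, seg = 0, copied;
	int faulted = 0;

	while (pos < total) {
		size_t bytes = total - pos;	/* optimistic batch size */

		for (;;) {
			copied = copy_some(bytes, seg, &faulted);
			if (copied)
				break;
			/*
			 * No progress: fall back to a single segment's
			 * length so a multi-segment iov can't livelock,
			 * mirroring the comment in bch2_buffered_write().
			 */
			bytes = seg_len[seg];
		}

		pos += copied;
		if (copied == seg_len[seg])
			seg++;
		printf("copied %zu bytes, pos now %zu\n", copied, pos);
	}
	return 0;
}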
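bch2_direct_IO_read() now shortens the iterator to a block-aligned length (iter->count -= shorten) instead of truncating it, and restores the count once the bios are issued so the caller sees a consistent iter. A sketch of that arithmetic with made-up sizes; only the clamp to i_size and the round-up to block_bytes(c) come from the diff.

#include <stdint.h>
#include <stdio.h>

static uint64_t round_up(uint64_t v, uint64_t m)
{
	return (v + m - 1) / m * m;
}

int main(void)
{
	uint64_t block_bytes = 4096;		/* block_bytes(c) */
	uint64_t iter_count  = 1 << 20;		/* bytes the caller asked for */
	uint64_t i_size      = 9000;		/* file size */
	uint64_t offset      = 0;		/* req->ki_pos */

	uint64_t readable = i_size > offset ? i_size - offset : 0;
	if (readable > iter_count)
		readable = iter_count;

	/* shorten the iter to a block-aligned length for the I/O... */
	uint64_t shorten = iter_count - round_up(readable, block_bytes);
	iter_count -= shorten;
	printf("issue %llu bytes of block-aligned I/O\n",
	       (unsigned long long) iter_count);

	/* ...then give the bytes back before returning */
	iter_count += shorten;
	return 0;
}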
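In bch2_dio_write_loop(), a failed disk reservation is not fatal when the write only overwrites extents that are already fully allocated; that is what the bch2_check_range_allocated() call decides. A sketch of the decision, with stub predicates standing in for the real functions.

#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend every reservation fails with -ENOSPC. */
static bool reserve(unsigned sectors)
{
	(void) sectors;
	return false;
}

/* Stub for bch2_check_range_allocated(): range already has space. */
static bool range_allocated(unsigned long long start, unsigned sectors)
{
	(void) start;
	(void) sectors;
	return true;
}

int main(void)
{
	unsigned long long pos = 4096 >> 9;	/* write position, in sectors */
	unsigned sectors = 8;

	if (!reserve(sectors) && !range_allocated(pos, sectors)) {
		puts("-ENOSPC");
		return 1;
	}
	puts("write proceeds: pure overwrite of allocated extents");
	return 0;
}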
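bchfs_fcollapse_finsert() expresses FALLOC_FL_INSERT_RANGE and FALLOC_FL_COLLAPSE_RANGE as one extent-shifting walk: shift is +len for insert (iterating backwards from src_start = U64_MAX) and -len for collapse, and both the moved extents and i_size are displaced by it. A toy version of the displacement in bytes; the diff itself works in 512-byte sectors (shift >> 9).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t  len    = 1 << 16;	/* range being inserted/collapsed */
	uint64_t i_size = 1 << 20;
	int insert = 1;			/* 1: FL_INSERT_RANGE, 0: FL_COLLAPSE_RANGE */

	int64_t shift = insert ? len : -len;

	/* an extent [start, end) sitting past the affected offset: */
	uint64_t ext_start = 800 << 10, ext_end = 900 << 10;

	printf("extent moves to [%llu, %llu), new i_size %llu\n",
	       (unsigned long long) (ext_start + shift),
	       (unsigned long long) (ext_end + shift),
	       (unsigned long long) (i_size + shift));
	return 0;
}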
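The SEEK_DATA/SEEK_HOLE helpers scan the per-sector state kept in struct bch_page_state, treating state >= SECTOR_DIRTY as data and anything below as hole. A userspace model of page_data_offset(); the enum values here are an assumption, ordered only so the comparison matches the one in the diff.

#include <stdio.h>

/* Assumed ordering: the diff only relies on data states sorting at or
 * above SECTOR_DIRTY and hole states below it. */
enum sector_state {
	SECTOR_UNALLOCATED,
	SECTOR_RESERVED,
	SECTOR_DIRTY,
	SECTOR_ALLOCATED,
};

#define PAGE_SECTORS	8	/* 4K page, 512-byte sectors */

static int page_data_offset(const enum sector_state *s, unsigned offset)
{
	unsigned i;

	for (i = offset >> 9; i < PAGE_SECTORS; i++)
		if (s[i] >= SECTOR_DIRTY)
			return i << 9;
	return -1;
}

int main(void)
{
	enum sector_state page[PAGE_SECTORS] = {
		[5] = SECTOR_DIRTY,	/* data begins at byte 5 << 9 */
	};

	printf("first data byte: %d\n", page_data_offset(page, 0));
	return 0;
}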