// SPDX-License-Identifier: GPL-2.0

#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "extent_update.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>

#include <trace/events/writeback.h>
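
/*
 * nocow writes bypass the journal, so the usual journal-flush barrier
 * doesn't cover them: each nocow write records the devices it touched in
 * ei_devs_need_flush, and fsync issues an empty flush bio to each of
 * those devices (via the helpers below) before it can return.
 */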
static void nocow_flush_endio(struct bio *_bio)
	struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
	percpu_ref_put(&bio->ca->io_ref);

void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
					 struct bch_inode_info *inode,
	struct nocow_flush *bio;
	struct bch_devs_mask devs;

	dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
	if (dev == BCH_SB_MEMBERS_MAX)

	devs = inode->ei_devs_need_flush;
	memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));

	for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
		ca = rcu_dereference(c->devs[dev]);
		if (ca && !percpu_ref_tryget(&ca->io_ref))

		bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
						    &c->nocow_flush_bioset),
				   struct nocow_flush, bio);

		bio->bio.bi_end_io = nocow_flush_endio;
		closure_bio_submit(&bio->bio, cl);
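
/*
 * Synchronous wrapper around the async version above: issue the flushes
 * against an on-stack closure, then wait for them all to complete.
 */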
static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
					 struct bch_inode_info *inode)
	closure_init_stack(&cl);
	bch2_inode_flush_nocow_writes_async(c, inode, &cl);
/* i_size updates: */

struct inode_new_size {

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
	struct inode_new_size *s = p;

	bi->bi_size = s->new_size;
	if (s->fields & ATTR_ATIME)
		bi->bi_atime = s->now;
	if (s->fields & ATTR_MTIME)
		bi->bi_mtime = s->now;
	if (s->fields & ATTR_CTIME)
		bi->bi_ctime = s->now;
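
/*
 * inode_set_size() is a callback for bch2_write_inode(), which runs it
 * on the unpacked btree inode inside a transaction and commits the
 * result - so the in-memory and on-disk inode are updated together:
 */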
int __must_check bch2_write_inode_size(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       loff_t new_size, unsigned fields)
	struct inode_new_size s = {
		.new_size = new_size,
		.now = bch2_current_time(c),

	return bch2_write_inode(c, inode, inode_set_size, &s, fields);
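
/*
 * Update the in-memory i_blocks count, and consume the matching quota
 * reservation if the caller holds one - otherwise account the delta
 * directly with bch2_quota_acct():
 */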
void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, s64 sectors)
	bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
				"inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
				inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
				inode->ei_inode.bi_sectors);
	inode->v.i_blocks += sectors;

#ifdef CONFIG_BCACHEFS_QUOTA
	    !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
/*
 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
 * insert trigger: look up the btree inode instead
 */
static int bch2_flush_inode(struct bch_fs *c,
			    struct bch_inode_info *inode)
	struct bch_inode_unpacked u;

	if (c->opts.journal_flush_disabled)

	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);

	return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
		bch2_inode_flush_nocow_writes(c, inode);
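
/*
 * fsync proper: write out dirty pages, sync the VFS inode metadata, then
 * flush the journal up to the inode's last journal sequence number and
 * flush any nocow writes. The first nonzero error of the three steps
 * wins:
 */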
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	ret = file_write_and_wait_range(file, start, end);
	ret2 = sync_inode_metadata(&inode->v, 1);
	ret3 = bch2_flush_inode(c, inode);

	return bch2_err_class(ret ?: ret2 ?: ret3);
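
/* truncate: */

/*
 * Check whether the given range of the file contains any written data
 * extents; unwritten extents don't count:
 */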
static inline int range_has_data(struct bch_fs *c, u32 subvol,
	struct btree_trans trans;
	struct btree_iter iter;

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);

	for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
		if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {

	bch2_trans_iter_exit(&trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

	bch2_trans_exit(&trans);
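
/*
 * Zero out the part of a folio exposed by truncate or hole punch,
 * updating the per-sector state and i_sectors accounting; the return
 * value tells the caller whether writeback of this folio will handle the
 * i_size update (see the comment near the end of the function):
 */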
static int __bch2_truncate_folio(struct bch_inode_info *inode,
				 pgoff_t index, loff_t start, loff_t end)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	s64 i_sectors_delta = 0;

	folio = filemap_lock_folio(mapping, index);
	if (IS_ERR_OR_NULL(folio)) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * folio
		 */
		ret = range_has_data(c, inode->ei_subvol,
			POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
			POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK|FGP_CREAT, GFP_KERNEL);
		if (unlikely(IS_ERR_OR_NULL(folio))) {

	BUG_ON(start >= folio_end_pos(folio));
	BUG_ON(end <= folio_pos(folio));

	start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
	end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);

	/* Folio boundary? Nothing to do */
	if (start_offset == 0 &&
	    end_offset == folio_size(folio)) {

	s = bch2_folio_create(folio, 0);

	if (!folio_test_uptodate(folio)) {
		ret = bch2_read_single_folio(folio, mapping);

	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
		s->s[i].nr_replicas = 0;

		i_sectors_delta -= s->s[i].state == SECTOR_dirty;
		bch2_folio_sector_set(folio, s, i, SECTOR_unallocated);

	bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

	/*
	 * Caller needs to know whether this folio will be written out by
	 * writeback - doing an i_size update if necessary - or whether it will
	 * be responsible for the i_size update.
	 *
	 * Note that we shouldn't ever see a folio beyond EOF, but check and
	 * warn if so. This has been observed to happen when folios weren't
	 * cleaned up after a short write, and there's still a chance reclaim
	 * will fix things up.
	 */
	WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
	end_pos = folio_end_pos(folio);
	if (inode->v.i_size > folio_pos(folio))
		end_pos = min_t(u64, inode->v.i_size, end_pos);
	ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;

	folio_zero_segment(folio, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 * XXX: because we aren't currently tracking whether the folio has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));

	/*
	 * This removes any writeable userspace mappings; we need to force
	 * .page_mkwrite to be called again before any mmapped writes, to
	 * redirty the full page:
	 */
	folio_mkclean(folio);
	filemap_dirty_folio(mapping, folio);

static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
	return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
				     from, ANYSINT_MAX(loff_t));
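
/*
 * Truncate the partial folio at each end of the range; a positive return
 * means the folio containing the end of the range straddles i_size and
 * was dirtied here ("truncated_last_page" at the callers):
 */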
static int bch2_truncate_folios(struct bch_inode_info *inode,
				loff_t start, loff_t end)
	int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,

	    start >> PAGE_SHIFT != end >> PAGE_SHIFT)
		ret = __bch2_truncate_folio(inode,
					    (end - 1) >> PAGE_SHIFT,
static int bch2_extend(struct mnt_idmap *idmap,
		       struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
	struct address_space *mapping = inode->v.i_mapping;

	/* this has to be done _before_ extending i_size: */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);

	truncate_setsize(&inode->v, iattr->ia_size);

	return bch2_setattr_nonsize(idmap, inode, iattr);
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
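
/*
 * Truncating down is a two-phase update: write the reduced i_size with
 * BCH_INODE_I_SIZE_DIRTY set, delete the extents past it, then clear the
 * flag - so if we crash in between, recovery can finish deleting extents
 * past i_size instead of exposing stale data:
 */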
int bch2_truncate(struct mnt_idmap *idmap,
		  struct bch_inode_info *inode, struct iattr *iattr)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	u64 new_i_size = iattr->ia_size;
	s64 i_sectors_delta = 0;
	/*
	 * If the truncate call will change the size of the file, the
	 * cmtimes should be updated. If the size will not change, we
	 * do not need to update the cmtimes.
	 */
	if (iattr->ia_size != inode->v.i_size) {
		if (!(iattr->ia_valid & ATTR_MTIME))
			ktime_get_coarse_real_ts64(&iattr->ia_mtime);
		if (!(iattr->ia_valid & ATTR_CTIME))
			ktime_get_coarse_real_ts64(&iattr->ia_ctime);
		iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;

	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(inode);

	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);

	WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
		  inode->v.i_size < inode_u.bi_size,
		  "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
		  (u64) inode->v.i_size, inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(idmap, inode, &inode_u, iattr);

	iattr->ia_valid &= ~ATTR_SIZE;

	ret = bch2_truncate_folio(inode, iattr->ia_size);
	if (unlikely(ret < 0))
	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .page_mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
	mutex_unlock(&inode->ei_update_lock);

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode_inum(inode),
			  round_up(iattr->ia_size, block_bytes(c)) >> 9,
			  U64_MAX, &i_sectors_delta);
	bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

	bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
				!bch2_journal_error(&c->journal), c,
				"inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
				inode->v.i_ino, (u64) inode->v.i_blocks,
				inode->ei_inode.bi_sectors);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
	mutex_unlock(&inode->ei_update_lock);

	ret = bch2_setattr_nonsize(idmap, inode, iattr);

	bch2_pagecache_block_put(inode);
	return bch2_err_class(ret);
static int inode_update_times_fn(struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi, void *p)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
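
/* fallocate: */

/*
 * Hole punch: zero the partial folios at either end of the range in the
 * page cache, drop the whole pagecache range, then delete the
 * block-aligned middle from the extents btree:
 */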
static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end = offset + len;
	u64 block_start = round_up(offset, block_bytes(c));
	u64 block_end = round_down(end, block_bytes(c));
	bool truncated_last_page;

	ret = bch2_truncate_folios(inode, offset, end);
	if (unlikely(ret < 0))

	truncated_last_page = ret;

	truncate_pagecache_range(&inode->v, offset, end - 1);

	if (block_start < block_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode_inum(inode),
				  block_start >> 9, block_end >> 9,
		bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

	mutex_lock(&inode->ei_update_lock);
	if (end >= inode->v.i_size && !truncated_last_page) {
		ret = bch2_write_inode_size(c, inode, inode->v.i_size,
					    ATTR_MTIME|ATTR_CTIME);
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);
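
/*
 * Collapse/insert range: every extent from the affected offset to EOF is
 * shifted down (collapse) or up (insert) by len. The loop below walks
 * the extents btree with three iterators - src to read each extent, del
 * to delete it at its old position, dst to reinsert the shifted copy -
 * committing one extent per transaction:
 */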
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
				    loff_t offset, loff_t len,
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bkey_buf copy;
	struct btree_trans trans;
	struct btree_iter src, dst, del;
	loff_t shift, new_size;

	if ((offset | len) & (block_bytes(c) - 1))

	if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)

	if (offset >= inode->v.i_size)

	if (offset + len >= inode->v.i_size)

	src_start = offset + len;

	new_size = inode->v.i_size + shift;

	ret = bch2_write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);

	i_size_write(&inode->v, new_size);
	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode_size(c, inode, new_size,
				    ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	s64 i_sectors_delta = 0;

	ret = bch2_fpunch(c, inode_inum(inode),
			  offset >> 9, (offset + len) >> 9,
	bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
	bch2_bkey_buf_init(&copy);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
	bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
			     POS(inode->v.i_ino, src_start >> 9),
	bch2_trans_copy_iter(&dst, &src);
	bch2_trans_copy_iter(&del, &src);

	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;
		struct bpos next_pos;
		struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
		struct bpos atomic_end;
		unsigned trigger_flags = 0;

		bch2_trans_begin(&trans);

		ret = bch2_subvolume_get_snapshot(&trans,
						  inode->ei_subvol, &snapshot);

		bch2_btree_iter_set_snapshot(&src, snapshot);
		bch2_btree_iter_set_snapshot(&dst, snapshot);
		bch2_btree_iter_set_snapshot(&del, snapshot);

		bch2_trans_begin(&trans);

			? bch2_btree_iter_peek_prev(&src)
			: bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
		if ((ret = bkey_err(k)))

		if (!k.k || k.k->p.inode != inode->v.i_ino)

		    bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))

		bch2_bkey_buf_reassemble(&copy, c, k);

		    bkey_lt(bkey_start_pos(k.k), move_pos))
			bch2_cut_front(move_pos, copy.k);

		copy.k->k.p.offset += shift >> 9;
		bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));

		ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);

		if (!bkey_eq(atomic_end, copy.k->k.p)) {
			move_pos = atomic_end;
			move_pos.offset -= shift >> 9;
			bch2_cut_back(atomic_end, copy.k);

		bkey_init(&delete.k);
		delete.k.p = copy.k->k.p;
		delete.k.size = copy.k->k.size;
		delete.k.p.offset -= shift >> 9;
		bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));

		next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;

		if (copy.k->k.size != k.k->size) {
			/* We might end up splitting compressed extents: */
				bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));

			ret = bch2_disk_reservation_get(c, &disk_res,
							copy.k->k.size, nr_ptrs,
							BCH_DISK_RESERVATION_NOFAIL);

		ret = bch2_btree_iter_traverse(&del) ?:
			bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
			bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
			bch2_trans_commit(&trans, &disk_res, NULL,
					  BTREE_INSERT_NOFAIL);
		bch2_disk_reservation_put(c, &disk_res);

		bch2_btree_iter_set_pos(&src, next_pos);

	bch2_trans_iter_exit(&trans, &del);
	bch2_trans_iter_exit(&trans, &dst);
	bch2_trans_iter_exit(&trans, &src);
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&copy, c);
	mutex_lock(&inode->ei_update_lock);
		i_size_write(&inode->v, new_size);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);

		/* We need an inode update to update bi_journal_seq for fsync: */
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);
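
/*
 * Core of fallocate: walk the extents in the range and turn each hole
 * (and, for ZERO_RANGE, anything not already a sufficiently-replicated
 * reservation) into an unwritten reservation, taking quota reservations
 * as we go:
 */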
static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
			     u64 start_sector, u64 end_sector)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bpos end_pos = POS(inode->v.i_ino, end_sector);
	struct bch_io_opts opts;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     POS(inode->v.i_ino, start_sector),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (!ret && bkey_lt(iter.pos, end_pos)) {
		s64 i_sectors_delta = 0;
		struct quota_res quota_res = { 0 };
		u64 hole_start, hole_end;

		bch2_trans_begin(&trans);

		ret = bch2_subvolume_get_snapshot(&trans,
						  inode->ei_subvol, &snapshot);

		bch2_btree_iter_set_snapshot(&iter, snapshot);

		k = bch2_btree_iter_peek_slot(&iter);
		if ((ret = bkey_err(k)))

		hole_start = iter.pos.offset;
		hole_end = bpos_min(k.k->p, end_pos).offset;
		is_allocation = bkey_extent_is_allocation(k.k);

		/* already reserved */
		if (bkey_extent_is_reservation(k) &&
		    bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
			bch2_btree_iter_advance(&iter);

		if (bkey_extent_is_data(k.k) &&
		    !(mode & FALLOC_FL_ZERO_RANGE)) {
			bch2_btree_iter_advance(&iter);

		if (!(mode & FALLOC_FL_ZERO_RANGE)) {
			/*
			 * Lock ordering - can't be holding btree locks while
			 * blocking on a folio lock:
			 */
			if (bch2_clamp_data_hole(&inode->v,
						 opts.data_replicas, true))
				ret = drop_locks_do(&trans,
					(bch2_clamp_data_hole(&inode->v,
							      opts.data_replicas, false), 0));
			bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));

		if (hole_start == hole_end)

		sectors = hole_end - hole_start;

		if (!is_allocation) {
			ret = bch2_quota_reservation_add(c, inode,
							 &quota_res, sectors, true);

		ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
					    sectors, opts, &i_sectors_delta,
					    writepoint_hashed((unsigned long) current));

		bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);

		drop_locks_do(&trans,
			(bch2_mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));

		bch2_quota_reservation_put(c, inode, &quota_res);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

	if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
		struct quota_res quota_res = { 0 };
		s64 i_sectors_delta = 0;

		bch2_fpunch_at(&trans, &iter, inode_inum(inode),
			       end_sector, &i_sectors_delta);
		bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
		bch2_quota_reservation_put(c, inode, &quota_res);

	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
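
/*
 * fallocate modes that allocate: for ZERO_RANGE, first zero the partial
 * folios and drop the pagecache over the range; then reserve the
 * block-aligned middle, and finally update i_size if we extended the
 * file:
 */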
static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
			    loff_t offset, loff_t len)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end = offset + len;
	u64 block_start = round_down(offset, block_bytes(c));
	u64 block_end = round_up(end, block_bytes(c));
	bool truncated_last_page = false;

	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, end);

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = bch2_truncate_folios(inode, offset, end);
		if (unlikely(ret < 0))

		truncated_last_page = ret;

		truncate_pagecache_range(&inode->v, offset, end - 1);

		block_start = round_up(offset, block_bytes(c));
		block_end = round_down(end, block_bytes(c));

	ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);

	/*
	 * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
	 * so that the VFS cache i_size is consistent with the btree i_size:
	 */
	    !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))

	if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
		end = inode->v.i_size;

	if (end >= inode->v.i_size &&
	    (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
	     !(mode & FALLOC_FL_KEEP_SIZE))) {
		spin_lock(&inode->v.i_lock);
		i_size_write(&inode->v, end);
		spin_unlock(&inode->v.i_lock);

		mutex_lock(&inode->ei_update_lock);
		ret2 = bch2_write_inode_size(c, inode, end, 0);
		mutex_unlock(&inode->ei_update_lock);
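
/*
 * Dispatch on the fallocate mode; everything runs with the inode locked,
 * DIO drained and pagecache writes blocked:
 */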
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(inode);

	ret = file_modified(file);

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		ret = bchfs_fallocate(inode, mode, offset, len);
	else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		ret = bchfs_fpunch(inode, offset, len);
	else if (mode == FALLOC_FL_INSERT_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, true);
	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, false);

	bch2_pagecache_block_put(inode);
	inode_unlock(&inode->v);
	bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);

	return bch2_err_class(ret);
/*
 * Take a quota reservation for unallocated blocks in a given file range;
 * does not check pagecache.
 */
static int quota_reserve_range(struct bch_inode_info *inode,
			       struct quota_res *res,
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	u64 sectors = end - start;

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     SPOS(inode->v.i_ino, pos, snapshot), 0);

	while (!(ret = btree_trans_too_many_iters(&trans)) &&
	       (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
	       !(ret = bkey_err(k))) {
		if (bkey_extent_is_allocation(k.k)) {
			u64 s = min(end, k.k->p.offset) -
				max(start, bkey_start_offset(k.k));

		bch2_btree_iter_advance(&iter);

	pos = iter.pos.offset;
	bch2_trans_iter_exit(&trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

	bch2_trans_exit(&trans);

	return bch2_quota_reservation_add(c, inode, res, sectors, true);
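
/*
 * remap_file_range: reflink the source range into the destination.
 * Dirty pagecache for the destination is invalidated first, and quota is
 * reserved for blocks not yet allocated in the destination:
 */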
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
			     struct file *file_dst, loff_t pos_dst,
			     loff_t len, unsigned remap_flags)
	struct bch_inode_info *src = file_bch_inode(file_src);
	struct bch_inode_info *dst = file_bch_inode(file_dst);
	struct bch_fs *c = src->v.i_sb->s_fs_info;
	struct quota_res quota_res = { 0 };
	s64 i_sectors_delta = 0;

	if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))

	if (remap_flags & REMAP_FILE_DEDUP)

	if ((pos_src & (block_bytes(c) - 1)) ||
	    (pos_dst & (block_bytes(c) - 1)))

	    abs(pos_src - pos_dst) < len)

	bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	inode_dio_wait(&src->v);
	inode_dio_wait(&dst->v);

	ret = generic_remap_file_range_prep(file_src, pos_src,
	if (ret < 0 || len == 0)

	aligned_len = round_up((u64) len, block_bytes(c));

	ret = bch2_write_invalidate_inode_pages_range(dst->v.i_mapping,
				pos_dst, pos_dst + len - 1);

	ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
				  (pos_dst + aligned_len) >> 9);

	file_update_time(file_dst);

	bch2_mark_pagecache_unallocated(src, pos_src >> 9,
					(pos_src + aligned_len) >> 9);

	ret = bch2_remap_range(c,
			       inode_inum(dst), pos_dst >> 9,
			       inode_inum(src), pos_src >> 9,
			       pos_dst + len, &i_sectors_delta);
	/*
	 * due to alignment, we might have remapped slightly more than requested
	 */
	ret = min((u64) ret << 9, (u64) len);

	bch2_i_sectors_acct(c, dst, &quota_res, i_sectors_delta);

	spin_lock(&dst->v.i_lock);
	if (pos_dst + ret > dst->v.i_size)
		i_size_write(&dst->v, pos_dst + ret);
	spin_unlock(&dst->v.i_lock);

	if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
	    IS_SYNC(file_inode(file_dst)))
		ret = bch2_flush_inode(c, dst);

	bch2_quota_reservation_put(c, dst, &quota_res);
	bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	return bch2_err_class(ret);
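
/* fseek: */

/*
 * SEEK_DATA: find the first data extent at or after the offset in the
 * extents btree, then check whether the page cache has dirty data before
 * that point that the btree doesn't know about yet:
 */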
static loff_t bch2_seek_data(struct file *file, u64 offset)
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_data = MAX_LFS_FILESIZE;

	isize = i_size_read(&inode->v);
	if (offset >= isize)

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);

	for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
			SPOS(inode->v.i_ino, offset >> 9, snapshot),
			POS(inode->v.i_ino, U64_MAX),
		if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
		} else if (k.k->p.offset >> 9 > isize)

	bch2_trans_iter_exit(&trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

	bch2_trans_exit(&trans);

	if (next_data > offset)
		next_data = bch2_seek_pagecache_data(&inode->v,
					offset, next_data, 0, false);

	if (next_data >= isize)

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
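
/*
 * SEEK_HOLE is the mirror image: a hole is a gap in the extents btree
 * that also has no data in the page cache, so both have to be checked:
 */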
static loff_t bch2_seek_hole(struct file *file, u64 offset)
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_hole = MAX_LFS_FILESIZE;

	isize = i_size_read(&inode->v);
	if (offset >= isize)

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
			SPOS(inode->v.i_ino, offset >> 9, snapshot),
			BTREE_ITER_SLOTS, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE, 0, false);
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9, 0, false);

			if (next_hole < k.k->p.offset << 9)
			offset = max(offset, bkey_start_offset(k.k) << 9);

	bch2_trans_iter_exit(&trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

	bch2_trans_exit(&trans);

	if (next_hole > isize)

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
		ret = generic_file_llseek(file, offset, whence);
		ret = bch2_seek_data(file, offset);
		ret = bch2_seek_hole(file, offset);

	return bch2_err_class(ret);
void bch2_fs_fsio_exit(struct bch_fs *c)
	bioset_exit(&c->nocow_flush_bioset);

int bch2_fs_fsio_init(struct bch_fs *c)
	if (bioset_init(&c->nocow_flush_bioset,
			1, offsetof(struct nocow_flush, bio), 0))
		return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;

#endif /* NO_BCACHEFS_FS */