// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
//#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "io.h"
#include "keylist.h"
#include "quota.h"
#include "reflink.h"
#include "trace.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>

#include <trace/events/writeback.h>

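/*
 * Nocow writes may still be sitting in a device's volatile write cache when
 * an fsync comes in: we track, per inode, which devices have outstanding
 * nocow writes (ei_devs_need_flush) and issue an explicit cache flush
 * (REQ_OP_FLUSH) to each of them.
 */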
struct nocow_flush {
        struct closure  *cl;
        struct bch_dev  *ca;
        struct bio      bio;
};

static void nocow_flush_endio(struct bio *_bio)
{
        struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);

        closure_put(bio->cl);
        percpu_ref_put(&bio->ca->io_ref);
        bio_put(&bio->bio);
}

void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
                                         struct bch_inode_info *inode,
                                         struct closure *cl)
{
        struct nocow_flush *bio;
        struct bch_dev *ca;
        struct bch_devs_mask devs;
        unsigned dev;

        dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
        if (dev == BCH_SB_MEMBERS_MAX)
                return;

        devs = inode->ei_devs_need_flush;
        memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));

        for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
                rcu_read_lock();
                ca = rcu_dereference(c->devs[dev]);
                if (ca && !percpu_ref_tryget(&ca->io_ref))
                        ca = NULL;
                rcu_read_unlock();

                if (!ca)
                        continue;

                bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
                                                    REQ_OP_FLUSH,
                                                    GFP_KERNEL,
                                                    &c->nocow_flush_bioset),
                                   struct nocow_flush, bio);
                bio->cl                 = cl;
                bio->ca                 = ca;
                bio->bio.bi_end_io      = nocow_flush_endio;
                closure_bio_submit(&bio->bio, cl);
        }
}

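/*
 * Synchronous version of the above: issue the flushes and wait for them on an
 * on-stack closure.
 */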
static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
                                         struct bch_inode_info *inode)
{
        struct closure cl;

        closure_init_stack(&cl);
        bch2_inode_flush_nocow_writes_async(c, inode, &cl);
        closure_sync(&cl);

        return 0;
}

/* i_size updates: */

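/*
 * bch2_write_inode() takes a callback that is applied to the unpacked inode
 * inside a btree transaction (and is rerun if the transaction restarts);
 * inode_set_size is the callback used here, so the size and timestamp
 * updates land atomically.
 */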
struct inode_new_size {
        loff_t          new_size;
        u64             now;
        unsigned        fields;
};

static int inode_set_size(struct bch_inode_info *inode,
                          struct bch_inode_unpacked *bi,
                          void *p)
{
        struct inode_new_size *s = p;

        bi->bi_size = s->new_size;
        if (s->fields & ATTR_ATIME)
                bi->bi_atime = s->now;
        if (s->fields & ATTR_MTIME)
                bi->bi_mtime = s->now;
        if (s->fields & ATTR_CTIME)
                bi->bi_ctime = s->now;

        return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
                                       struct bch_inode_info *inode,
                                       loff_t new_size, unsigned fields)
{
        struct inode_new_size s = {
                .new_size       = new_size,
                .now            = bch2_current_time(c),
                .fields         = fields,
        };

        return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}

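/*
 * Apply a delta (possibly negative) to the vfs-visible block count, and to
 * quotas: if the caller holds a quota reservation the sectors are taken from
 * it, otherwise they are accounted directly via bch2_quota_acct().
 */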
void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
                           struct quota_res *quota_res, s64 sectors)
{
        bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
                                "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
                                inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
                                inode->ei_inode.bi_sectors);
        inode->v.i_blocks += sectors;

#ifdef CONFIG_BCACHEFS_QUOTA
        if (quota_res &&
            !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
            sectors > 0) {
                BUG_ON(sectors > quota_res->sectors);
                BUG_ON(sectors > inode->ei_quota_reserved);

                quota_res->sectors -= sectors;
                inode->ei_quota_reserved -= sectors;
        } else {
                bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
        }
#endif
}

/* fsync: */

/*
 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
 * insert trigger: look up the btree inode instead
 */
static int bch2_flush_inode(struct bch_fs *c,
                            struct bch_inode_info *inode)
{
        struct bch_inode_unpacked u;
        int ret;

        if (c->opts.journal_flush_disabled)
                return 0;

        ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
        if (ret)
                return ret;

        return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
                bch2_inode_flush_nocow_writes(c, inode);
}

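/*
 * fsync: flush dirty pages, sync inode metadata, then flush the journal up to
 * the inode's journal sequence number and flush any devices with outstanding
 * nocow writes. All three steps run even if an earlier one fails; the first
 * error is what gets returned.
 */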
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        int ret, ret2, ret3;

        ret = file_write_and_wait_range(file, start, end);
        ret2 = sync_inode_metadata(&inode->v, 1);
        ret3 = bch2_flush_inode(c, inode);

        return bch2_err_class(ret ?: ret2 ?: ret3);
}

/* truncate: */

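/*
 * Returns 1 if the given range contains written data extents, 0 if it
 * doesn't, or a negative error code.
 */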
static inline int range_has_data(struct bch_fs *c, u32 subvol,
                                 struct bpos start,
                                 struct bpos end)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);
retry:
        bch2_trans_begin(&trans);

        ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
        if (ret)
                goto err;

        for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
                if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
                        ret = 1;
                        break;
                }
        start = iter.pos;
        bch2_trans_iter_exit(&trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_exit(&trans);
        return ret;
}

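/*
 * Zero out the portion of a folio covered by a truncate or hole punch.
 * Returns a positive value if the folio straddling the new EOF was left
 * dirty (writeback will then be responsible for the i_size update), 0
 * otherwise, or a negative error code.
 */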
static int __bch2_truncate_folio(struct bch_inode_info *inode,
                                 pgoff_t index, loff_t start, loff_t end)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        struct bch_folio *s;
        unsigned start_offset = start & (PAGE_SIZE - 1);
        unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
        unsigned i;
        struct folio *folio;
        s64 i_sectors_delta = 0;
        int ret = 0;
        u64 end_pos;

        folio = filemap_lock_folio(mapping, index);
        if (IS_ERR_OR_NULL(folio)) {
                /*
                 * XXX: we're doing two index lookups when we end up reading the
                 * folio
                 */
                ret = range_has_data(c, inode->ei_subvol,
                                POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
                                POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
                if (ret <= 0)
                        return ret;

                folio = __filemap_get_folio(mapping, index,
                                            FGP_LOCK|FGP_CREAT, GFP_KERNEL);
                if (unlikely(IS_ERR_OR_NULL(folio))) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        BUG_ON(start    >= folio_end_pos(folio));
        BUG_ON(end      <= folio_pos(folio));

        start_offset    = max(start, folio_pos(folio)) - folio_pos(folio);
        end_offset      = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);

        /* Folio boundary? Nothing to do */
        if (start_offset == 0 &&
            end_offset == folio_size(folio)) {
                ret = 0;
                goto unlock;
        }

        s = bch2_folio_create(folio, 0);
        if (!s) {
                ret = -ENOMEM;
                goto unlock;
        }

        if (!folio_test_uptodate(folio)) {
                ret = bch2_read_single_folio(folio, mapping);
                if (ret)
                        goto unlock;
        }

        ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
        if (ret)
                goto unlock;

        for (i = round_up(start_offset, block_bytes(c)) >> 9;
             i < round_down(end_offset, block_bytes(c)) >> 9;
             i++) {
                s->s[i].nr_replicas     = 0;

                i_sectors_delta -= s->s[i].state == SECTOR_dirty;
                bch2_folio_sector_set(folio, s, i, SECTOR_unallocated);
        }

        bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

        /*
         * Caller needs to know whether this folio will be written out by
         * writeback - doing an i_size update if necessary - or whether it will
         * be responsible for the i_size update.
         *
         * Note that we shouldn't ever see a folio beyond EOF, but check and
         * warn if so. This has been observed to be caused by a failure to
         * clean up folios after a short write, and there's still a chance
         * reclaim will fix things up.
         */
        WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
        end_pos = folio_end_pos(folio);
        if (inode->v.i_size > folio_pos(folio))
                end_pos = min_t(u64, inode->v.i_size, end_pos);
        ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;

        folio_zero_segment(folio, start_offset, end_offset);

        /*
         * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
         *
         * XXX: because we aren't currently tracking whether the folio has
         * actual data in it (vs. just 0s, or only partially written) this is
         * wrong. ick.
         */
        BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));

        /*
         * This removes any writeable userspace mappings; we need to force
         * .page_mkwrite to be called again before any mmapped writes, to
         * redirty the full page:
         */
        folio_mkclean(folio);
        filemap_dirty_folio(mapping, folio);
unlock:
        folio_unlock(folio);
        folio_put(folio);
out:
        return ret;
}

static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
{
        return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
                                     from, ANYSINT_MAX(loff_t));
}

static int bch2_truncate_folios(struct bch_inode_info *inode,
                                loff_t start, loff_t end)
{
        int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
                                        start, end);

        if (ret >= 0 &&
            start >> PAGE_SHIFT != end >> PAGE_SHIFT)
                ret = __bch2_truncate_folio(inode,
                                        (end - 1) >> PAGE_SHIFT,
                                        start, end);
        return ret;
}

static int bch2_extend(struct mnt_idmap *idmap,
                       struct bch_inode_info *inode,
                       struct bch_inode_unpacked *inode_u,
                       struct iattr *iattr)
{
        struct address_space *mapping = inode->v.i_mapping;
        int ret;

        /*
         * sync appends:
         *
         * this has to be done _before_ extending i_size:
         */
        ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
        if (ret)
                return ret;

        truncate_setsize(&inode->v, iattr->ia_size);

        return bch2_setattr_nonsize(idmap, inode, iattr);
}

static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
                                   struct bch_inode_unpacked *bi,
                                   void *p)
{
        bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
        return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
                                  struct bch_inode_unpacked *bi, void *p)
{
        u64 *new_i_size = p;

        bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
        bi->bi_size = *new_i_size;
        return 0;
}

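/*
 * Truncate proper: the new i_size is first written with
 * BCH_INODE_I_SIZE_DIRTY set, then extents past the new EOF are deleted, and
 * only then is the flag cleared - so a truncate interrupted by a crash can be
 * finished during recovery.
 */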
int bch2_truncate(struct mnt_idmap *idmap,
                  struct bch_inode_info *inode, struct iattr *iattr)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        struct bch_inode_unpacked inode_u;
        u64 new_i_size = iattr->ia_size;
        s64 i_sectors_delta = 0;
        int ret = 0;

        /*
         * If the truncate call will change the size of the file, the
         * ctime and mtime should be updated. If the size will not change,
         * we do not need to update them.
         */
        if (iattr->ia_size != inode->v.i_size) {
                if (!(iattr->ia_valid & ATTR_MTIME))
                        ktime_get_coarse_real_ts64(&iattr->ia_mtime);
                if (!(iattr->ia_valid & ATTR_CTIME))
                        ktime_get_coarse_real_ts64(&iattr->ia_ctime);
                iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
        }

        inode_dio_wait(&inode->v);
        bch2_pagecache_block_get(inode);

        ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
        if (ret)
                goto err;

        /*
         * Check this before the next assertion; on filesystem error our
         * normal invariants are a bit broken (truncate has to truncate the
         * page cache before the inode).
         */
        ret = bch2_journal_error(&c->journal);
        if (ret)
                goto err;

        WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
                  inode->v.i_size < inode_u.bi_size,
                  "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
                  (u64) inode->v.i_size, inode_u.bi_size);

        if (iattr->ia_size > inode->v.i_size) {
                ret = bch2_extend(idmap, inode, &inode_u, iattr);
                goto err;
        }

        iattr->ia_valid &= ~ATTR_SIZE;

        ret = bch2_truncate_folio(inode, iattr->ia_size);
        if (unlikely(ret < 0))
                goto err;

        /*
         * When extending, we're going to write the new i_size to disk
         * immediately so we need to flush anything above the current on disk
         * i_size first:
         *
         * Also, when extending we need to flush the page that i_size currently
         * straddles - if it's mapped to userspace, we need to ensure that
         * userspace has to redirty it and call .mkwrite -> set_page_dirty
         * again to allocate the part of the page that was extended.
         */
        if (iattr->ia_size > inode_u.bi_size)
                ret = filemap_write_and_wait_range(mapping,
                                inode_u.bi_size,
                                iattr->ia_size - 1);
        else if (iattr->ia_size & (PAGE_SIZE - 1))
                ret = filemap_write_and_wait_range(mapping,
                                round_down(iattr->ia_size, PAGE_SIZE),
                                iattr->ia_size - 1);
        if (ret)
                goto err;

        mutex_lock(&inode->ei_update_lock);
        ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
                               &new_i_size, 0);
        mutex_unlock(&inode->ei_update_lock);

        if (unlikely(ret))
                goto err;

        truncate_setsize(&inode->v, iattr->ia_size);

        ret = bch2_fpunch(c, inode_inum(inode),
                        round_up(iattr->ia_size, block_bytes(c)) >> 9,
                        U64_MAX, &i_sectors_delta);
        bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

        bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
                                !bch2_journal_error(&c->journal), c,
                                "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
                                inode->v.i_ino, (u64) inode->v.i_blocks,
                                inode->ei_inode.bi_sectors);
        if (unlikely(ret))
                goto err;

        mutex_lock(&inode->ei_update_lock);
        ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
        mutex_unlock(&inode->ei_update_lock);

        ret = bch2_setattr_nonsize(idmap, inode, iattr);
err:
        bch2_pagecache_block_put(inode);
        return bch2_err_class(ret);
}

/* fallocate: */

static int inode_update_times_fn(struct bch_inode_info *inode,
                                 struct bch_inode_unpacked *bi, void *p)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;

        bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
        return 0;
}

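/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial folios at either edge of the range,
 * drop the pagecache, then delete the extents covering whole blocks.
 */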
static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        u64 end         = offset + len;
        u64 block_start = round_up(offset, block_bytes(c));
        u64 block_end   = round_down(end, block_bytes(c));
        bool truncated_last_page;
        int ret = 0;

        ret = bch2_truncate_folios(inode, offset, end);
        if (unlikely(ret < 0))
                goto err;

        truncated_last_page = ret;

        truncate_pagecache_range(&inode->v, offset, end - 1);

        if (block_start < block_end) {
                s64 i_sectors_delta = 0;

                ret = bch2_fpunch(c, inode_inum(inode),
                                  block_start >> 9, block_end >> 9,
                                  &i_sectors_delta);
                bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
        }

        mutex_lock(&inode->ei_update_lock);
        if (end >= inode->v.i_size && !truncated_last_page) {
                ret = bch2_write_inode_size(c, inode, inode->v.i_size,
                                            ATTR_MTIME|ATTR_CTIME);
        } else {
                ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
                                       ATTR_MTIME|ATTR_CTIME);
        }
        mutex_unlock(&inode->ei_update_lock);
err:
        return ret;
}

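/*
 * FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE: shift every extent
 * after @offset down or up by @len. For an insert we walk backwards from the
 * end of the file so extents can be moved up without being overwritten; for
 * a collapse we punch out the range first and walk forwards.
 */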
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
                                   loff_t offset, loff_t len,
                                   bool insert)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        struct bkey_buf copy;
        struct btree_trans trans;
        struct btree_iter src, dst, del;
        loff_t shift, new_size;
        u64 src_start;
        int ret = 0;

        if ((offset | len) & (block_bytes(c) - 1))
                return -EINVAL;

        if (insert) {
                if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
                        return -EFBIG;

                if (offset >= inode->v.i_size)
                        return -EINVAL;

                src_start       = U64_MAX;
                shift           = len;
        } else {
                if (offset + len >= inode->v.i_size)
                        return -EINVAL;

                src_start       = offset + len;
                shift           = -len;
        }

        new_size = inode->v.i_size + shift;

        ret = bch2_write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
        if (ret)
                return ret;

        if (insert) {
                i_size_write(&inode->v, new_size);
                mutex_lock(&inode->ei_update_lock);
                ret = bch2_write_inode_size(c, inode, new_size,
                                            ATTR_MTIME|ATTR_CTIME);
                mutex_unlock(&inode->ei_update_lock);
        } else {
                s64 i_sectors_delta = 0;

                ret = bch2_fpunch(c, inode_inum(inode),
                                  offset >> 9, (offset + len) >> 9,
                                  &i_sectors_delta);
                bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

                if (ret)
                        return ret;
        }

        bch2_bkey_buf_init(&copy);
        bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
        bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
                        POS(inode->v.i_ino, src_start >> 9),
                        BTREE_ITER_INTENT);
        bch2_trans_copy_iter(&dst, &src);
        bch2_trans_copy_iter(&del, &src);

        while (ret == 0 ||
               bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
                struct disk_reservation disk_res =
                        bch2_disk_reservation_init(c, 0);
                struct bkey_i delete;
                struct bkey_s_c k;
                struct bpos next_pos;
                struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
                struct bpos atomic_end;
                unsigned trigger_flags = 0;
                u32 snapshot;

                bch2_trans_begin(&trans);

                ret = bch2_subvolume_get_snapshot(&trans,
                                        inode->ei_subvol, &snapshot);
                if (ret)
                        continue;

                bch2_btree_iter_set_snapshot(&src, snapshot);
                bch2_btree_iter_set_snapshot(&dst, snapshot);
                bch2_btree_iter_set_snapshot(&del, snapshot);

                bch2_trans_begin(&trans);

                k = insert
                        ? bch2_btree_iter_peek_prev(&src)
                        : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
                if ((ret = bkey_err(k)))
                        continue;

                if (!k.k || k.k->p.inode != inode->v.i_ino)
                        break;

                if (insert &&
                    bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
                        break;
reassemble:
                bch2_bkey_buf_reassemble(&copy, c, k);

                if (insert &&
                    bkey_lt(bkey_start_pos(k.k), move_pos))
                        bch2_cut_front(move_pos, copy.k);

                copy.k->k.p.offset += shift >> 9;
                bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));

                ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
                if (ret)
                        continue;

                if (!bkey_eq(atomic_end, copy.k->k.p)) {
                        if (insert) {
                                move_pos = atomic_end;
                                move_pos.offset -= shift >> 9;
                                goto reassemble;
                        } else {
                                bch2_cut_back(atomic_end, copy.k);
                        }
                }

                bkey_init(&delete.k);
                delete.k.p = copy.k->k.p;
                delete.k.size = copy.k->k.size;
                delete.k.p.offset -= shift >> 9;
                bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));

                next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;

                if (copy.k->k.size != k.k->size) {
                        /* We might end up splitting compressed extents: */
                        unsigned nr_ptrs =
                                bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));

                        ret = bch2_disk_reservation_get(c, &disk_res,
                                        copy.k->k.size, nr_ptrs,
                                        BCH_DISK_RESERVATION_NOFAIL);
                        BUG_ON(ret);
                }

                ret =   bch2_btree_iter_traverse(&del) ?:
                        bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
                        bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
                        bch2_trans_commit(&trans, &disk_res, NULL,
                                          BTREE_INSERT_NOFAIL);
                bch2_disk_reservation_put(c, &disk_res);

                if (!ret)
                        bch2_btree_iter_set_pos(&src, next_pos);
        }
        bch2_trans_iter_exit(&trans, &del);
        bch2_trans_iter_exit(&trans, &dst);
        bch2_trans_iter_exit(&trans, &src);
        bch2_trans_exit(&trans);
        bch2_bkey_buf_exit(&copy, c);

        if (ret)
                return ret;

        mutex_lock(&inode->ei_update_lock);
        if (!insert) {
                i_size_write(&inode->v, new_size);
                ret = bch2_write_inode_size(c, inode, new_size,
                                            ATTR_MTIME|ATTR_CTIME);
        } else {
                /* We need an inode update to update bi_journal_seq for fsync: */
                ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
                                       ATTR_MTIME|ATTR_CTIME);
        }
        mutex_unlock(&inode->ei_update_lock);
        return ret;
}

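/*
 * Walk the extents in the range: regions that are already sufficiently
 * allocated or reserved are skipped; holes get a quota reservation and are
 * filled via bch2_extent_fallocate(). In FALLOC_FL_ZERO_RANGE mode existing
 * data is fallocated over as well.
 */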
static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                             u64 start_sector, u64 end_sector)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans trans;
        struct btree_iter iter;
        struct bpos end_pos = POS(inode->v.i_ino, end_sector);
        struct bch_io_opts opts;
        int ret = 0;

        bch2_inode_opts_get(&opts, c, &inode->ei_inode);
        bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);

        bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
                        POS(inode->v.i_ino, start_sector),
                        BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

        while (!ret && bkey_lt(iter.pos, end_pos)) {
                s64 i_sectors_delta = 0;
                struct quota_res quota_res = { 0 };
                struct bkey_s_c k;
                unsigned sectors;
                bool is_allocation;
                u64 hole_start, hole_end;
                u32 snapshot;

                bch2_trans_begin(&trans);

                ret = bch2_subvolume_get_snapshot(&trans,
                                        inode->ei_subvol, &snapshot);
                if (ret)
                        goto bkey_err;

                bch2_btree_iter_set_snapshot(&iter, snapshot);

                k = bch2_btree_iter_peek_slot(&iter);
                if ((ret = bkey_err(k)))
                        goto bkey_err;

                hole_start      = iter.pos.offset;
                hole_end        = bpos_min(k.k->p, end_pos).offset;
                is_allocation   = bkey_extent_is_allocation(k.k);

                /* already reserved */
                if (bkey_extent_is_reservation(k) &&
                    bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
                        bch2_btree_iter_advance(&iter);
                        continue;
                }

                if (bkey_extent_is_data(k.k) &&
                    !(mode & FALLOC_FL_ZERO_RANGE)) {
                        bch2_btree_iter_advance(&iter);
                        continue;
                }

                if (!(mode & FALLOC_FL_ZERO_RANGE)) {
                        /*
                         * Lock ordering - can't be holding btree locks while
                         * blocking on a folio lock:
                         */
                        if (bch2_clamp_data_hole(&inode->v,
                                                 &hole_start,
                                                 &hole_end,
                                                 opts.data_replicas, true))
                                ret = drop_locks_do(&trans,
                                        (bch2_clamp_data_hole(&inode->v,
                                                              &hole_start,
                                                              &hole_end,
                                                              opts.data_replicas, false), 0));
                        bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));

                        if (ret)
                                goto bkey_err;

                        if (hole_start == hole_end)
                                continue;
                }

                sectors = hole_end - hole_start;

                if (!is_allocation) {
                        ret = bch2_quota_reservation_add(c, inode,
                                        &quota_res, sectors, true);
                        if (unlikely(ret))
                                goto bkey_err;
                }

                ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
                                            sectors, opts, &i_sectors_delta,
                                            writepoint_hashed((unsigned long) current));
                if (ret)
                        goto bkey_err;

                bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);

                drop_locks_do(&trans,
                        (bch2_mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
bkey_err:
                bch2_quota_reservation_put(c, inode, &quota_res);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
        }

        if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
                struct quota_res quota_res = { 0 };
                s64 i_sectors_delta = 0;

                bch2_fpunch_at(&trans, &iter, inode_inum(inode),
                               end_sector, &i_sectors_delta);
                bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
                bch2_quota_reservation_put(c, inode, &quota_res);
        }

        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return ret;
}

static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
                            loff_t offset, loff_t len)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        u64 end         = offset + len;
        u64 block_start = round_down(offset,    block_bytes(c));
        u64 block_end   = round_up(end,         block_bytes(c));
        bool truncated_last_page = false;
        int ret, ret2 = 0;

        if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
                ret = inode_newsize_ok(&inode->v, end);
                if (ret)
                        return ret;
        }

        if (mode & FALLOC_FL_ZERO_RANGE) {
                ret = bch2_truncate_folios(inode, offset, end);
                if (unlikely(ret < 0))
                        return ret;

                truncated_last_page = ret;

                truncate_pagecache_range(&inode->v, offset, end - 1);

                block_start     = round_up(offset,      block_bytes(c));
                block_end       = round_down(end,       block_bytes(c));
        }

        ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);

        /*
         * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
         * so that the VFS cache i_size is consistent with the btree i_size:
         */
        if (ret &&
            !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
                return ret;

        if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
                end = inode->v.i_size;

        if (end >= inode->v.i_size &&
            (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
             !(mode & FALLOC_FL_KEEP_SIZE))) {
                spin_lock(&inode->v.i_lock);
                i_size_write(&inode->v, end);
                spin_unlock(&inode->v.i_lock);

                mutex_lock(&inode->ei_update_lock);
                ret2 = bch2_write_inode_size(c, inode, end, 0);
                mutex_unlock(&inode->ei_update_lock);
        }

        return ret ?: ret2;
}

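/*
 * fallocate() entry point: take a write ref so this can't race with the
 * filesystem going read-only, block direct IO and the pagecache, then
 * dispatch on @mode.
 */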
long bch2_fallocate_dispatch(struct file *file, int mode,
                             loff_t offset, loff_t len)
{
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        long ret;

        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
                return -EROFS;

        inode_lock(&inode->v);
        inode_dio_wait(&inode->v);
        bch2_pagecache_block_get(inode);

        ret = file_modified(file);
        if (ret)
                goto err;

        if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
                ret = bchfs_fallocate(inode, mode, offset, len);
        else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
                ret = bchfs_fpunch(inode, offset, len);
        else if (mode == FALLOC_FL_INSERT_RANGE)
                ret = bchfs_fcollapse_finsert(inode, offset, len, true);
        else if (mode == FALLOC_FL_COLLAPSE_RANGE)
                ret = bchfs_fcollapse_finsert(inode, offset, len, false);
        else
                ret = -EOPNOTSUPP;
err:
        bch2_pagecache_block_put(inode);
        inode_unlock(&inode->v);
        bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);

        return bch2_err_class(ret);
}

/*
 * Take a quota reservation for unallocated blocks in a given file range.
 * Does not check pagecache.
 */
static int quota_reserve_range(struct bch_inode_info *inode,
                               struct quota_res *res,
                               u64 start, u64 end)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        u32 snapshot;
        u64 sectors = end - start;
        u64 pos = start;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);
retry:
        bch2_trans_begin(&trans);

        ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
        if (ret)
                goto err;

        bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
                             SPOS(inode->v.i_ino, pos, snapshot), 0);

        while (!(ret = btree_trans_too_many_iters(&trans)) &&
               (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
               !(ret = bkey_err(k))) {
                if (bkey_extent_is_allocation(k.k)) {
                        u64 s = min(end, k.k->p.offset) -
                                max(start, bkey_start_offset(k.k));
                        BUG_ON(s > sectors);
                        sectors -= s;
                }
                bch2_btree_iter_advance(&iter);
        }
        pos = iter.pos.offset;
        bch2_trans_iter_exit(&trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_exit(&trans);

        if (ret)
                return ret;

        return bch2_quota_reservation_add(c, inode, res, sectors, true);
}

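/*
 * remap_file_range() - reflink. Source and destination offsets must be block
 * aligned; the length is rounded up to a whole block, so we may remap
 * slightly more than requested and clamp the returned length afterwards.
 */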
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
                             struct file *file_dst, loff_t pos_dst,
                             loff_t len, unsigned remap_flags)
{
        struct bch_inode_info *src = file_bch_inode(file_src);
        struct bch_inode_info *dst = file_bch_inode(file_dst);
        struct bch_fs *c = src->v.i_sb->s_fs_info;
        struct quota_res quota_res = { 0 };
        s64 i_sectors_delta = 0;
        u64 aligned_len;
        loff_t ret = 0;

        if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
                return -EINVAL;

        if (remap_flags & REMAP_FILE_DEDUP)
                return -EOPNOTSUPP;

        if ((pos_src & (block_bytes(c) - 1)) ||
            (pos_dst & (block_bytes(c) - 1)))
                return -EINVAL;

        if (src == dst &&
            abs(pos_src - pos_dst) < len)
                return -EINVAL;

        bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

        inode_dio_wait(&src->v);
        inode_dio_wait(&dst->v);

        ret = generic_remap_file_range_prep(file_src, pos_src,
                                            file_dst, pos_dst,
                                            &len, remap_flags);
        if (ret < 0 || len == 0)
                goto err;

        aligned_len = round_up((u64) len, block_bytes(c));

        ret = bch2_write_invalidate_inode_pages_range(dst->v.i_mapping,
                                pos_dst, pos_dst + len - 1);
        if (ret)
                goto err;

        ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
                                  (pos_dst + aligned_len) >> 9);
        if (ret)
                goto err;

        file_update_time(file_dst);

        bch2_mark_pagecache_unallocated(src, pos_src >> 9,
                                   (pos_src + aligned_len) >> 9);

        ret = bch2_remap_range(c,
                               inode_inum(dst), pos_dst >> 9,
                               inode_inum(src), pos_src >> 9,
                               aligned_len >> 9,
                               pos_dst + len, &i_sectors_delta);
        if (ret < 0)
                goto err;

        /*
         * due to alignment, we might have remapped slightly more than requested
         */
        ret = min((u64) ret << 9, (u64) len);

        bch2_i_sectors_acct(c, dst, &quota_res, i_sectors_delta);

        spin_lock(&dst->v.i_lock);
        if (pos_dst + ret > dst->v.i_size)
                i_size_write(&dst->v, pos_dst + ret);
        spin_unlock(&dst->v.i_lock);

        if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
            IS_SYNC(file_inode(file_dst)))
                ret = bch2_flush_inode(c, dst);
err:
        bch2_quota_reservation_put(c, dst, &quota_res);
        bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

        return bch2_err_class(ret);
}

/* fseek: */

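/*
 * SEEK_DATA/SEEK_HOLE have to consult both the extents btree and the
 * pagecache, since dirty data may not have been written back yet.
 */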
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        subvol_inum inum = inode_inum(inode);
        u64 isize, next_data = MAX_LFS_FILESIZE;
        u32 snapshot;
        int ret;

        isize = i_size_read(&inode->v);
        if (offset >= isize)
                return -ENXIO;

        bch2_trans_init(&trans, c, 0, 0);
retry:
        bch2_trans_begin(&trans);

        ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
        if (ret)
                goto err;

        for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
                           SPOS(inode->v.i_ino, offset >> 9, snapshot),
                           POS(inode->v.i_ino, U64_MAX),
                           0, k, ret) {
                if (bkey_extent_is_data(k.k)) {
                        next_data = max(offset, bkey_start_offset(k.k) << 9);
                        break;
                } else if (k.k->p.offset >> 9 > isize)
                        break;
        }
        bch2_trans_iter_exit(&trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_exit(&trans);
        if (ret)
                return ret;

        if (next_data > offset)
                next_data = bch2_seek_pagecache_data(&inode->v,
                                        offset, next_data, 0, false);

        if (next_data >= isize)
                return -ENXIO;

        return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}

static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        subvol_inum inum = inode_inum(inode);
        u64 isize, next_hole = MAX_LFS_FILESIZE;
        u32 snapshot;
        int ret;

        isize = i_size_read(&inode->v);
        if (offset >= isize)
                return -ENXIO;

        bch2_trans_init(&trans, c, 0, 0);
retry:
        bch2_trans_begin(&trans);

        ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
        if (ret)
                goto err;

        for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
                           SPOS(inode->v.i_ino, offset >> 9, snapshot),
                           BTREE_ITER_SLOTS, k, ret) {
                if (k.k->p.inode != inode->v.i_ino) {
                        next_hole = bch2_seek_pagecache_hole(&inode->v,
                                        offset, MAX_LFS_FILESIZE, 0, false);
                        break;
                } else if (!bkey_extent_is_data(k.k)) {
                        next_hole = bch2_seek_pagecache_hole(&inode->v,
                                        max(offset, bkey_start_offset(k.k) << 9),
                                        k.k->p.offset << 9, 0, false);

                        if (next_hole < k.k->p.offset << 9)
                                break;
                } else {
                        offset = max(offset, bkey_start_offset(k.k) << 9);
                }
        }
        bch2_trans_iter_exit(&trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_exit(&trans);
        if (ret)
                return ret;

        if (next_hole > isize)
                next_hole = isize;

        return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}

loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
        loff_t ret;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                ret = generic_file_llseek(file, offset, whence);
                break;
        case SEEK_DATA:
                ret = bch2_seek_data(file, offset);
                break;
        case SEEK_HOLE:
                ret = bch2_seek_hole(file, offset);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return bch2_err_class(ret);
}

void bch2_fs_fsio_exit(struct bch_fs *c)
{
        bioset_exit(&c->nocow_flush_bioset);
}

int bch2_fs_fsio_init(struct bch_fs *c)
{
        if (bioset_init(&c->nocow_flush_bioset,
                        1, offsetof(struct nocow_flush, bio), 0))
                return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;

        return 0;
}

#endif /* NO_BCACHEFS_FS */