git.sesse.net Git: bcachefs-tools-debian / libbcachefs / fs-io.c (blob db1385701f0827d86da1b30901b3b98944f55a83)
1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_FS
3
4 #include "bcachefs.h"
5 #include "alloc_foreground.h"
6 #include "bkey_buf.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "clock.h"
10 #include "error.h"
11 #include "extents.h"
12 #include "extent_update.h"
13 #include "fs.h"
14 #include "fs-io.h"
15 #include "fsck.h"
16 #include "inode.h"
17 #include "journal.h"
18 #include "io.h"
19 #include "keylist.h"
20 #include "quota.h"
21 #include "reflink.h"
22
23 #include <linux/aio.h>
24 #include <linux/backing-dev.h>
25 #include <linux/falloc.h>
26 #include <linux/migrate.h>
27 #include <linux/mmu_context.h>
28 #include <linux/pagevec.h>
29 #include <linux/rmap.h>
30 #include <linux/sched/signal.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/uio.h>
33 #include <linux/writeback.h>
34
35 #include <trace/events/bcachefs.h>
36 #include <trace/events/writeback.h>
37
38 static inline loff_t folio_end_pos(struct folio *folio)
39 {
40         return folio_pos(folio) + folio_size(folio);
41 }
42
43 static inline size_t folio_sectors(struct folio *folio)
44 {
45         return PAGE_SECTORS << folio_order(folio);
46 }
47
48 static inline loff_t folio_sector(struct folio *folio)
49 {
50         return folio_pos(folio) >> 9;
51 }
52
53 static inline loff_t folio_end_sector(struct folio *folio)
54 {
55         return folio_end_pos(folio) >> 9;
56 }
57
58 typedef DARRAY(struct folio *) folios;
59
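/*
 * Get a contiguous run of folios covering [start, end) from @mapping,
 * stopping at the first gap or allocation failure; FGP_CREAT is dropped once
 * we're 1MB or more past @start so a single call never creates an unbounded
 * number of new folios. Returns 0 if at least one folio was obtained.
 */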
60 static int filemap_get_contig_folios_d(struct address_space *mapping,
61                                        loff_t start, loff_t end,
62                                        int fgp_flags, gfp_t gfp,
63                                        folios *folios)
64 {
65         struct folio *f;
66         loff_t pos = start;
67         int ret = 0;
68
69         while (pos < end) {
70                 if ((u64) pos >= (u64) start + (1ULL << 20))
71                         fgp_flags &= ~FGP_CREAT;
72
73                 ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
74                 if (ret)
75                         break;
76
77                 f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
78                 if (!f)
79                         break;
80
81                 BUG_ON(folios->nr && folio_pos(f) != pos);
82
83                 pos = folio_end_pos(f);
84                 darray_push(folios, f);
85         }
86
87         if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
88                 ret = -ENOMEM;
89
90         return folios->nr ? 0 : ret;
91 }
92
93 struct nocow_flush {
94         struct closure  *cl;
95         struct bch_dev  *ca;
96         struct bio      bio;
97 };
98
99 static void nocow_flush_endio(struct bio *_bio)
100 {
102         struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
103
104         closure_put(bio->cl);
105         percpu_ref_put(&bio->ca->io_ref);
106         bio_put(&bio->bio);
107 }
108
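/*
 * Issue a REQ_OP_FLUSH to every device set in the inode's ei_devs_need_flush
 * mask (which is cleared here), taking a ref on @cl for each bio submitted so
 * the caller can wait for all the flushes with closure_sync().
 */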
109 static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
110                                                 struct bch_inode_info *inode,
111                                                 struct closure *cl)
112 {
113         struct nocow_flush *bio;
114         struct bch_dev *ca;
115         struct bch_devs_mask devs;
116         unsigned dev;
117
118         dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
119         if (dev == BCH_SB_MEMBERS_MAX)
120                 return;
121
122         devs = inode->ei_devs_need_flush;
123         memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
124
125         for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
126                 rcu_read_lock();
127                 ca = rcu_dereference(c->devs[dev]);
128                 if (ca && !percpu_ref_tryget(&ca->io_ref))
129                         ca = NULL;
130                 rcu_read_unlock();
131
132                 if (!ca)
133                         continue;
134
135                 bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
136                                                     REQ_OP_FLUSH,
137                                                     GFP_KERNEL,
138                                                     &c->nocow_flush_bioset),
139                                    struct nocow_flush, bio);
140                 bio->cl                 = cl;
141                 bio->ca                 = ca;
142                 bio->bio.bi_end_io      = nocow_flush_endio;
143                 closure_bio_submit(&bio->bio, cl);
144         }
145 }
146
147 static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
148                                          struct bch_inode_info *inode)
149 {
150         struct closure cl;
151
152         closure_init_stack(&cl);
153         bch2_inode_flush_nocow_writes_async(c, inode, &cl);
154         closure_sync(&cl);
155
156         return 0;
157 }
158
159 static inline bool bio_full(struct bio *bio, unsigned len)
160 {
161         if (bio->bi_vcnt >= bio->bi_max_vecs)
162                 return true;
163         if (bio->bi_iter.bi_size > UINT_MAX - len)
164                 return true;
165         return false;
166 }
167
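/*
 * current->faults_disabled_mapping holds the address_space that page faults
 * must not recurse on (see bch2_page_fault()); the low bit of the pointer is
 * borrowed as a flag indicating that the fault path had to drop locks.
 */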
168 static inline struct address_space *faults_disabled_mapping(void)
169 {
170         return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
171 }
172
173 static inline void set_fdm_dropped_locks(void)
174 {
175         current->faults_disabled_mapping =
176                 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
177 }
178
179 static inline bool fdm_dropped_locks(void)
180 {
181         return ((unsigned long) current->faults_disabled_mapping) & 1;
182 }
183
184 struct quota_res {
185         u64                             sectors;
186 };
187
188 struct bch_writepage_io {
189         struct bch_inode_info           *inode;
190
191         /* must be last: */
192         struct bch_write_op             op;
193 };
194
195 struct dio_write {
196         struct kiocb                    *req;
197         struct address_space            *mapping;
198         struct bch_inode_info           *inode;
199         struct mm_struct                *mm;
200         unsigned                        loop:1,
201                                         extending:1,
202                                         sync:1,
203                                         flush:1,
204                                         free_iov:1;
205         struct quota_res                quota_res;
206         u64                             written;
207
208         struct iov_iter                 iter;
209         struct iovec                    inline_vecs[2];
210
211         /* must be last: */
212         struct bch_write_op             op;
213 };
214
215 struct dio_read {
216         struct closure                  cl;
217         struct kiocb                    *req;
218         long                            ret;
219         bool                            should_dirty;
220         struct bch_read_bio             rbio;
221 };
222
223 /* pagecache_block must be held */
224 static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
225                                               loff_t start, loff_t end)
226 {
227         int ret;
228
229         /*
230          * XXX: the way this is currently implemented, we can spin if a process
231          * is continually redirtying a specific page
232          */
233         do {
234                 if (!mapping->nrpages)
235                         return 0;
236
237                 ret = filemap_write_and_wait_range(mapping, start, end);
238                 if (ret)
239                         break;
240
241                 if (!mapping->nrpages)
242                         return 0;
243
244                 ret = invalidate_inode_pages2_range(mapping,
245                                 start >> PAGE_SHIFT,
246                                 end >> PAGE_SHIFT);
247         } while (ret == -EBUSY);
248
249         return ret;
250 }
251
252 /* quotas */
253
254 #ifdef CONFIG_BCACHEFS_QUOTA
255
256 static void __bch2_quota_reservation_put(struct bch_fs *c,
257                                          struct bch_inode_info *inode,
258                                          struct quota_res *res)
259 {
260         BUG_ON(res->sectors > inode->ei_quota_reserved);
261
262         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
263                         -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
264         inode->ei_quota_reserved -= res->sectors;
265         res->sectors = 0;
266 }
267
268 static void bch2_quota_reservation_put(struct bch_fs *c,
269                                        struct bch_inode_info *inode,
270                                        struct quota_res *res)
271 {
272         if (res->sectors) {
273                 mutex_lock(&inode->ei_quota_lock);
274                 __bch2_quota_reservation_put(c, inode, res);
275                 mutex_unlock(&inode->ei_quota_lock);
276         }
277 }
278
279 static int bch2_quota_reservation_add(struct bch_fs *c,
280                                       struct bch_inode_info *inode,
281                                       struct quota_res *res,
282                                       u64 sectors,
283                                       bool check_enospc)
284 {
285         int ret;
286
287         mutex_lock(&inode->ei_quota_lock);
288         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
289                               check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
290         if (likely(!ret)) {
291                 inode->ei_quota_reserved += sectors;
292                 res->sectors += sectors;
293         }
294         mutex_unlock(&inode->ei_quota_lock);
295
296         return ret;
297 }
298
299 #else
300
301 static void __bch2_quota_reservation_put(struct bch_fs *c,
302                                          struct bch_inode_info *inode,
303                                          struct quota_res *res) {}
304
305 static void bch2_quota_reservation_put(struct bch_fs *c,
306                                        struct bch_inode_info *inode,
307                                        struct quota_res *res) {}
308
309 static int bch2_quota_reservation_add(struct bch_fs *c,
310                                       struct bch_inode_info *inode,
311                                       struct quota_res *res,
312                                       u64 sectors,
313                                       bool check_enospc)
314 {
315         return 0;
316 }
317
318 #endif
319
320 /* i_size updates: */
321
322 struct inode_new_size {
323         loff_t          new_size;
324         u64             now;
325         unsigned        fields;
326 };
327
328 static int inode_set_size(struct bch_inode_info *inode,
329                           struct bch_inode_unpacked *bi,
330                           void *p)
331 {
332         struct inode_new_size *s = p;
333
334         bi->bi_size = s->new_size;
335         if (s->fields & ATTR_ATIME)
336                 bi->bi_atime = s->now;
337         if (s->fields & ATTR_MTIME)
338                 bi->bi_mtime = s->now;
339         if (s->fields & ATTR_CTIME)
340                 bi->bi_ctime = s->now;
341
342         return 0;
343 }
344
345 int __must_check bch2_write_inode_size(struct bch_fs *c,
346                                        struct bch_inode_info *inode,
347                                        loff_t new_size, unsigned fields)
348 {
349         struct inode_new_size s = {
350                 .new_size       = new_size,
351                 .now            = bch2_current_time(c),
352                 .fields         = fields,
353         };
354
355         return bch2_write_inode(c, inode, inode_set_size, &s, fields);
356 }
357
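/*
 * Adjust the vfs inode's i_blocks and do the corresponding quota accounting;
 * caller must hold ei_quota_lock. If a quota reservation is passed in, the
 * sectors are taken out of it, otherwise they're accounted directly.
 */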
358 static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
359                            struct quota_res *quota_res, s64 sectors)
360 {
361         bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
362                                 "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
363                                 inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
364                                 inode->ei_inode.bi_sectors);
365         inode->v.i_blocks += sectors;
366
367 #ifdef CONFIG_BCACHEFS_QUOTA
368         if (quota_res && sectors > 0) {
369                 BUG_ON(sectors > quota_res->sectors);
370                 BUG_ON(sectors > inode->ei_quota_reserved);
371
372                 quota_res->sectors -= sectors;
373                 inode->ei_quota_reserved -= sectors;
374         } else {
375                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
376         }
377 #endif
378 }
379
380 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
381                            struct quota_res *quota_res, s64 sectors)
382 {
383         if (sectors) {
384                 mutex_lock(&inode->ei_quota_lock);
385                 __i_sectors_acct(c, inode, quota_res, sectors);
386                 mutex_unlock(&inode->ei_quota_lock);
387         }
388 }
389
390 /* page state: */
391
392 /* stored in page->private: */
393
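/*
 * Per-sector state machine: sectors start out unallocated or allocated
 * (mirroring the extents btree), may pick up a reservation
 * (folio_sector_reserve()), become dirty/dirty_reserved when written in the
 * page cache (folio_sector_dirty()) and are set to SECTOR_allocated when
 * writeback submits them (see __bch2_writepage()).
 */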
394 #define BCH_FOLIO_SECTOR_STATE()        \
395         x(unallocated)                  \
396         x(reserved)                     \
397         x(dirty)                        \
398         x(dirty_reserved)               \
399         x(allocated)
400
401 enum bch_folio_sector_state {
402 #define x(n)    SECTOR_##n,
403         BCH_FOLIO_SECTOR_STATE()
404 #undef x
405 };
406
407 const char * const bch2_folio_sector_states[] = {
408 #define x(n)    #n,
409         BCH_FOLIO_SECTOR_STATE()
410 #undef x
411         NULL
412 };
413
414 static inline enum bch_folio_sector_state
415 folio_sector_dirty(enum bch_folio_sector_state state)
416 {
417         switch (state) {
418         case SECTOR_unallocated:
419                 return SECTOR_dirty;
420         case SECTOR_reserved:
421                 return SECTOR_dirty_reserved;
422         default:
423                 return state;
424         }
425 }
426
427 static inline enum bch_folio_sector_state
428 folio_sector_undirty(enum bch_folio_sector_state state)
429 {
430         switch (state) {
431         case SECTOR_dirty:
432                 return SECTOR_unallocated;
433         case SECTOR_dirty_reserved:
434                 return SECTOR_reserved;
435         default:
436                 return state;
437         }
438 }
439
440 static inline enum bch_folio_sector_state
441 folio_sector_reserve(enum bch_folio_sector_state state)
442 {
443         switch (state) {
444         case SECTOR_unallocated:
445                 return SECTOR_reserved;
446         case SECTOR_dirty:
447                 return SECTOR_dirty_reserved;
448         default:
449                 return state;
450         }
451 }
452
453 struct bch_folio_sector {
454         /* Uncompressed, fully allocated replicas (or on disk reservation): */
455         unsigned                nr_replicas:4;
456
457         /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
458         unsigned                replicas_reserved:4;
459
460         /* i_sectors: */
461         enum bch_folio_sector_state state:8;
462 };
463
464 struct bch_folio {
465         spinlock_t              lock;
466         atomic_t                write_count;
467         /*
468          * Is the sector state up to date with the btree?
469          * (Not the data itself)
470          */
471         bool                    uptodate;
472         struct bch_folio_sector s[];
473 };
474
475 static inline void folio_sector_set(struct folio *folio,
476                              struct bch_folio *s,
477                              unsigned i, unsigned n)
478 {
479         s->s[i].state = n;
480 }
481
482 static inline struct bch_folio *__bch2_folio(struct folio *folio)
483 {
484         return folio_has_private(folio)
485                 ? (struct bch_folio *) folio_get_private(folio)
486                 : NULL;
487 }
488
489 static inline struct bch_folio *bch2_folio(struct folio *folio)
490 {
491         EBUG_ON(!folio_test_locked(folio));
492
493         return __bch2_folio(folio);
494 }
495
496 /* for newly allocated folios: */
497 static void __bch2_folio_release(struct folio *folio)
498 {
499         kfree(folio_detach_private(folio));
500 }
501
502 static void bch2_folio_release(struct folio *folio)
503 {
504         EBUG_ON(!folio_test_locked(folio));
505         __bch2_folio_release(folio);
506 }
507
508 /* for newly allocated folios: */
509 static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
510 {
511         struct bch_folio *s;
512
513         s = kzalloc(sizeof(*s) +
514                     sizeof(struct bch_folio_sector) *
515                     folio_sectors(folio), GFP_NOFS|gfp);
516         if (!s)
517                 return NULL;
518
519         spin_lock_init(&s->lock);
520         folio_attach_private(folio, s);
521         return s;
522 }
523
524 static struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
525 {
526         return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
527 }
528
529 static unsigned bkey_to_sector_state(struct bkey_s_c k)
530 {
531         if (bkey_extent_is_reservation(k))
532                 return SECTOR_reserved;
533         if (bkey_extent_is_allocation(k.k))
534                 return SECTOR_allocated;
535         return SECTOR_unallocated;
536 }
537
538 static void __bch2_folio_set(struct folio *folio,
539                              unsigned pg_offset, unsigned pg_len,
540                              unsigned nr_ptrs, unsigned state)
541 {
542         struct bch_folio *s = bch2_folio_create(folio, __GFP_NOFAIL);
543         unsigned i, sectors = folio_sectors(folio);
544
545         BUG_ON(pg_offset >= sectors);
546         BUG_ON(pg_offset + pg_len > sectors);
547
548         spin_lock(&s->lock);
549
550         for (i = pg_offset; i < pg_offset + pg_len; i++) {
551                 s->s[i].nr_replicas     = nr_ptrs;
552                 folio_sector_set(folio, s, i, state);
553         }
554
555         if (i == sectors)
556                 s->uptodate = true;
557
558         spin_unlock(&s->lock);
559 }
560
561 /*
562  * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
563  * extents btree:
564  */
565 static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
566                           struct folio **folios, unsigned nr_folios)
567 {
568         struct btree_trans trans;
569         struct btree_iter iter;
570         struct bkey_s_c k;
571         u64 offset = folio_sector(folios[0]);
572         unsigned folio_idx = 0;
573         u32 snapshot;
574         int ret;
575
576         bch2_trans_init(&trans, c, 0, 0);
577 retry:
578         bch2_trans_begin(&trans);
579
580         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
581         if (ret)
582                 goto err;
583
584         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
585                            SPOS(inum.inum, offset, snapshot),
586                            BTREE_ITER_SLOTS, k, ret) {
587                 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
588                 unsigned state = bkey_to_sector_state(k);
589
590                 while (folio_idx < nr_folios) {
591                         struct folio *folio = folios[folio_idx];
592                         u64 folio_start = folio_sector(folio);
593                         u64 folio_end   = folio_end_sector(folio);
594                         unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
595                         unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
596
597                         BUG_ON(k.k->p.offset < folio_start);
598                         BUG_ON(bkey_start_offset(k.k) > folio_end);
599
600                         if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate)
601                                 __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
602
603                         if (k.k->p.offset < folio_end)
604                                 break;
605                         folio_idx++;
606                 }
607
608                 if (folio_idx == nr_folios)
609                         break;
610         }
611
612         offset = iter.pos.offset;
613         bch2_trans_iter_exit(&trans, &iter);
614 err:
615         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
616                 goto retry;
617         bch2_trans_exit(&trans);
618
619         return ret;
620 }
621
622 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
623 {
624         struct bvec_iter iter;
625         struct folio_vec fv;
626         unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
627                 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
628         unsigned state = bkey_to_sector_state(k);
629
630         bio_for_each_folio(fv, bio, iter)
631                 __bch2_folio_set(fv.fv_folio,
632                                  fv.fv_offset >> 9,
633                                  fv.fv_len >> 9,
634                                  nr_ptrs, state);
635 }
636
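/*
 * Walk cached folios in the sector range [start, end) and clear their
 * per-sector nr_replicas counts, so that future writes to those sectors will
 * take fresh disk reservations.
 */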
637 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
638                                        u64 start, u64 end)
639 {
640         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
641         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
642         struct folio_batch fbatch;
643         unsigned i, j;
644
645         if (end <= start)
646                 return;
647
648         folio_batch_init(&fbatch);
649
650         while (filemap_get_folios(inode->v.i_mapping,
651                                   &index, end_index, &fbatch)) {
652                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
653                         struct folio *folio = fbatch.folios[i];
654                         u64 folio_start = folio_sector(folio);
655                         u64 folio_end = folio_end_sector(folio);
656                         unsigned folio_offset = max(start, folio_start) - folio_start;
657                         unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
658                         struct bch_folio *s;
659
660                         BUG_ON(end <= folio_start);
661
662                         folio_lock(folio);
663                         s = bch2_folio(folio);
664
665                         if (s) {
666                                 spin_lock(&s->lock);
667                                 for (j = folio_offset; j < folio_offset + folio_len; j++)
668                                         s->s[j].nr_replicas = 0;
669                                 spin_unlock(&s->lock);
670                         }
671
672                         folio_unlock(folio);
673                 }
674                 folio_batch_release(&fbatch);
675                 cond_resched();
676         }
677 }
678
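/*
 * Walk cached folios in the sector range [start, end) and move their sectors
 * to the reserved states (see folio_sector_reserve()), decrementing i_blocks
 * for each sector that was SECTOR_dirty.
 */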
679 static void mark_pagecache_reserved(struct bch_inode_info *inode,
680                                     u64 start, u64 end)
681 {
682         struct bch_fs *c = inode->v.i_sb->s_fs_info;
683         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
684         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
685         struct folio_batch fbatch;
686         s64 i_sectors_delta = 0;
687         unsigned i, j;
688
689         if (end <= start)
690                 return;
691
692         folio_batch_init(&fbatch);
693
694         while (filemap_get_folios(inode->v.i_mapping,
695                                   &index, end_index, &fbatch)) {
696                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
697                         struct folio *folio = fbatch.folios[i];
698                         u64 folio_start = folio_sector(folio);
699                         u64 folio_end = folio_end_sector(folio);
700                         unsigned folio_offset = max(start, folio_start) - folio_start;
701                         unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
702                         struct bch_folio *s;
703
704                         BUG_ON(end <= folio_start);
705
706                         folio_lock(folio);
707                         s = bch2_folio(folio);
708
709                         if (s) {
710                                 spin_lock(&s->lock);
711                                 for (j = folio_offset; j < folio_offset + folio_len; j++) {
712                                         i_sectors_delta -= s->s[j].state == SECTOR_dirty;
713                                         folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
714                                 }
715                                 spin_unlock(&s->lock);
716                         }
717
718                         folio_unlock(folio);
719                 }
720                 folio_batch_release(&fbatch);
721                 cond_resched();
722         }
723
724         i_sectors_acct(c, inode, NULL, i_sectors_delta);
725 }
726
727 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
728 {
729         /* XXX: this should not be open coded */
730         return inode->ei_inode.bi_data_replicas
731                 ? inode->ei_inode.bi_data_replicas - 1
732                 : c->opts.data_replicas;
733 }
734
735 static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
736                                           unsigned nr_replicas)
737 {
738         return max(0, (int) nr_replicas -
739                    s->nr_replicas -
740                    s->replicas_reserved);
741 }
742
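/*
 * Take a single disk reservation covering every sector in the folio that is
 * short on replicas (allocated or reserved), then distribute it into the
 * per-sector replicas_reserved counts.
 */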
743 static int bch2_get_folio_disk_reservation(struct bch_fs *c,
744                                 struct bch_inode_info *inode,
745                                 struct folio *folio, bool check_enospc)
746 {
747         struct bch_folio *s = bch2_folio_create(folio, 0);
748         unsigned nr_replicas = inode_nr_replicas(c, inode);
749         struct disk_reservation disk_res = { 0 };
750         unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
751         int ret;
752
753         if (!s)
754                 return -ENOMEM;
755
756         for (i = 0; i < sectors; i++)
757                 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
758
759         if (!disk_res_sectors)
760                 return 0;
761
762         ret = bch2_disk_reservation_get(c, &disk_res,
763                                         disk_res_sectors, 1,
764                                         !check_enospc
765                                         ? BCH_DISK_RESERVATION_NOFAIL
766                                         : 0);
767         if (unlikely(ret))
768                 return ret;
769
770         for (i = 0; i < sectors; i++)
771                 s->s[i].replicas_reserved +=
772                         sectors_to_reserve(&s->s[i], nr_replicas);
773
774         return 0;
775 }
776
777 struct bch2_folio_reservation {
778         struct disk_reservation disk;
779         struct quota_res        quota;
780 };
781
782 static void bch2_folio_reservation_init(struct bch_fs *c,
783                         struct bch_inode_info *inode,
784                         struct bch2_folio_reservation *res)
785 {
786         memset(res, 0, sizeof(*res));
787
788         res->disk.nr_replicas = inode_nr_replicas(c, inode);
789 }
790
791 static void bch2_folio_reservation_put(struct bch_fs *c,
792                         struct bch_inode_info *inode,
793                         struct bch2_folio_reservation *res)
794 {
795         bch2_disk_reservation_put(c, &res->disk);
796         bch2_quota_reservation_put(c, inode, &res->quota);
797 }
798
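/*
 * Reserve disk space and quota for a write to [offset, offset + len) within
 * @folio: disk space for sectors still short on replicas, quota for sectors
 * that are currently unallocated. If the quota reservation fails, the disk
 * reservation taken here is released again.
 */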
799 static int bch2_folio_reservation_get(struct bch_fs *c,
800                         struct bch_inode_info *inode,
801                         struct folio *folio,
802                         struct bch2_folio_reservation *res,
803                         unsigned offset, unsigned len)
804 {
805         struct bch_folio *s = bch2_folio_create(folio, 0);
806         unsigned i, disk_sectors = 0, quota_sectors = 0;
807         int ret;
808
809         if (!s)
810                 return -ENOMEM;
811
812         BUG_ON(!s->uptodate);
813
814         for (i = round_down(offset, block_bytes(c)) >> 9;
815              i < round_up(offset + len, block_bytes(c)) >> 9;
816              i++) {
817                 disk_sectors += sectors_to_reserve(&s->s[i],
818                                                 res->disk.nr_replicas);
819                 quota_sectors += s->s[i].state == SECTOR_unallocated;
820         }
821
822         if (disk_sectors) {
823                 ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
824                 if (unlikely(ret))
825                         return ret;
826         }
827
828         if (quota_sectors) {
829                 ret = bch2_quota_reservation_add(c, inode, &res->quota,
830                                                  quota_sectors, true);
831                 if (unlikely(ret)) {
832                         struct disk_reservation tmp = {
833                                 .sectors = disk_sectors
834                         };
835
836                         bch2_disk_reservation_put(c, &tmp);
837                         res->disk.sectors -= disk_sectors;
838                         return ret;
839                 }
840         }
841
842         return 0;
843 }
844
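/*
 * Called when a folio is being removed from the page cache: return any
 * per-sector disk reservations, undo the dirty sector accounting against
 * i_blocks, and free the attached bch_folio.
 */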
845 static void bch2_clear_folio_bits(struct folio *folio)
846 {
847         struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
848         struct bch_fs *c = inode->v.i_sb->s_fs_info;
849         struct bch_folio *s = bch2_folio(folio);
850         struct disk_reservation disk_res = { 0 };
851         int i, sectors = folio_sectors(folio), dirty_sectors = 0;
852
853         if (!s)
854                 return;
855
856         EBUG_ON(!folio_test_locked(folio));
857         EBUG_ON(folio_test_writeback(folio));
858
859         for (i = 0; i < sectors; i++) {
860                 disk_res.sectors += s->s[i].replicas_reserved;
861                 s->s[i].replicas_reserved = 0;
862
863                 dirty_sectors -= s->s[i].state == SECTOR_dirty;
864                 folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
865         }
866
867         bch2_disk_reservation_put(c, &disk_res);
868
869         i_sectors_acct(c, inode, NULL, dirty_sectors);
870
871         bch2_folio_release(folio);
872 }
873
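/*
 * Mark the sectors covered by [offset, offset + len) dirty, moving disk
 * reservation from @res into the per-sector replicas_reserved counts and
 * charging newly dirtied (previously unallocated) sectors to i_blocks and the
 * quota reservation.
 */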
874 static void bch2_set_folio_dirty(struct bch_fs *c,
875                         struct bch_inode_info *inode,
876                         struct folio *folio,
877                         struct bch2_folio_reservation *res,
878                         unsigned offset, unsigned len)
879 {
880         struct bch_folio *s = bch2_folio(folio);
881         unsigned i, dirty_sectors = 0;
882
883         WARN_ON((u64) folio_pos(folio) + offset + len >
884                 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
885
886         BUG_ON(!s->uptodate);
887
888         spin_lock(&s->lock);
889
890         for (i = round_down(offset, block_bytes(c)) >> 9;
891              i < round_up(offset + len, block_bytes(c)) >> 9;
892              i++) {
893                 unsigned sectors = sectors_to_reserve(&s->s[i],
894                                                 res->disk.nr_replicas);
895
896                 /*
897                  * This can happen if we race with the error path in
898                  * bch2_writepage_io_done():
899                  */
900                 sectors = min_t(unsigned, sectors, res->disk.sectors);
901
902                 s->s[i].replicas_reserved += sectors;
903                 res->disk.sectors -= sectors;
904
905                 dirty_sectors += s->s[i].state == SECTOR_unallocated;
906
907                 folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
908         }
909
910         spin_unlock(&s->lock);
911
912         i_sectors_acct(c, inode, &res->quota, dirty_sectors);
913
914         if (!folio_test_dirty(folio))
915                 filemap_dirty_folio(inode->v.i_mapping, folio);
916 }
917
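/*
 * If the fault is on the mapping recorded by faults_disabled_mapping() we'd
 * deadlock, so return SIGBUS; if it's on a different mapping but in the wrong
 * lock order, drop and retake the pagecache locks and report that via
 * set_fdm_dropped_locks() so the caller that disabled faults can retry.
 */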
918 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
919 {
920         struct file *file = vmf->vma->vm_file;
921         struct address_space *mapping = file->f_mapping;
922         struct address_space *fdm = faults_disabled_mapping();
923         struct bch_inode_info *inode = file_bch_inode(file);
924         int ret;
925
926         if (fdm == mapping)
927                 return VM_FAULT_SIGBUS;
928
929         /* Lock ordering: */
930         if (fdm > mapping) {
931                 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
932
933                 if (bch2_pagecache_add_tryget(inode))
934                         goto got_lock;
935
936                 bch2_pagecache_block_put(fdm_host);
937
938                 bch2_pagecache_add_get(inode);
939                 bch2_pagecache_add_put(inode);
940
941                 bch2_pagecache_block_get(fdm_host);
942
943                 /* Signal that lock has been dropped: */
944                 set_fdm_dropped_locks();
945                 return VM_FAULT_SIGBUS;
946         }
947
948         bch2_pagecache_add_get(inode);
949 got_lock:
950         ret = filemap_fault(vmf);
951         bch2_pagecache_add_put(inode);
952
953         return ret;
954 }
955
956 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
957 {
958         struct folio *folio = page_folio(vmf->page);
959         struct file *file = vmf->vma->vm_file;
960         struct bch_inode_info *inode = file_bch_inode(file);
961         struct address_space *mapping = file->f_mapping;
962         struct bch_fs *c = inode->v.i_sb->s_fs_info;
963         struct bch2_folio_reservation res;
964         unsigned len;
965         loff_t isize;
966         int ret;
967
968         bch2_folio_reservation_init(c, inode, &res);
969
970         sb_start_pagefault(inode->v.i_sb);
971         file_update_time(file);
972
973         /*
974          * Not strictly necessary, but helps avoid dio writes livelocking in
975          * write_invalidate_inode_pages_range() - can drop this if/when we get
976          * a write_invalidate_inode_pages_range() that works without dropping
977          * page lock before invalidating page
978          */
979         bch2_pagecache_add_get(inode);
980
981         folio_lock(folio);
982         isize = i_size_read(&inode->v);
983
984         if (folio->mapping != mapping || folio_pos(folio) >= isize) {
985                 folio_unlock(folio);
986                 ret = VM_FAULT_NOPAGE;
987                 goto out;
988         }
989
990         len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
991
992         if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
993                 if (bch2_folio_set(c, inode_inum(inode), &folio, 1)) {
994                         folio_unlock(folio);
995                         ret = VM_FAULT_SIGBUS;
996                         goto out;
997                 }
998         }
999
1000         if (bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
1001                 folio_unlock(folio);
1002                 ret = VM_FAULT_SIGBUS;
1003                 goto out;
1004         }
1005
1006         bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
1007         bch2_folio_reservation_put(c, inode, &res);
1008
1009         folio_wait_stable(folio);
1010         ret = VM_FAULT_LOCKED;
1011 out:
1012         bch2_pagecache_add_put(inode);
1013         sb_end_pagefault(inode->v.i_sb);
1014
1015         return ret;
1016 }
1017
1018 void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1019 {
1020         if (offset || length < folio_size(folio))
1021                 return;
1022
1023         bch2_clear_folio_bits(folio);
1024 }
1025
1026 bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
1027 {
1028         if (folio_test_dirty(folio) || folio_test_writeback(folio))
1029                 return false;
1030
1031         bch2_clear_folio_bits(folio);
1032         return true;
1033 }
1034
1035 /* readpage(s): */
1036
1037 static void bch2_readpages_end_io(struct bio *bio)
1038 {
1039         struct bvec_iter_all iter;
1040         struct folio_vec fv;
1041
1042         bio_for_each_folio_all(fv, bio, iter) {
1043                 if (!bio->bi_status) {
1044                         folio_mark_uptodate(fv.fv_folio);
1045                 } else {
1046                         folio_clear_uptodate(fv.fv_folio);
1047                         folio_set_error(fv.fv_folio);
1048                 }
1049                 folio_unlock(fv.fv_folio);
1050         }
1051
1052         bio_put(bio);
1053 }
1054
1055 struct readpages_iter {
1056         struct address_space    *mapping;
1057         unsigned                idx;
1058         folios                  folios;
1059 };
1060
1061 static int readpages_iter_init(struct readpages_iter *iter,
1062                                struct readahead_control *ractl)
1063 {
1064         struct folio **fi;
1065         int ret;
1066
1067         memset(iter, 0, sizeof(*iter));
1068
1069         iter->mapping = ractl->mapping;
1070
1071         ret = filemap_get_contig_folios_d(iter->mapping,
1072                                 ractl->_index << PAGE_SHIFT,
1073                                 (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
1074                                 0, mapping_gfp_mask(iter->mapping),
1075                                 &iter->folios);
1076         if (ret)
1077                 return ret;
1078
1079         darray_for_each(iter->folios, fi) {
1080                 ractl->_nr_pages -= 1U << folio_order(*fi);
1081                 __bch2_folio_create(*fi, __GFP_NOFAIL);
1082                 folio_put(*fi);
1083                 folio_put(*fi);
1084         }
1085
1086         return 0;
1087 }
1088
1089 static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
1090 {
1091         if (iter->idx >= iter->folios.nr)
1092                 return NULL;
1093         return iter->folios.data[iter->idx];
1094 }
1095
1096 static inline void readpage_iter_advance(struct readpages_iter *iter)
1097 {
1098         iter->idx++;
1099 }
1100
1101 static bool extent_partial_reads_expensive(struct bkey_s_c k)
1102 {
1103         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1104         struct bch_extent_crc_unpacked crc;
1105         const union bch_extent_entry *i;
1106
1107         bkey_for_each_crc(k.k, ptrs, crc, i)
1108                 if (crc.csum_type || crc.compression_type)
1109                         return true;
1110         return false;
1111 }
1112
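/*
 * Widen the read bio to cover more of the current extent: first consume
 * folios already prepared by the readahead iterator, then, if @get_more
 * (i.e. partial reads of this extent would be expensive), allocate and insert
 * additional folios into the page cache.
 */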
1113 static void readpage_bio_extend(struct readpages_iter *iter,
1114                                 struct bio *bio,
1115                                 unsigned sectors_this_extent,
1116                                 bool get_more)
1117 {
1118         while (bio_sectors(bio) < sectors_this_extent &&
1119                bio->bi_vcnt < bio->bi_max_vecs) {
1120                 struct folio *folio = readpage_iter_peek(iter);
1121                 int ret;
1122
1123                 if (folio) {
1124                         readpage_iter_advance(iter);
1125                 } else {
1126                         pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
1127
1128                         if (!get_more)
1129                                 break;
1130
1131                         folio = xa_load(&iter->mapping->i_pages, folio_offset);
1132                         if (folio && !xa_is_value(folio))
1133                                 break;
1134
1135                         folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
1136                         if (!folio)
1137                                 break;
1138
1139                         if (!__bch2_folio_create(folio, 0)) {
1140                                 folio_put(folio);
1141                                 break;
1142                         }
1143
1144                         ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_NOFS);
1145                         if (ret) {
1146                                 __bch2_folio_release(folio);
1147                                 folio_put(folio);
1148                                 break;
1149                         }
1150
1151                         folio_put(folio);
1152                 }
1153
1154                 BUG_ON(folio_sector(folio) != bio_end_sector(bio));
1155
1156                 BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
1157         }
1158 }
1159
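/*
 * Main buffered read loop: walk the extents btree starting at the bio's
 * sector, resolve reflink indirection, optionally extend the bio across the
 * extent via readpage_bio_extend(), initialize folio sector state and hand
 * each fragment to bch2_read_extent() until the whole bio has been submitted.
 */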
1160 static void bchfs_read(struct btree_trans *trans,
1161                        struct bch_read_bio *rbio,
1162                        subvol_inum inum,
1163                        struct readpages_iter *readpages_iter)
1164 {
1165         struct bch_fs *c = trans->c;
1166         struct btree_iter iter;
1167         struct bkey_buf sk;
1168         int flags = BCH_READ_RETRY_IF_STALE|
1169                 BCH_READ_MAY_PROMOTE;
1170         u32 snapshot;
1171         int ret = 0;
1172
1173         rbio->c = c;
1174         rbio->start_time = local_clock();
1175         rbio->subvol = inum.subvol;
1176
1177         bch2_bkey_buf_init(&sk);
1178 retry:
1179         bch2_trans_begin(trans);
1180         iter = (struct btree_iter) { NULL };
1181
1182         ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1183         if (ret)
1184                 goto err;
1185
1186         bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1187                              SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1188                              BTREE_ITER_SLOTS);
1189         while (1) {
1190                 struct bkey_s_c k;
1191                 unsigned bytes, sectors, offset_into_extent;
1192                 enum btree_id data_btree = BTREE_ID_extents;
1193
1194                 /*
1195                  * read_extent -> io_time_reset may cause a transaction restart
1196                  * without returning an error, we need to check for that here:
1197                  */
1198                 ret = bch2_trans_relock(trans);
1199                 if (ret)
1200                         break;
1201
1202                 bch2_btree_iter_set_pos(&iter,
1203                                 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1204
1205                 k = bch2_btree_iter_peek_slot(&iter);
1206                 ret = bkey_err(k);
1207                 if (ret)
1208                         break;
1209
1210                 offset_into_extent = iter.pos.offset -
1211                         bkey_start_offset(k.k);
1212                 sectors = k.k->size - offset_into_extent;
1213
1214                 bch2_bkey_buf_reassemble(&sk, c, k);
1215
1216                 ret = bch2_read_indirect_extent(trans, &data_btree,
1217                                         &offset_into_extent, &sk);
1218                 if (ret)
1219                         break;
1220
1221                 k = bkey_i_to_s_c(sk.k);
1222
1223                 sectors = min(sectors, k.k->size - offset_into_extent);
1224
1225                 if (readpages_iter)
1226                         readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
1227                                             extent_partial_reads_expensive(k));
1228
1229                 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1230                 swap(rbio->bio.bi_iter.bi_size, bytes);
1231
1232                 if (rbio->bio.bi_iter.bi_size == bytes)
1233                         flags |= BCH_READ_LAST_FRAGMENT;
1234
1235                 bch2_bio_page_state_set(&rbio->bio, k);
1236
1237                 bch2_read_extent(trans, rbio, iter.pos,
1238                                  data_btree, k, offset_into_extent, flags);
1239
1240                 if (flags & BCH_READ_LAST_FRAGMENT)
1241                         break;
1242
1243                 swap(rbio->bio.bi_iter.bi_size, bytes);
1244                 bio_advance(&rbio->bio, bytes);
1245
1246                 ret = btree_trans_too_many_iters(trans);
1247                 if (ret)
1248                         break;
1249         }
1250 err:
1251         bch2_trans_iter_exit(trans, &iter);
1252
1253         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1254                 goto retry;
1255
1256         if (ret) {
1257                 bch_err_inum_offset_ratelimited(c,
1258                                 iter.pos.inode,
1259                                 iter.pos.offset << 9,
1260                                 "read error %i from btree lookup", ret);
1261                 rbio->bio.bi_status = BLK_STS_IOERR;
1262                 bio_endio(&rbio->bio);
1263         }
1264
1265         bch2_bkey_buf_exit(&sk, c);
1266 }
1267
1268 void bch2_readahead(struct readahead_control *ractl)
1269 {
1270         struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1271         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1272         struct bch_io_opts opts;
1273         struct btree_trans trans;
1274         struct folio *folio;
1275         struct readpages_iter readpages_iter;
1276         int ret;
1277
1278         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1279
1280         ret = readpages_iter_init(&readpages_iter, ractl);
1281         BUG_ON(ret);
1282
1283         bch2_trans_init(&trans, c, 0, 0);
1284
1285         bch2_pagecache_add_get(inode);
1286
1287         while ((folio = readpage_iter_peek(&readpages_iter))) {
1288                 unsigned n = min_t(unsigned,
1289                                    readpages_iter.folios.nr -
1290                                    readpages_iter.idx,
1291                                    BIO_MAX_VECS);
1292                 struct bch_read_bio *rbio =
1293                         rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
1294                                                    GFP_NOFS, &c->bio_read),
1295                                   opts);
1296
1297                 readpage_iter_advance(&readpages_iter);
1298
1299                 rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1300                 rbio->bio.bi_end_io = bch2_readpages_end_io;
1301                 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1302
1303                 bchfs_read(&trans, rbio, inode_inum(inode),
1304                            &readpages_iter);
1305         }
1306
1307         bch2_pagecache_add_put(inode);
1308
1309         bch2_trans_exit(&trans);
1310         darray_exit(&readpages_iter.folios);
1311 }
1312
1313 static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
1314                              subvol_inum inum, struct folio *folio)
1315 {
1316         struct btree_trans trans;
1317
1318         bch2_folio_create(folio, __GFP_NOFAIL);
1319
1320         rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
1321         rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1322         BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1323
1324         bch2_trans_init(&trans, c, 0, 0);
1325         bchfs_read(&trans, rbio, inum, NULL);
1326         bch2_trans_exit(&trans);
1327 }
1328
1329 static void bch2_read_single_folio_end_io(struct bio *bio)
1330 {
1331         complete(bio->bi_private);
1332 }
1333
1334 static int bch2_read_single_folio(struct folio *folio,
1335                                   struct address_space *mapping)
1336 {
1337         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1338         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1339         struct bch_read_bio *rbio;
1340         struct bch_io_opts opts;
1341         int ret;
1342         DECLARE_COMPLETION_ONSTACK(done);
1343
1344         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1345
1346         rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
1347                          opts);
1348         rbio->bio.bi_private = &done;
1349         rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
1350
1351         __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
1352         wait_for_completion(&done);
1353
1354         ret = blk_status_to_errno(rbio->bio.bi_status);
1355         bio_put(&rbio->bio);
1356
1357         if (ret < 0)
1358                 return ret;
1359
1360         folio_mark_uptodate(folio);
1361         return 0;
1362 }
1363
1364 int bch2_read_folio(struct file *file, struct folio *folio)
1365 {
1366         int ret;
1367
1368         ret = bch2_read_single_folio(folio, folio->mapping);
1369         folio_unlock(folio);
1370         return bch2_err_class(ret);
1371 }
1372
1373 /* writepages: */
1374
1375 struct bch_writepage_state {
1376         struct bch_writepage_io *io;
1377         struct bch_io_opts      opts;
1378         struct bch_folio_sector *tmp;
1379         unsigned                tmp_sectors;
1380 };
1381
1382 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1383                                                                   struct bch_inode_info *inode)
1384 {
1385         struct bch_writepage_state ret = { 0 };
1386
1387         bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
1388         return ret;
1389 }
1390
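/*
 * Writeback completion: on error (or when the data was written inline in the
 * btree) clear the per-sector nr_replicas counts, fix up i_blocks from the
 * write's i_sectors_delta, and end writeback on each folio once its
 * write_count drops to zero.
 */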
1391 static void bch2_writepage_io_done(struct bch_write_op *op)
1392 {
1393         struct bch_writepage_io *io =
1394                 container_of(op, struct bch_writepage_io, op);
1395         struct bch_fs *c = io->op.c;
1396         struct bio *bio = &io->op.wbio.bio;
1397         struct bvec_iter_all iter;
1398         struct folio_vec fv;
1399         unsigned i;
1400
1401         if (io->op.error) {
1402                 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1403
1404                 bio_for_each_folio_all(fv, bio, iter) {
1405                         struct bch_folio *s;
1406
1407                         folio_set_error(fv.fv_folio);
1408                         mapping_set_error(fv.fv_folio->mapping, -EIO);
1409
1410                         s = __bch2_folio(fv.fv_folio);
1411                         spin_lock(&s->lock);
1412                         for (i = 0; i < folio_sectors(fv.fv_folio); i++)
1413                                 s->s[i].nr_replicas = 0;
1414                         spin_unlock(&s->lock);
1415                 }
1416         }
1417
1418         if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1419                 bio_for_each_folio_all(fv, bio, iter) {
1420                         struct bch_folio *s;
1421
1422                         s = __bch2_folio(fv.fv_folio);
1423                         spin_lock(&s->lock);
1424                         for (i = 0; i < folio_sectors(fv.fv_folio); i++)
1425                                 s->s[i].nr_replicas = 0;
1426                         spin_unlock(&s->lock);
1427                 }
1428         }
1429
1430         /*
1431          * racing with fallocate can cause us to add fewer sectors than
1432          * expected - but we shouldn't add more sectors than expected:
1433          */
1434         WARN_ON_ONCE(io->op.i_sectors_delta > 0);
1435
1436         /*
1437          * (error (due to going RO) halfway through a page can screw that up
1438          * slightly)
1439          * XXX wtf?
1440            BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
1441          */
1442
1443         /*
1444          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1445          * before calling end_page_writeback:
1446          */
1447         i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1448
1449         bio_for_each_folio_all(fv, bio, iter) {
1450                 struct bch_folio *s = __bch2_folio(fv.fv_folio);
1451
1452                 if (atomic_dec_and_test(&s->write_count))
1453                         folio_end_writeback(fv.fv_folio);
1454         }
1455
1456         bio_put(&io->op.wbio.bio);
1457 }
1458
1459 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1460 {
1461         struct bch_writepage_io *io = w->io;
1462
1463         w->io = NULL;
1464         closure_call(&io->op.cl, bch2_write, NULL, NULL);
1465 }
1466
1467 /*
1468  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1469  * possible, else allocating a new one:
1470  */
1471 static void bch2_writepage_io_alloc(struct bch_fs *c,
1472                                     struct writeback_control *wbc,
1473                                     struct bch_writepage_state *w,
1474                                     struct bch_inode_info *inode,
1475                                     u64 sector,
1476                                     unsigned nr_replicas)
1477 {
1478         struct bch_write_op *op;
1479
1480         w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
1481                                               REQ_OP_WRITE,
1482                                               GFP_NOFS,
1483                                               &c->writepage_bioset),
1484                              struct bch_writepage_io, op.wbio.bio);
1485
1486         w->io->inode            = inode;
1487         op                      = &w->io->op;
1488         bch2_write_op_init(op, c, w->opts);
1489         op->target              = w->opts.foreground_target;
1490         op->nr_replicas         = nr_replicas;
1491         op->res.nr_replicas     = nr_replicas;
1492         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1493         op->subvol              = inode->ei_subvol;
1494         op->pos                 = POS(inode->v.i_ino, sector);
1495         op->end_io              = bch2_writepage_io_done;
1496         op->devs_need_flush     = &inode->ei_devs_need_flush;
1497         op->wbio.bio.bi_iter.bi_sector = sector;
1498         op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
1499 }
1500
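/*
 * Write out a single locked, uptodate folio: zero the part straddling i_size,
 * snapshot the per-sector reservation state, then carve the folio into runs
 * of contiguous dirty sectors and append each run to a bch_writepage_io,
 * flushing and starting a new one whenever replication, size or contiguity
 * requirements change.
 */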
1501 static int __bch2_writepage(struct page *_page,
1502                             struct writeback_control *wbc,
1503                             void *data)
1504 {
1505         struct folio *folio = page_folio(_page);
1506         struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
1507         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1508         struct bch_writepage_state *w = data;
1509         struct bch_folio *s;
1510         unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
1511         loff_t i_size = i_size_read(&inode->v);
1512         int ret;
1513
1514         EBUG_ON(!folio_test_uptodate(folio));
1515
1516         /* Is the folio fully inside i_size? */
1517         if (folio_end_pos(folio) <= i_size)
1518                 goto do_io;
1519
1520         /* Is the folio fully outside i_size? (truncate in progress) */
1521         if (folio_pos(folio) >= i_size) {
1522                 folio_unlock(folio);
1523                 return 0;
1524         }
1525
1526         /*
1527          * The folio straddles i_size.  It must be zeroed out on each and every
1528          * writepage invocation because it may be mmapped.  "A file is mapped
1529          * in multiples of the folio size.  For a file that is not a multiple of
1530          * the  folio size, the remaining memory is zeroed when mapped, and
1531          * writes to that region are not written out to the file."
1532          */
1533         folio_zero_segment(folio,
1534                            i_size - folio_pos(folio),
1535                            folio_size(folio));
1536 do_io:
1537         f_sectors = folio_sectors(folio);
1538         s = bch2_folio_create(folio, __GFP_NOFAIL);
1539
1540         if (f_sectors > w->tmp_sectors) {
1541                 kfree(w->tmp);
1542                 w->tmp = kzalloc(sizeof(struct bch_folio_sector) *
1543                                  f_sectors, __GFP_NOFAIL);
1544                 w->tmp_sectors = f_sectors;
1545         }
1546
1547         /*
1548          * Things get really hairy with errors during writeback:
1549          */
1550         ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
1551         BUG_ON(ret);
1552
1553         /* Before unlocking the page, get copy of reservations: */
1554         spin_lock(&s->lock);
1555         memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
1556
1557         for (i = 0; i < f_sectors; i++) {
1558                 if (s->s[i].state < SECTOR_dirty)
1559                         continue;
1560
1561                 nr_replicas_this_write =
1562                         min_t(unsigned, nr_replicas_this_write,
1563                               s->s[i].nr_replicas +
1564                               s->s[i].replicas_reserved);
1565         }
1566
1567         for (i = 0; i < f_sectors; i++) {
1568                 if (s->s[i].state < SECTOR_dirty)
1569                         continue;
1570
1571                 s->s[i].nr_replicas = w->opts.compression
1572                         ? 0 : nr_replicas_this_write;
1573
1574                 s->s[i].replicas_reserved = 0;
1575                 folio_sector_set(folio, s, i, SECTOR_allocated);
1576         }
1577         spin_unlock(&s->lock);
1578
1579         BUG_ON(atomic_read(&s->write_count));
1580         atomic_set(&s->write_count, 1);
1581
1582         BUG_ON(folio_test_writeback(folio));
1583         folio_start_writeback(folio);
1584
1585         folio_unlock(folio);
1586
1587         offset = 0;
1588         while (1) {
1589                 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1590                 u64 sector;
1591
1592                 while (offset < f_sectors &&
1593                        w->tmp[offset].state < SECTOR_dirty)
1594                         offset++;
1595
1596                 if (offset == f_sectors)
1597                         break;
1598
1599                 while (offset + sectors < f_sectors &&
1600                        w->tmp[offset + sectors].state >= SECTOR_dirty) {
1601                         reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
1602                         dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
1603                         sectors++;
1604                 }
1605                 BUG_ON(!sectors);
1606
1607                 sector = folio_sector(folio) + offset;
1608
1609                 if (w->io &&
1610                     (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1611                      bio_full(&w->io->op.wbio.bio, sectors << 9) ||
1612                      w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1613                      (BIO_MAX_VECS * PAGE_SIZE) ||
1614                      bio_end_sector(&w->io->op.wbio.bio) != sector))
1615                         bch2_writepage_do_io(w);
1616
1617                 if (!w->io)
1618                         bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1619                                                 nr_replicas_this_write);
1620
1621                 atomic_inc(&s->write_count);
1622
1623                 BUG_ON(inode != w->io->inode);
1624                 BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
1625                                      sectors << 9, offset << 9));
1626
1627                 /* Check for writing past i_size: */
1628                 WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1629                           round_up(i_size, block_bytes(c)) &&
1630                           !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
1631                           "writing past i_size: %llu > %llu (unrounded %llu)\n",
1632                           bio_end_sector(&w->io->op.wbio.bio) << 9,
1633                           round_up(i_size, block_bytes(c)),
1634                           i_size);
1635
1636                 w->io->op.res.sectors += reserved_sectors;
1637                 w->io->op.i_sectors_delta -= dirty_sectors;
1638                 w->io->op.new_i_size = i_size;
1639
1640                 offset += sectors;
1641         }
1642
1643         if (atomic_dec_and_test(&s->write_count))
1644                 folio_end_writeback(folio);
1645
1646         return 0;
1647 }
1648
1649 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1650 {
1651         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1652         struct bch_writepage_state w =
1653                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1654         struct blk_plug plug;
1655         int ret;
1656
1657         blk_start_plug(&plug);
1658         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1659         if (w.io)
1660                 bch2_writepage_do_io(&w);
1661         blk_finish_plug(&plug);
1662         kfree(w.tmp);
1663         return bch2_err_class(ret);
1664 }
1665
1666 /* buffered writes: */
1667
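/*
 * .write_begin/.write_end style helpers (presumably wired up as the
 * address_space ops): write_begin pins and locks the folio, reads it in only
 * when the write is partial and within i_size, and takes a folio reservation
 * that is handed to write_end via *fsdata; write_end marks the copied range
 * dirty, updates i_size if needed and drops the reservation.
 */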
1668 int bch2_write_begin(struct file *file, struct address_space *mapping,
1669                      loff_t pos, unsigned len,
1670                      struct page **pagep, void **fsdata)
1671 {
1672         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1673         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1674         struct bch2_folio_reservation *res;
1675         struct folio *folio;
1676         unsigned offset;
1677         int ret = -ENOMEM;
1678
1679         res = kmalloc(sizeof(*res), GFP_KERNEL);
1680         if (!res)
1681                 return -ENOMEM;
1682
1683         bch2_folio_reservation_init(c, inode, res);
1684         *fsdata = res;
1685
1686         bch2_pagecache_add_get(inode);
1687
1688         folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
1689                                 FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
1690                                 mapping_gfp_mask(mapping));
1691         if (!folio)
1692                 goto err_unlock;
1693
1694         if (folio_test_uptodate(folio))
1695                 goto out;
1696
1697         offset = pos - folio_pos(folio);
1698         len = min_t(size_t, len, folio_end_pos(folio) - pos);
1699
1700         /* If we're writing the entire folio, we don't need to read it in first: */
1701         if (!offset && len == folio_size(folio))
1702                 goto out;
1703
1704         if (!offset && pos + len >= inode->v.i_size) {
1705                 folio_zero_segment(folio, len, folio_size(folio));
1706                 flush_dcache_folio(folio);
1707                 goto out;
1708         }
1709
1710         if (folio_pos(folio) >= inode->v.i_size) {
1711                 folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
1712                 flush_dcache_folio(folio);
1713                 goto out;
1714         }
1715 readpage:
1716         ret = bch2_read_single_folio(folio, mapping);
1717         if (ret)
1718                 goto err;
1719 out:
1720         if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
1721                 ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
1722                 if (ret)
1723                         goto err;
1724         }
1725
1726         ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
1727         if (ret) {
1728                 if (!folio_test_uptodate(folio)) {
1729                         /*
1730                          * If the folio hasn't been read in, we won't know if we
1731                          * actually need a reservation - we don't actually need
1732                          * to read here, we just need to check if the folio is
1733                          * fully backed by uncompressed data:
1734                          */
1735                         goto readpage;
1736                 }
1737
1738                 goto err;
1739         }
1740
1741         *pagep = &folio->page;
1742         return 0;
1743 err:
1744         folio_unlock(folio);
1745         folio_put(folio);
1746         *pagep = NULL;
1747 err_unlock:
1748         bch2_pagecache_add_put(inode);
1749         kfree(res);
1750         *fsdata = NULL;
1751         return bch2_err_class(ret);
1752 }
1753
1754 int bch2_write_end(struct file *file, struct address_space *mapping,
1755                    loff_t pos, unsigned len, unsigned copied,
1756                    struct page *page, void *fsdata)
1757 {
1758         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1759         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1760         struct bch2_folio_reservation *res = fsdata;
1761         struct folio *folio = page_folio(page);
1762         unsigned offset = pos - folio_pos(folio);
1763
1764         lockdep_assert_held(&inode->v.i_rwsem);
1765         BUG_ON(offset + copied > folio_size(folio));
1766
1767         if (unlikely(copied < len && !folio_test_uptodate(folio))) {
1768                 /*
1769                  * The folio needs to be read in, but that would destroy
1770                  * our partial write - simplest thing is to just force
1771                  * userspace to redo the write:
1772                  */
1773                 folio_zero_range(folio, 0, folio_size(folio));
1774                 flush_dcache_folio(folio);
1775                 copied = 0;
1776         }
1777
1778         spin_lock(&inode->v.i_lock);
1779         if (pos + copied > inode->v.i_size)
1780                 i_size_write(&inode->v, pos + copied);
1781         spin_unlock(&inode->v.i_lock);
1782
1783         if (copied) {
1784                 if (!folio_test_uptodate(folio))
1785                         folio_mark_uptodate(folio);
1786
1787                 bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
1788
1789                 inode->ei_last_dirtied = (unsigned long) current;
1790         }
1791
1792         folio_unlock(folio);
1793         folio_put(folio);
1794         bch2_pagecache_add_put(inode);
1795
1796         bch2_folio_reservation_put(c, inode, res);
1797         kfree(res);
1798
1799         return copied;
1800 }
1801
1802 static noinline void folios_trunc(folios *folios, struct folio **fi)
1803 {
1804         while (folios->data + folios->nr > fi) {
1805                 struct folio *f = darray_pop(folios);
1806
1807                 folio_unlock(f);
1808                 folio_put(f);
1809         }
1810 }
1811
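/*
 * Core of the buffered write path: grab a contiguous run of locked folios
 * covering [pos, pos + len), read in the first/last folio if they're only
 * partially overwritten, get space reservations, copy from the iterator with
 * atomic usercopies, then mark everything copied as dirty.  On a short copy
 * or a failed reservation the folio array is truncated and we return however
 * many bytes actually made it, so the caller can retry or report the error.
 */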
1812 static int __bch2_buffered_write(struct bch_inode_info *inode,
1813                                  struct address_space *mapping,
1814                                  struct iov_iter *iter,
1815                                  loff_t pos, unsigned len)
1816 {
1817         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1818         struct bch2_folio_reservation res;
1819         folios folios;
1820         struct folio **fi, *f;
1821         unsigned copied = 0, f_offset;
1822         loff_t end = pos + len, f_pos;
1823         loff_t last_folio_pos = inode->v.i_size;
1824         int ret = 0;
1825
1826         BUG_ON(!len);
1827
1828         bch2_folio_reservation_init(c, inode, &res);
1829         darray_init(&folios);
1830
1831         ret = filemap_get_contig_folios_d(mapping, pos, end,
1832                                    FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
1833                                    mapping_gfp_mask(mapping),
1834                                    &folios);
1835         if (ret)
1836                 goto out;
1837
1838         BUG_ON(!folios.nr);
1839
1840         f = darray_first(folios);
1841         if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
1842                 ret = bch2_read_single_folio(f, mapping);
1843                 if (ret)
1844                         goto out;
1845         }
1846
1847         f = darray_last(folios);
1848         end = min(end, folio_end_pos(f));
1849         last_folio_pos = folio_pos(f);
1850         if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
1851                 if (end >= inode->v.i_size) {
1852                         folio_zero_range(f, 0, folio_size(f));
1853                 } else {
1854                         ret = bch2_read_single_folio(f, mapping);
1855                         if (ret)
1856                                 goto out;
1857                 }
1858         }
1859
1860         f_pos = pos;
1861         f_offset = pos - folio_pos(darray_first(folios));
1862         darray_for_each(folios, fi) {
1863                 struct folio *f = *fi;
1864                 unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
1865
1866                 if (!bch2_folio_create(f, __GFP_NOFAIL)->uptodate) {
1867                         ret = bch2_folio_set(c, inode_inum(inode), fi,
1868                                              folios.data + folios.nr - fi);
1869                         if (ret)
1870                                 goto out;
1871                 }
1872
1873                 /*
1874                  * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
1875                  * supposed to write as much as we have disk space for.
1876                  *
1877                  * On failure here we should still write out a partial page if
1878                  * we aren't completely out of disk space - we don't do that
1879                  * yet:
1880                  */
1881                 ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
1882                 if (unlikely(ret)) {
1883                         folios_trunc(&folios, fi);
1884                         if (!folios.nr)
1885                                 goto out;
1886
1887                         end = min(end, folio_end_pos(darray_last(folios)));
1888                         break;
1889                 }
1890
1891                 f_pos = folio_end_pos(f);
1892                 f_offset = 0;
1893         }
1894
1895         if (mapping_writably_mapped(mapping))
1896                 darray_for_each(folios, fi)
1897                         flush_dcache_folio(*fi);
1898
1899         f_pos = pos;
1900         f_offset = pos - folio_pos(darray_first(folios));
1901         darray_for_each(folios, fi) {
1902                 struct folio *f = *fi;
1903                 unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
1904                 unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
1905
1906                 if (!f_copied) {
1907                         folios_trunc(&folios, fi);
1908                         break;
1909                 }
1910
1911                 if (!folio_test_uptodate(f) &&
1912                     f_copied != folio_size(f) &&
1913                     pos + copied + f_copied < inode->v.i_size) {
1914                         folio_zero_range(f, 0, folio_size(f));
1915                         folios_trunc(&folios, fi);
1916                         break;
1917                 }
1918
1919                 flush_dcache_folio(f);
1920                 copied += f_copied;
1921
1922                 if (f_copied != f_len) {
1923                         folios_trunc(&folios, fi + 1);
1924                         break;
1925                 }
1926
1927                 f_pos = folio_end_pos(f);
1928                 f_offset = 0;
1929         }
1930
1931         if (!copied)
1932                 goto out;
1933
1934         end = pos + copied;
1935
1936         spin_lock(&inode->v.i_lock);
1937         if (end > inode->v.i_size)
1938                 i_size_write(&inode->v, end);
1939         spin_unlock(&inode->v.i_lock);
1940
1941         f_pos = pos;
1942         f_offset = pos - folio_pos(darray_first(folios));
1943         darray_for_each(folios, fi) {
1944                 struct folio *f = *fi;
1945                 unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
1946
1947                 if (!folio_test_uptodate(f))
1948                         folio_mark_uptodate(f);
1949
1950                 bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
1951
1952                 f_pos = folio_end_pos(f);
1953                 f_offset = 0;
1954         }
1955
1956         inode->ei_last_dirtied = (unsigned long) current;
1957 out:
1958         darray_for_each(folios, fi) {
1959                 folio_unlock(*fi);
1960                 folio_put(*fi);
1961         }
1962
1963         /*
1964          * If the last folio added to the mapping starts beyond current EOF, we
1965          * performed a short write but left around at least one post-EOF folio.
1966          * Clean up the mapping before we return.
1967          */
1968         if (last_folio_pos >= inode->v.i_size)
1969                 truncate_pagecache(&inode->v, inode->v.i_size);
1970
1971         darray_exit(&folios);
1972         bch2_folio_reservation_put(c, inode, &res);
1973
1974         return copied ?: ret;
1975 }
1976
1977 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1978 {
1979         struct file *file = iocb->ki_filp;
1980         struct address_space *mapping = file->f_mapping;
1981         struct bch_inode_info *inode = file_bch_inode(file);
1982         loff_t pos = iocb->ki_pos;
1983         ssize_t written = 0;
1984         int ret = 0;
1985
1986         bch2_pagecache_add_get(inode);
1987
1988         do {
1989                 unsigned offset = pos & (PAGE_SIZE - 1);
1990                 unsigned bytes = iov_iter_count(iter);
1991 again:
1992                 /*
1993                  * Bring in the user page that we will copy from _first_.
1994                  * Otherwise there's a nasty deadlock on copying from the
1995                  * same page as we're writing to, without it being marked
1996                  * up-to-date.
1997                  *
1998                  * Not only is this an optimisation, but it is also required
1999                  * to check that the address is actually valid, when atomic
2000                  * usercopies are used, below.
2001                  */
2002                 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2003                         bytes = min_t(unsigned long, iov_iter_count(iter),
2004                                       PAGE_SIZE - offset);
2005
2006                         if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2007                                 ret = -EFAULT;
2008                                 break;
2009                         }
2010                 }
2011
2012                 if (unlikely(fatal_signal_pending(current))) {
2013                         ret = -EINTR;
2014                         break;
2015                 }
2016
2017                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
2018                 if (unlikely(ret < 0))
2019                         break;
2020
2021                 cond_resched();
2022
2023                 if (unlikely(ret == 0)) {
2024                         /*
2025                          * If we were unable to copy any data at all, we must
2026                          * fall back to a single segment length write.
2027                          *
2028                          * If we didn't fall back here, we could livelock
2029                          * because not all segments in the iov can be copied at
2030                          * once without a pagefault.
2031                          */
2032                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
2033                                       iov_iter_single_seg_count(iter));
2034                         goto again;
2035                 }
2036                 pos += ret;
2037                 written += ret;
2038                 ret = 0;
2039
2040                 balance_dirty_pages_ratelimited(mapping);
2041         } while (iov_iter_count(iter));
2042
2043         bch2_pagecache_add_put(inode);
2044
2045         return written ? written : ret;
2046 }
2047
2048 /* O_DIRECT reads */
2049
2050 static void bio_check_or_release(struct bio *bio, bool check_dirty)
2051 {
2052         if (check_dirty) {
2053                 bio_check_pages_dirty(bio);
2054         } else {
2055                 bio_release_pages(bio, false);
2056                 bio_put(bio);
2057         }
2058 }
2059
2060 static void bch2_dio_read_complete(struct closure *cl)
2061 {
2062         struct dio_read *dio = container_of(cl, struct dio_read, cl);
2063
2064         dio->req->ki_complete(dio->req, dio->ret);
2065         bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2066 }
2067
2068 static void bch2_direct_IO_read_endio(struct bio *bio)
2069 {
2070         struct dio_read *dio = bio->bi_private;
2071
2072         if (bio->bi_status)
2073                 dio->ret = blk_status_to_errno(bio->bi_status);
2074
2075         closure_put(&dio->cl);
2076 }
2077
2078 static void bch2_direct_IO_read_split_endio(struct bio *bio)
2079 {
2080         struct dio_read *dio = bio->bi_private;
2081         bool should_dirty = dio->should_dirty;
2082
2083         bch2_direct_IO_read_endio(bio);
2084         bio_check_or_release(bio, should_dirty);
2085 }
2086
2087 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
2088 {
2089         struct file *file = req->ki_filp;
2090         struct bch_inode_info *inode = file_bch_inode(file);
2091         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2092         struct bch_io_opts opts;
2093         struct dio_read *dio;
2094         struct bio *bio;
2095         loff_t offset = req->ki_pos;
2096         bool sync = is_sync_kiocb(req);
2097         size_t shorten;
2098         ssize_t ret;
2099
2100         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2101
2102         if ((offset|iter->count) & (block_bytes(c) - 1))
2103                 return -EINVAL;
2104
2105         ret = min_t(loff_t, iter->count,
2106                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
2107
2108         if (!ret)
2109                 return ret;
2110
2111         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
2112         iter->count -= shorten;
2113
2114         bio = bio_alloc_bioset(NULL,
2115                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2116                                REQ_OP_READ,
2117                                GFP_KERNEL,
2118                                &c->dio_read_bioset);
2119
2120         bio->bi_end_io = bch2_direct_IO_read_endio;
2121
2122         dio = container_of(bio, struct dio_read, rbio.bio);
2123         closure_init(&dio->cl, NULL);
2124
2125         /*
2126          * this is a _really_ horrible hack just to avoid an atomic sub at the
2127          * end:
2128          */
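        /*
         * That is: for async requests the closure is set up so that the final
         * closure_put() from bio completion runs bch2_dio_read_complete() as
         * the closure destructor, instead of us having to drop a ref and
         * complete the request here; in the sync case we simply closure_sync()
         * below and complete the kiocb ourselves.
         */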
2129         if (!sync) {
2130                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
2131                 atomic_set(&dio->cl.remaining,
2132                            CLOSURE_REMAINING_INITIALIZER -
2133                            CLOSURE_RUNNING +
2134                            CLOSURE_DESTRUCTOR);
2135         } else {
2136                 atomic_set(&dio->cl.remaining,
2137                            CLOSURE_REMAINING_INITIALIZER + 1);
2138         }
2139
2140         dio->req        = req;
2141         dio->ret        = ret;
2142         /*
2143          * This is one of the sketchier things I've encountered: we have to skip
2144          * the dirtying of requests that are internal to the kernel (i.e. from
2145          * loopback), because we'll deadlock on page_lock.
2146          */
2147         dio->should_dirty = iter_is_iovec(iter);
2148
2149         goto start;
2150         while (iter->count) {
2151                 bio = bio_alloc_bioset(NULL,
2152                                        bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2153                                        REQ_OP_READ,
2154                                        GFP_KERNEL,
2155                                        &c->bio_read);
2156                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
2157 start:
2158                 bio->bi_opf             = REQ_OP_READ|REQ_SYNC;
2159                 bio->bi_iter.bi_sector  = offset >> 9;
2160                 bio->bi_private         = dio;
2161
2162                 ret = bio_iov_iter_get_pages(bio, iter);
2163                 if (ret < 0) {
2164                         /* XXX: fault inject this path */
2165                         bio->bi_status = BLK_STS_RESOURCE;
2166                         bio_endio(bio);
2167                         break;
2168                 }
2169
2170                 offset += bio->bi_iter.bi_size;
2171
2172                 if (dio->should_dirty)
2173                         bio_set_pages_dirty(bio);
2174
2175                 if (iter->count)
2176                         closure_get(&dio->cl);
2177
2178                 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
2179         }
2180
2181         iter->count += shorten;
2182
2183         if (sync) {
2184                 closure_sync(&dio->cl);
2185                 closure_debug_destroy(&dio->cl);
2186                 ret = dio->ret;
2187                 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2188                 return ret;
2189         } else {
2190                 return -EIOCBQUEUED;
2191         }
2192 }
2193
2194 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2195 {
2196         struct file *file = iocb->ki_filp;
2197         struct bch_inode_info *inode = file_bch_inode(file);
2198         struct address_space *mapping = file->f_mapping;
2199         size_t count = iov_iter_count(iter);
2200         ssize_t ret;
2201
2202         if (!count)
2203                 return 0; /* skip atime */
2204
2205         if (iocb->ki_flags & IOCB_DIRECT) {
2206                 struct blk_plug plug;
2207
2208                 if (unlikely(mapping->nrpages)) {
2209                         ret = filemap_write_and_wait_range(mapping,
2210                                                 iocb->ki_pos,
2211                                                 iocb->ki_pos + count - 1);
2212                         if (ret < 0)
2213                                 goto out;
2214                 }
2215
2216                 file_accessed(file);
2217
2218                 blk_start_plug(&plug);
2219                 ret = bch2_direct_IO_read(iocb, iter);
2220                 blk_finish_plug(&plug);
2221
2222                 if (ret >= 0)
2223                         iocb->ki_pos += ret;
2224         } else {
2225                 bch2_pagecache_add_get(inode);
2226                 ret = generic_file_read_iter(iocb, iter);
2227                 bch2_pagecache_add_put(inode);
2228         }
2229 out:
2230         return bch2_err_class(ret);
2231 }
2232
2233 /* O_DIRECT writes */
2234
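/*
 * Used by the O_DIRECT write path when a disk reservation can't be taken:
 * returns true only if every extent in [offset, offset + size) in this
 * snapshot already has at least nr_replicas replicas and (unless @compressed)
 * no compressed data, i.e. the write will be overwriting space that is
 * already allocated.
 */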
2235 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2236                                        u64 offset, u64 size,
2237                                        unsigned nr_replicas, bool compressed)
2238 {
2239         struct btree_trans trans;
2240         struct btree_iter iter;
2241         struct bkey_s_c k;
2242         u64 end = offset + size;
2243         u32 snapshot;
2244         bool ret = true;
2245         int err;
2246
2247         bch2_trans_init(&trans, c, 0, 0);
2248 retry:
2249         bch2_trans_begin(&trans);
2250
2251         err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2252         if (err)
2253                 goto err;
2254
2255         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2256                            SPOS(inum.inum, offset, snapshot),
2257                            BTREE_ITER_SLOTS, k, err) {
2258                 if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
2259                         break;
2260
2261                 if (k.k->p.snapshot != snapshot ||
2262                     nr_replicas > bch2_bkey_replicas(c, k) ||
2263                     (!compressed && bch2_bkey_sectors_compressed(k))) {
2264                         ret = false;
2265                         break;
2266                 }
2267         }
2268
2269         offset = iter.pos.offset;
2270         bch2_trans_iter_exit(&trans, &iter);
2271 err:
2272         if (bch2_err_matches(err, BCH_ERR_transaction_restart))
2273                 goto retry;
2274         bch2_trans_exit(&trans);
2275
2276         return err ? false : ret;
2277 }
2278
2279 static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
2280 {
2281         struct bch_fs *c = dio->op.c;
2282         struct bch_inode_info *inode = dio->inode;
2283         struct bio *bio = &dio->op.wbio.bio;
2284
2285         return bch2_check_range_allocated(c, inode_inum(inode),
2286                                 dio->op.pos.offset, bio_sectors(bio),
2287                                 dio->op.opts.data_replicas,
2288                                 dio->op.opts.compression != 0);
2289 }
2290
2291 static void bch2_dio_write_loop_async(struct bch_write_op *);
2292 static __always_inline long bch2_dio_write_done(struct dio_write *dio);
2293
2294 static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
2295 {
2296         struct iovec *iov = dio->inline_vecs;
2297
2298         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2299                 iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
2300                                     GFP_KERNEL);
2301                 if (unlikely(!iov))
2302                         return -ENOMEM;
2303
2304                 dio->free_iov = true;
2305         }
2306
2307         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2308         dio->iter.iov = iov;
2309         return 0;
2310 }
2311
2312 static void bch2_dio_write_flush_done(struct closure *cl)
2313 {
2314         struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
2315         struct bch_fs *c = dio->op.c;
2316
2317         closure_debug_destroy(cl);
2318
2319         dio->op.error = bch2_journal_error(&c->journal);
2320
2321         bch2_dio_write_done(dio);
2322 }
2323
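/*
 * For dsync writes (dio->flush): look up the inode to get the journal
 * sequence number its last update went into, then flush the journal up to
 * that point plus any outstanding nocow writes.  For async requests this
 * continues in bch2_dio_write_flush_done(), which picks up any journal error
 * before completing the dio.
 */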
2324 static noinline void bch2_dio_write_flush(struct dio_write *dio)
2325 {
2326         struct bch_fs *c = dio->op.c;
2327         struct bch_inode_unpacked inode;
2328         int ret;
2329
2330         dio->flush = 0;
2331
2332         closure_init(&dio->op.cl, NULL);
2333
2334         if (!dio->op.error) {
2335                 ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
2336                 if (ret) {
2337                         dio->op.error = ret;
2338                 } else {
2339                         bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
2340                         bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
2341                 }
2342         }
2343
2344         if (dio->sync) {
2345                 closure_sync(&dio->op.cl);
2346                 closure_debug_destroy(&dio->op.cl);
2347         } else {
2348                 continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
2349         }
2350 }
2351
2352 static __always_inline long bch2_dio_write_done(struct dio_write *dio)
2353 {
2354         struct kiocb *req = dio->req;
2355         struct bch_inode_info *inode = dio->inode;
2356         bool sync = dio->sync;
2357         long ret;
2358
2359         if (unlikely(dio->flush)) {
2360                 bch2_dio_write_flush(dio);
2361                 if (!sync)
2362                         return -EIOCBQUEUED;
2363         }
2364
2365         bch2_pagecache_block_put(inode);
2366
2367         if (dio->free_iov)
2368                 kfree(dio->iter.iov);
2369
2370         ret = dio->op.error ?: ((long) dio->written << 9);
2371         bio_put(&dio->op.wbio.bio);
2372
2373         /* inode->i_dio_count is our ref on inode and thus bch_fs */
2374         inode_dio_end(&inode->v);
2375
2376         if (ret < 0)
2377                 ret = bch2_err_class(ret);
2378
2379         if (!sync) {
2380                 req->ki_complete(req, ret);
2381                 ret = -EIOCBQUEUED;
2382         }
2383         return ret;
2384 }
2385
2386 static __always_inline void bch2_dio_write_end(struct dio_write *dio)
2387 {
2388         struct bch_fs *c = dio->op.c;
2389         struct kiocb *req = dio->req;
2390         struct bch_inode_info *inode = dio->inode;
2391         struct bio *bio = &dio->op.wbio.bio;
2392
2393         req->ki_pos     += (u64) dio->op.written << 9;
2394         dio->written    += dio->op.written;
2395
2396         if (dio->extending) {
2397                 spin_lock(&inode->v.i_lock);
2398                 if (req->ki_pos > inode->v.i_size)
2399                         i_size_write(&inode->v, req->ki_pos);
2400                 spin_unlock(&inode->v.i_lock);
2401         }
2402
2403         if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
2404                 mutex_lock(&inode->ei_quota_lock);
2405                 __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
2406                 __bch2_quota_reservation_put(c, inode, &dio->quota_res);
2407                 mutex_unlock(&inode->ei_quota_lock);
2408         }
2409
2410         if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
2411                 struct bvec_iter_all iter;
2412                 struct folio_vec fv;
2413
2414                 bio_for_each_folio_all(fv, bio, iter)
2415                         folio_put(fv.fv_folio);
2416         }
2417
2418         if (unlikely(dio->op.error))
2419                 set_bit(EI_INODE_ERROR, &inode->ei_flags);
2420 }
2421
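/*
 * Main O_DIRECT write loop: pull pages for the next chunk of the user buffer
 * into the bio with page faults into our own mapping disabled (roughly: a
 * fault here means the user buffer is mmapped from the file being written, in
 * which case the fault handler drops ei_pagecache_lock and we shoot down the
 * page cache and retry), take quota and disk reservations, then submit via
 * bch2_write().  Sync writes loop here until the iterator is drained; async
 * writes return -EIOCBQUEUED and continue from bch2_dio_write_loop_async().
 */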
2422 static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
2423 {
2424         struct bch_fs *c = dio->op.c;
2425         struct kiocb *req = dio->req;
2426         struct address_space *mapping = dio->mapping;
2427         struct bch_inode_info *inode = dio->inode;
2428         struct bch_io_opts opts;
2429         struct bio *bio = &dio->op.wbio.bio;
2430         unsigned unaligned, iter_count;
2431         bool sync = dio->sync, dropped_locks;
2432         long ret;
2433
2434         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2435
2436         while (1) {
2437                 iter_count = dio->iter.count;
2438
2439                 EBUG_ON(current->faults_disabled_mapping);
2440                 current->faults_disabled_mapping = mapping;
2441
2442                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2443
2444                 dropped_locks = fdm_dropped_locks();
2445
2446                 current->faults_disabled_mapping = NULL;
2447
2448                 /*
2449                  * If the fault handler returned an error but also signalled
2450                  * that it dropped & retook ei_pagecache_lock, we just need to
2451                  * re-shoot down the page cache and retry:
2452                  */
2453                 if (dropped_locks && ret)
2454                         ret = 0;
2455
2456                 if (unlikely(ret < 0))
2457                         goto err;
2458
2459                 if (unlikely(dropped_locks)) {
2460                         ret = write_invalidate_inode_pages_range(mapping,
2461                                         req->ki_pos,
2462                                         req->ki_pos + iter_count - 1);
2463                         if (unlikely(ret))
2464                                 goto err;
2465
2466                         if (!bio->bi_iter.bi_size)
2467                                 continue;
2468                 }
2469
2470                 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2471                 bio->bi_iter.bi_size -= unaligned;
2472                 iov_iter_revert(&dio->iter, unaligned);
2473
2474                 if (!bio->bi_iter.bi_size) {
2475                         /*
2476                          * bio_iov_iter_get_pages was only able to get <
2477                          * blocksize worth of pages:
2478                          */
2479                         ret = -EFAULT;
2480                         goto err;
2481                 }
2482
2483                 bch2_write_op_init(&dio->op, c, opts);
2484                 dio->op.end_io          = sync
2485                         ? NULL
2486                         : bch2_dio_write_loop_async;
2487                 dio->op.target          = dio->op.opts.foreground_target;
2488                 dio->op.write_point     = writepoint_hashed((unsigned long) current);
2489                 dio->op.nr_replicas     = dio->op.opts.data_replicas;
2490                 dio->op.subvol          = inode->ei_subvol;
2491                 dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2492                 dio->op.devs_need_flush = &inode->ei_devs_need_flush;
2493
2494                 if (sync)
2495                         dio->op.flags |= BCH_WRITE_SYNC;
2496                 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2497
2498                 ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2499                                                  bio_sectors(bio), true);
2500                 if (unlikely(ret))
2501                         goto err;
2502
2503                 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2504                                                 dio->op.opts.data_replicas, 0);
2505                 if (unlikely(ret) &&
2506                     !bch2_dio_write_check_allocated(dio))
2507                         goto err;
2508
2509                 task_io_account_write(bio->bi_iter.bi_size);
2510
2511                 if (unlikely(dio->iter.count) &&
2512                     !dio->sync &&
2513                     !dio->loop &&
2514                     bch2_dio_write_copy_iov(dio))
2515                         dio->sync = sync = true;
2516
2517                 dio->loop = true;
2518                 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2519
2520                 if (!sync)
2521                         return -EIOCBQUEUED;
2522
2523                 bch2_dio_write_end(dio);
2524
2525                 if (likely(!dio->iter.count) || dio->op.error)
2526                         break;
2527
2528                 bio_reset(bio, NULL, REQ_OP_WRITE);
2529         }
2530 out:
2531         return bch2_dio_write_done(dio);
2532 err:
2533         dio->op.error = ret;
2534
2535         if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
2536                 struct bvec_iter_all iter;
2537                 struct folio_vec fv;
2538
2539                 bio_for_each_folio_all(fv, bio, iter)
2540                         folio_put(fv.fv_folio);
2541         }
2542
2543         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2544         goto out;
2545 }
2546
2547 static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
2548 {
2549         struct mm_struct *mm = dio->mm;
2550
2551         bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
2552
2553         if (mm)
2554                 kthread_use_mm(mm);
2555         bch2_dio_write_loop(dio);
2556         if (mm)
2557                 kthread_unuse_mm(mm);
2558 }
2559
2560 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2561 {
2562         struct dio_write *dio = container_of(op, struct dio_write, op);
2563
2564         bch2_dio_write_end(dio);
2565
2566         if (likely(!dio->iter.count) || dio->op.error)
2567                 bch2_dio_write_done(dio);
2568         else
2569                 bch2_dio_write_continue(dio);
2570 }
2571
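/*
 * O_DIRECT write entry point: after the usual write checks, take the
 * pagecache-block lock (excluding new page cache adds) and, if the write
 * doesn't extend the file, drop i_rwsem so non-extending direct writes can
 * run concurrently.  Any overlapping page cache is invalidated before handing
 * off to bch2_dio_write_loop().
 */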
2572 static noinline
2573 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2574 {
2575         struct file *file = req->ki_filp;
2576         struct address_space *mapping = file->f_mapping;
2577         struct bch_inode_info *inode = file_bch_inode(file);
2578         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2579         struct dio_write *dio;
2580         struct bio *bio;
2581         bool locked = true, extending;
2582         ssize_t ret;
2583
2584         prefetch(&c->opts);
2585         prefetch((void *) &c->opts + 64);
2586         prefetch(&inode->ei_inode);
2587         prefetch((void *) &inode->ei_inode + 64);
2588
2589         inode_lock(&inode->v);
2590
2591         ret = generic_write_checks(req, iter);
2592         if (unlikely(ret <= 0))
2593                 goto err;
2594
2595         ret = file_remove_privs(file);
2596         if (unlikely(ret))
2597                 goto err;
2598
2599         ret = file_update_time(file);
2600         if (unlikely(ret))
2601                 goto err;
2602
2603         if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
2604                 goto err;
2605
2606         inode_dio_begin(&inode->v);
2607         bch2_pagecache_block_get(inode);
2608
2609         extending = req->ki_pos + iter->count > inode->v.i_size;
2610         if (!extending) {
2611                 inode_unlock(&inode->v);
2612                 locked = false;
2613         }
2614
2615         bio = bio_alloc_bioset(NULL,
2616                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2617                                REQ_OP_WRITE,
2618                                GFP_KERNEL,
2619                                &c->dio_write_bioset);
2620         dio = container_of(bio, struct dio_write, op.wbio.bio);
2621         dio->req                = req;
2622         dio->mapping            = mapping;
2623         dio->inode              = inode;
2624         dio->mm                 = current->mm;
2625         dio->loop               = false;
2626         dio->extending          = extending;
2627         dio->sync               = is_sync_kiocb(req) || extending;
2628         dio->flush              = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
2629         dio->free_iov           = false;
2630         dio->quota_res.sectors  = 0;
2631         dio->written            = 0;
2632         dio->iter               = *iter;
2633         dio->op.c               = c;
2634
2635         if (unlikely(mapping->nrpages)) {
2636                 ret = write_invalidate_inode_pages_range(mapping,
2637                                                 req->ki_pos,
2638                                                 req->ki_pos + iter->count - 1);
2639                 if (unlikely(ret))
2640                         goto err_put_bio;
2641         }
2642
2643         ret = bch2_dio_write_loop(dio);
2644 err:
2645         if (locked)
2646                 inode_unlock(&inode->v);
2647         return ret;
2648 err_put_bio:
2649         bch2_pagecache_block_put(inode);
2650         bio_put(bio);
2651         inode_dio_end(&inode->v);
2652         goto err;
2653 }
2654
2655 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2656 {
2657         struct file *file = iocb->ki_filp;
2658         struct bch_inode_info *inode = file_bch_inode(file);
2659         ssize_t ret;
2660
2661         if (iocb->ki_flags & IOCB_DIRECT) {
2662                 ret = bch2_direct_write(iocb, from);
2663                 goto out;
2664         }
2665
2666         /* We can write back this queue in page reclaim */
2667         current->backing_dev_info = inode_to_bdi(&inode->v);
2668         inode_lock(&inode->v);
2669
2670         ret = generic_write_checks(iocb, from);
2671         if (ret <= 0)
2672                 goto unlock;
2673
2674         ret = file_remove_privs(file);
2675         if (ret)
2676                 goto unlock;
2677
2678         ret = file_update_time(file);
2679         if (ret)
2680                 goto unlock;
2681
2682         ret = bch2_buffered_write(iocb, from);
2683         if (likely(ret > 0))
2684                 iocb->ki_pos += ret;
2685 unlock:
2686         inode_unlock(&inode->v);
2687         current->backing_dev_info = NULL;
2688
2689         if (ret > 0)
2690                 ret = generic_write_sync(iocb, ret);
2691 out:
2692         return bch2_err_class(ret);
2693 }
2694
2695 /* fsync: */
2696
2697 /*
2698  * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2699  * insert trigger: look up the btree inode instead
2700  */
2701 static int bch2_flush_inode(struct bch_fs *c,
2702                             struct bch_inode_info *inode)
2703 {
2704         struct bch_inode_unpacked u;
2705         int ret;
2706
2707         if (c->opts.journal_flush_disabled)
2708                 return 0;
2709
2710         ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
2711         if (ret)
2712                 return ret;
2713
2714         return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
2715                 bch2_inode_flush_nocow_writes(c, inode);
2716 }
2717
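/*
 * fsync: write back and wait on dirty pages, sync VFS-level inode metadata,
 * then flush the journal up to the inode's last journal sequence number (and
 * any nocow writes); the first error of the three is returned.
 */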
2718 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2719 {
2720         struct bch_inode_info *inode = file_bch_inode(file);
2721         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2722         int ret, ret2, ret3;
2723
2724         ret = file_write_and_wait_range(file, start, end);
2725         ret2 = sync_inode_metadata(&inode->v, 1);
2726         ret3 = bch2_flush_inode(c, inode);
2727
2728         return bch2_err_class(ret ?: ret2 ?: ret3);
2729 }
2730
2731 /* truncate: */
2732
2733 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2734                                  struct bpos start,
2735                                  struct bpos end)
2736 {
2737         struct btree_trans trans;
2738         struct btree_iter iter;
2739         struct bkey_s_c k;
2740         int ret = 0;
2741
2742         bch2_trans_init(&trans, c, 0, 0);
2743 retry:
2744         bch2_trans_begin(&trans);
2745
2746         ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2747         if (ret)
2748                 goto err;
2749
2750         for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
2751                 if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
2752                         ret = 1;
2753                         break;
2754                 }
2755         start = iter.pos;
2756         bch2_trans_iter_exit(&trans, &iter);
2757 err:
2758         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2759                 goto retry;
2760
2761         bch2_trans_exit(&trans);
2762         return ret;
2763 }
2764
2765 static int __bch2_truncate_folio(struct bch_inode_info *inode,
2766                                  pgoff_t index, loff_t start, loff_t end)
2767 {
2768         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2769         struct address_space *mapping = inode->v.i_mapping;
2770         struct bch_folio *s;
2771         unsigned start_offset = start & (PAGE_SIZE - 1);
2772         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2773         unsigned i;
2774         struct folio *folio;
2775         s64 i_sectors_delta = 0;
2776         int ret = 0;
2777         loff_t end_pos;
2778
2779         folio = filemap_lock_folio(mapping, index);
2780         if (!folio) {
2781                 /*
2782                  * XXX: we're doing two index lookups when we end up reading the
2783                  * folio
2784                  */
2785                 ret = range_has_data(c, inode->ei_subvol,
2786                                 POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
2787                                 POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
2788                 if (ret <= 0)
2789                         return ret;
2790
2791                 folio = __filemap_get_folio(mapping, index,
2792                                             FGP_LOCK|FGP_CREAT, GFP_KERNEL);
2793                 if (unlikely(!folio)) {
2794                         ret = -ENOMEM;
2795                         goto out;
2796                 }
2797         }
2798
2799         BUG_ON(start    >= folio_end_pos(folio));
2800         BUG_ON(end      <= folio_pos(folio));
2801
2802         start_offset    = max(start, folio_pos(folio)) - folio_pos(folio);
2803         end_offset      = min(end, folio_end_pos(folio)) - folio_pos(folio);
2804
2805         /* Folio boundary? Nothing to do */
2806         if (start_offset == 0 &&
2807             end_offset == folio_size(folio)) {
2808                 ret = 0;
2809                 goto unlock;
2810         }
2811
2812         s = bch2_folio_create(folio, 0);
2813         if (!s) {
2814                 ret = -ENOMEM;
2815                 goto unlock;
2816         }
2817
2818         if (!folio_test_uptodate(folio)) {
2819                 ret = bch2_read_single_folio(folio, mapping);
2820                 if (ret)
2821                         goto unlock;
2822         }
2823
2824         if (!s->uptodate) {
2825                 ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
2826                 if (ret)
2827                         goto unlock;
2828         }
2829
2830         for (i = round_up(start_offset, block_bytes(c)) >> 9;
2831              i < round_down(end_offset, block_bytes(c)) >> 9;
2832              i++) {
2833                 s->s[i].nr_replicas     = 0;
2834
2835                 i_sectors_delta -= s->s[i].state == SECTOR_dirty;
2836                 folio_sector_set(folio, s, i, SECTOR_unallocated);
2837         }
2838
2839         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2840
2841         /*
2842          * Caller needs to know whether this folio will be written out by
2843          * writeback - doing an i_size update if necessary - or whether the
2844          * caller will be responsible for the i_size update.
2845          *
2846          * Note that we shouldn't ever see a folio beyond EOF, but check and
2847          * warn if so. This has been observed to happen due to a failure to clean
2848          * up folios after a short write, and there's still a chance reclaim will fix
2849          * things up.
2850          */
2851         WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
2852         end_pos = folio_end_pos(folio);
2853         if (inode->v.i_size > folio_pos(folio))
2854                 end_pos = min(inode->v.i_size, end_pos);
2855         ret = s->s[(end_pos - folio_pos(folio) - 1) >> 9].state >= SECTOR_dirty;
2856
2857         folio_zero_segment(folio, start_offset, end_offset);
2858
2859         /*
2860          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2861          *
2862          * XXX: because we aren't currently tracking whether the folio has actual
2863          * data in it (vs. just 0s, or only partially written) this is wrong. Ick.
2864          */
2865         BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
2866
2867         /*
2868          * This removes any writeable userspace mappings; we need to force
2869          * .page_mkwrite to be called again before any mmapped writes, to
2870          * redirty the full page:
2871          */
2872         folio_mkclean(folio);
2873         filemap_dirty_folio(mapping, folio);
2874 unlock:
2875         folio_unlock(folio);
2876         folio_put(folio);
2877 out:
2878         return ret;
2879 }
2880
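/*
 * __bch2_truncate_folio() returns < 0 on error, 0 if the range was folio
 * aligned or there was nothing to zero, and > 0 if the folio straddling the
 * new EOF ends up dirty, i.e. writeback rather than the caller will be
 * responsible for the i_size update (see the comment above).  The wrappers
 * below apply it to the folio containing @from (bch2_truncate_folio) or to
 * the first and last folios of [start, end) (bch2_truncate_folios).
 */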
2881 static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
2882 {
2883         return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
2884                                      from, ANYSINT_MAX(loff_t));
2885 }
2886
2887 static int bch2_truncate_folios(struct bch_inode_info *inode,
2888                                 loff_t start, loff_t end)
2889 {
2890         int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
2891                                         start, end);
2892
2893         if (ret >= 0 &&
2894             start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2895                 ret = __bch2_truncate_folio(inode,
2896                                         (end - 1) >> PAGE_SHIFT,
2897                                         start, end);
2898         return ret;
2899 }
2900
2901 static int bch2_extend(struct user_namespace *mnt_userns,
2902                        struct bch_inode_info *inode,
2903                        struct bch_inode_unpacked *inode_u,
2904                        struct iattr *iattr)
2905 {
2906         struct address_space *mapping = inode->v.i_mapping;
2907         int ret;
2908
2909         /*
2910          * sync appends:
2911          *
2912          * this has to be done _before_ extending i_size:
2913          */
2914         ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2915         if (ret)
2916                 return ret;
2917
2918         truncate_setsize(&inode->v, iattr->ia_size);
2919
2920         return bch2_setattr_nonsize(mnt_userns, inode, iattr);
2921 }
2922
2923 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2924                                    struct bch_inode_unpacked *bi,
2925                                    void *p)
2926 {
2927         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2928         return 0;
2929 }
2930
2931 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2932                                   struct bch_inode_unpacked *bi, void *p)
2933 {
2934         u64 *new_i_size = p;
2935
2936         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2937         bi->bi_size = *new_i_size;
2938         return 0;
2939 }
2940
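/*
 * Truncate proper.  Note the ordering for the shrinking case: the btree inode
 * gets BCH_INODE_I_SIZE_DIRTY and the new size first, then the in-memory
 * i_size and page cache are shrunk with truncate_setsize(), then the extents
 * past the new EOF are deleted with bch2_fpunch(), and only then is
 * I_SIZE_DIRTY cleared - presumably so that a crash partway through leaves
 * the inode flagged for cleanup rather than with stale extents past i_size.
 */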
2941 int bch2_truncate(struct user_namespace *mnt_userns,
2942                   struct bch_inode_info *inode, struct iattr *iattr)
2943 {
2944         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2945         struct address_space *mapping = inode->v.i_mapping;
2946         struct bch_inode_unpacked inode_u;
2947         u64 new_i_size = iattr->ia_size;
2948         s64 i_sectors_delta = 0;
2949         int ret = 0;
2950
2951         /*
2952          * If the truncate call will change the size of the file, the
2953          * cmtimes should be updated. If the size will not change, we
2954          * do not need to update the cmtimes.
2955          */
2956         if (iattr->ia_size != inode->v.i_size) {
2957                 if (!(iattr->ia_valid & ATTR_MTIME))
2958                         ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2959                 if (!(iattr->ia_valid & ATTR_CTIME))
2960                         ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2961                 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2962         }
2963
2964         inode_dio_wait(&inode->v);
2965         bch2_pagecache_block_get(inode);
2966
2967         ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2968         if (ret)
2969                 goto err;
2970
2971         /*
2972          * check this before the next assertion; on filesystem error our normal
2973          * invariants are a bit broken (truncate has to truncate the page cache
2974          * before the inode).
2975          */
2976         ret = bch2_journal_error(&c->journal);
2977         if (ret)
2978                 goto err;
2979
2980         WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
2981                   inode->v.i_size < inode_u.bi_size,
2982                   "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
2983                   (u64) inode->v.i_size, inode_u.bi_size);
2984
2985         if (iattr->ia_size > inode->v.i_size) {
2986                 ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
2987                 goto err;
2988         }
2989
2990         iattr->ia_valid &= ~ATTR_SIZE;
2991
2992         ret = bch2_truncate_folio(inode, iattr->ia_size);
2993         if (unlikely(ret < 0))
2994                 goto err;
2995
2996         /*
2997          * When extending, we're going to write the new i_size to disk
2998          * immediately so we need to flush anything above the current on disk
2999          * i_size first:
3000          *
3001          * Also, when extending we need to flush the page that i_size currently
3002          * straddles - if it's mapped to userspace, we need to ensure that
3003          * userspace redirties it and calls .mkwrite -> set_page_dirty
3004          * again to allocate the part of the page that was extended.
3005          */
3006         if (iattr->ia_size > inode_u.bi_size)
3007                 ret = filemap_write_and_wait_range(mapping,
3008                                 inode_u.bi_size,
3009                                 iattr->ia_size - 1);
3010         else if (iattr->ia_size & (PAGE_SIZE - 1))
3011                 ret = filemap_write_and_wait_range(mapping,
3012                                 round_down(iattr->ia_size, PAGE_SIZE),
3013                                 iattr->ia_size - 1);
3014         if (ret)
3015                 goto err;
3016
3017         mutex_lock(&inode->ei_update_lock);
3018         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
3019                                &new_i_size, 0);
3020         mutex_unlock(&inode->ei_update_lock);
3021
3022         if (unlikely(ret))
3023                 goto err;
3024
3025         truncate_setsize(&inode->v, iattr->ia_size);
3026
3027         ret = bch2_fpunch(c, inode_inum(inode),
3028                         round_up(iattr->ia_size, block_bytes(c)) >> 9,
3029                         U64_MAX, &i_sectors_delta);
3030         i_sectors_acct(c, inode, NULL, i_sectors_delta);
3031
3032         bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
3033                                 !bch2_journal_error(&c->journal), c,
3034                                 "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
3035                                 inode->v.i_ino, (u64) inode->v.i_blocks,
3036                                 inode->ei_inode.bi_sectors);
3037         if (unlikely(ret))
3038                 goto err;
3039
3040         mutex_lock(&inode->ei_update_lock);
3041         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
3042         mutex_unlock(&inode->ei_update_lock);
3043
3044         ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
3045 err:
3046         bch2_pagecache_block_put(inode);
3047         return bch2_err_class(ret);
3048 }
3049
3050 /* fallocate: */
3051
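/*
 * bch2_write_inode() callback that sets the inode's [cm]time to the current
 * time; used by the fallocate paths below to update timestamps as part of a
 * transactional inode update.
 */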
3052 static int inode_update_times_fn(struct bch_inode_info *inode,
3053                                  struct bch_inode_unpacked *bi, void *p)
3054 {
3055         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3056
3057         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
3058         return 0;
3059 }
3060
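/*
 * FALLOC_FL_PUNCH_HOLE: zero out the partially covered folios at either end
 * of the range, drop the page cache for the range, delete whole blocks with
 * bch2_fpunch(), then update the inode's [cm]time.
 */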
3061 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
3062 {
3063         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3064         u64 end         = offset + len;
3065         u64 block_start = round_up(offset, block_bytes(c));
3066         u64 block_end   = round_down(end, block_bytes(c));
3067         bool truncated_last_page;
3068         int ret = 0;
3069
3070         ret = bch2_truncate_folios(inode, offset, end);
3071         if (unlikely(ret < 0))
3072                 goto err;
3073
3074         truncated_last_page = ret;
3075
3076         truncate_pagecache_range(&inode->v, offset, end - 1);
3077
3078         if (block_start < block_end) {
3079                 s64 i_sectors_delta = 0;
3080
3081                 ret = bch2_fpunch(c, inode_inum(inode),
3082                                   block_start >> 9, block_end >> 9,
3083                                   &i_sectors_delta);
3084                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3085         }
3086
3087         mutex_lock(&inode->ei_update_lock);
3088         if (end >= inode->v.i_size && !truncated_last_page) {
3089                 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
3090                                             ATTR_MTIME|ATTR_CTIME);
3091         } else {
3092                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
3093                                        ATTR_MTIME|ATTR_CTIME);
3094         }
3095         mutex_unlock(&inode->ei_update_lock);
3096 err:
3097         return ret;
3098 }
3099
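/*
 * FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE: @offset and @len must
 * be block aligned. After writing back and invalidating the page cache from
 * @offset onwards, walk the extents btree and rewrite each extent at its
 * shifted position while deleting the original - shifted up by @len when
 * inserting (walking backwards from the end of the file), down by @len when
 * collapsing (after first punching out the collapsed range).
 */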
3100 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
3101                                    loff_t offset, loff_t len,
3102                                    bool insert)
3103 {
3104         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3105         struct address_space *mapping = inode->v.i_mapping;
3106         struct bkey_buf copy;
3107         struct btree_trans trans;
3108         struct btree_iter src, dst, del;
3109         loff_t shift, new_size;
3110         u64 src_start;
3111         int ret = 0;
3112
3113         if ((offset | len) & (block_bytes(c) - 1))
3114                 return -EINVAL;
3115
3116         if (insert) {
3117                 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
3118                         return -EFBIG;
3119
3120                 if (offset >= inode->v.i_size)
3121                         return -EINVAL;
3122
3123                 src_start       = U64_MAX;
3124                 shift           = len;
3125         } else {
3126                 if (offset + len >= inode->v.i_size)
3127                         return -EINVAL;
3128
3129                 src_start       = offset + len;
3130                 shift           = -len;
3131         }
3132
3133         new_size = inode->v.i_size + shift;
3134
3135         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
3136         if (ret)
3137                 return ret;
3138
3139         if (insert) {
3140                 i_size_write(&inode->v, new_size);
3141                 mutex_lock(&inode->ei_update_lock);
3142                 ret = bch2_write_inode_size(c, inode, new_size,
3143                                             ATTR_MTIME|ATTR_CTIME);
3144                 mutex_unlock(&inode->ei_update_lock);
3145         } else {
3146                 s64 i_sectors_delta = 0;
3147
3148                 ret = bch2_fpunch(c, inode_inum(inode),
3149                                   offset >> 9, (offset + len) >> 9,
3150                                   &i_sectors_delta);
3151                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3152
3153                 if (ret)
3154                         return ret;
3155         }
3156
3157         bch2_bkey_buf_init(&copy);
3158         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
3159         bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
3160                         POS(inode->v.i_ino, src_start >> 9),
3161                         BTREE_ITER_INTENT);
3162         bch2_trans_copy_iter(&dst, &src);
3163         bch2_trans_copy_iter(&del, &src);
3164
3165         while (ret == 0 ||
3166                bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
3167                 struct disk_reservation disk_res =
3168                         bch2_disk_reservation_init(c, 0);
3169                 struct bkey_i delete;
3170                 struct bkey_s_c k;
3171                 struct bpos next_pos;
3172                 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
3173                 struct bpos atomic_end;
3174                 unsigned trigger_flags = 0;
3175                 u32 snapshot;
3176
3177                 bch2_trans_begin(&trans);
3178
3179                 ret = bch2_subvolume_get_snapshot(&trans,
3180                                         inode->ei_subvol, &snapshot);
3181                 if (ret)
3182                         continue;
3183
3184                 bch2_btree_iter_set_snapshot(&src, snapshot);
3185                 bch2_btree_iter_set_snapshot(&dst, snapshot);
3186                 bch2_btree_iter_set_snapshot(&del, snapshot);
3187
3188                 bch2_trans_begin(&trans);
3189
3190                 k = insert
3191                         ? bch2_btree_iter_peek_prev(&src)
3192                         : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
3193                 if ((ret = bkey_err(k)))
3194                         continue;
3195
3196                 if (!k.k || k.k->p.inode != inode->v.i_ino)
3197                         break;
3198
3199                 if (insert &&
3200                     bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
3201                         break;
3202 reassemble:
3203                 bch2_bkey_buf_reassemble(&copy, c, k);
3204
3205                 if (insert &&
3206                     bkey_lt(bkey_start_pos(k.k), move_pos))
3207                         bch2_cut_front(move_pos, copy.k);
3208
3209                 copy.k->k.p.offset += shift >> 9;
3210                 bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
3211
3212                 ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
3213                 if (ret)
3214                         continue;
3215
3216                 if (!bkey_eq(atomic_end, copy.k->k.p)) {
3217                         if (insert) {
3218                                 move_pos = atomic_end;
3219                                 move_pos.offset -= shift >> 9;
3220                                 goto reassemble;
3221                         } else {
3222                                 bch2_cut_back(atomic_end, copy.k);
3223                         }
3224                 }
3225
3226                 bkey_init(&delete.k);
3227                 delete.k.p = copy.k->k.p;
3228                 delete.k.size = copy.k->k.size;
3229                 delete.k.p.offset -= shift >> 9;
3230                 bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
3231
3232                 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
3233
3234                 if (copy.k->k.size != k.k->size) {
3235                         /* We might end up splitting compressed extents: */
3236                         unsigned nr_ptrs =
3237                                 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
3238
3239                         ret = bch2_disk_reservation_get(c, &disk_res,
3240                                         copy.k->k.size, nr_ptrs,
3241                                         BCH_DISK_RESERVATION_NOFAIL);
3242                         BUG_ON(ret);
3243                 }
3244
3245                 ret =   bch2_btree_iter_traverse(&del) ?:
3246                         bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
3247                         bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
3248                         bch2_trans_commit(&trans, &disk_res, NULL,
3249                                           BTREE_INSERT_NOFAIL);
3250                 bch2_disk_reservation_put(c, &disk_res);
3251
3252                 if (!ret)
3253                         bch2_btree_iter_set_pos(&src, next_pos);
3254         }
3255         bch2_trans_iter_exit(&trans, &del);
3256         bch2_trans_iter_exit(&trans, &dst);
3257         bch2_trans_iter_exit(&trans, &src);
3258         bch2_trans_exit(&trans);
3259         bch2_bkey_buf_exit(&copy, c);
3260
3261         if (ret)
3262                 return ret;
3263
3264         mutex_lock(&inode->ei_update_lock);
3265         if (!insert) {
3266                 i_size_write(&inode->v, new_size);
3267                 ret = bch2_write_inode_size(c, inode, new_size,
3268                                             ATTR_MTIME|ATTR_CTIME);
3269         } else {
3270                 /* We need an inode update to update bi_journal_seq for fsync: */
3271                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
3272                                        ATTR_MTIME|ATTR_CTIME);
3273         }
3274         mutex_unlock(&inode->ei_update_lock);
3275         return ret;
3276 }
3277
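/*
 * Allocate or reserve blocks for the sector range [start_sector, end_sector):
 * extent slots that are already sufficiently allocated or reserved (or that
 * contain data, unless we're zeroing) are skipped; for the rest we take a
 * quota reservation as needed and call bch2_extent_fallocate().
 */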
3278 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
3279                              u64 start_sector, u64 end_sector)
3280 {
3281         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3282         struct btree_trans trans;
3283         struct btree_iter iter;
3284         struct bpos end_pos = POS(inode->v.i_ino, end_sector);
3285         struct bch_io_opts opts;
3286         int ret = 0;
3287
3288         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
3289         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
3290
3291         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3292                         POS(inode->v.i_ino, start_sector),
3293                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
3294
3295         while (!ret && bkey_lt(iter.pos, end_pos)) {
3296                 s64 i_sectors_delta = 0;
3297                 struct quota_res quota_res = { 0 };
3298                 struct bkey_s_c k;
3299                 unsigned sectors;
3300                 u32 snapshot;
3301
3302                 bch2_trans_begin(&trans);
3303
3304                 ret = bch2_subvolume_get_snapshot(&trans,
3305                                         inode->ei_subvol, &snapshot);
3306                 if (ret)
3307                         goto bkey_err;
3308
3309                 bch2_btree_iter_set_snapshot(&iter, snapshot);
3310
3311                 k = bch2_btree_iter_peek_slot(&iter);
3312                 if ((ret = bkey_err(k)))
3313                         goto bkey_err;
3314
3315                 /* already reserved */
3316                 if (bkey_extent_is_reservation(k) &&
3317                     bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
3318                         bch2_btree_iter_advance(&iter);
3319                         continue;
3320                 }
3321
3322                 if (bkey_extent_is_data(k.k) &&
3323                     !(mode & FALLOC_FL_ZERO_RANGE)) {
3324                         bch2_btree_iter_advance(&iter);
3325                         continue;
3326                 }
3327
3328                 /*
3329                  * XXX: for nocow mode, we should promote shared extents to
3330                  * unshared here
3331                  */
3332
3333                 sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
3334
3335                 if (!bkey_extent_is_allocation(k.k)) {
3336                         ret = bch2_quota_reservation_add(c, inode,
3337                                         &quota_res,
3338                                         sectors, true);
3339                         if (unlikely(ret))
3340                                 goto bkey_err;
3341                 }
3342
3343                 ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
3344                                             sectors, opts, &i_sectors_delta,
3345                                             writepoint_hashed((unsigned long) current));
3346                 if (ret)
3347                         goto bkey_err;
3348
3349                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3350 bkey_err:
3351                 bch2_quota_reservation_put(c, inode, &quota_res);
3352                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3353                         ret = 0;
3354         }
3355
3356         bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
3357         mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
3358
3359         if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
3360                 struct quota_res quota_res = { 0 };
3361                 s64 i_sectors_delta = 0;
3362
3363                 bch2_fpunch_at(&trans, &iter, inode_inum(inode),
3364                                end_sector, &i_sectors_delta);
3365                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3366                 bch2_quota_reservation_put(c, inode, &quota_res);
3367         }
3368
3369         bch2_trans_iter_exit(&trans, &iter);
3370         bch2_trans_exit(&trans);
3371         return ret;
3372 }
3373
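/*
 * Plain fallocate and FALLOC_FL_ZERO_RANGE: for ZERO_RANGE, first zero the
 * partially covered folios at either end and drop the page cache for the
 * range; then allocate/reserve blocks, and finally extend i_size unless
 * FALLOC_FL_KEEP_SIZE was given.
 */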
3374 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
3375                             loff_t offset, loff_t len)
3376 {
3377         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3378         u64 end         = offset + len;
3379         u64 block_start = round_down(offset,    block_bytes(c));
3380         u64 block_end   = round_up(end,         block_bytes(c));
3381         bool truncated_last_page = false;
3382         int ret, ret2 = 0;
3383
3384         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
3385                 ret = inode_newsize_ok(&inode->v, end);
3386                 if (ret)
3387                         return ret;
3388         }
3389
3390         if (mode & FALLOC_FL_ZERO_RANGE) {
3391                 ret = bch2_truncate_folios(inode, offset, end);
3392                 if (unlikely(ret < 0))
3393                         return ret;
3394
3395                 truncated_last_page = ret;
3396
3397                 truncate_pagecache_range(&inode->v, offset, end - 1);
3398
3399                 block_start     = round_up(offset,      block_bytes(c));
3400                 block_end       = round_down(end,       block_bytes(c));
3401         }
3402
3403         ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
3404
3405         /*
3406          * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
3407          * so that the VFS cache i_size is consistent with the btree i_size:
3408          */
3409         if (ret &&
3410             !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
3411                 return ret;
3412
3413         if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
3414                 end = inode->v.i_size;
3415
3416         if (end >= inode->v.i_size &&
3417             (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
3418              !(mode & FALLOC_FL_KEEP_SIZE))) {
3419                 spin_lock(&inode->v.i_lock);
3420                 i_size_write(&inode->v, end);
3421                 spin_unlock(&inode->v.i_lock);
3422
3423                 mutex_lock(&inode->ei_update_lock);
3424                 ret2 = bch2_write_inode_size(c, inode, end, 0);
3425                 mutex_unlock(&inode->ei_update_lock);
3426         }
3427
3428         return ret ?: ret2;
3429 }
3430
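/*
 * fallocate(2) entry point: take a filesystem write ref, the inode lock and
 * the pagecache block, then dispatch on @mode - e.g.
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, offset, len) is
 * handled by bchfs_fpunch(), COLLAPSE_RANGE/INSERT_RANGE by
 * bchfs_fcollapse_finsert(), and plain/KEEP_SIZE/ZERO_RANGE requests by
 * bchfs_fallocate(). Anything else returns -EOPNOTSUPP.
 */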
3431 long bch2_fallocate_dispatch(struct file *file, int mode,
3432                              loff_t offset, loff_t len)
3433 {
3434         struct bch_inode_info *inode = file_bch_inode(file);
3435         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3436         long ret;
3437
3438         if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
3439                 return -EROFS;
3440
3441         inode_lock(&inode->v);
3442         inode_dio_wait(&inode->v);
3443         bch2_pagecache_block_get(inode);
3444
3445         ret = file_modified(file);
3446         if (ret)
3447                 goto err;
3448
3449         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
3450                 ret = bchfs_fallocate(inode, mode, offset, len);
3451         else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
3452                 ret = bchfs_fpunch(inode, offset, len);
3453         else if (mode == FALLOC_FL_INSERT_RANGE)
3454                 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
3455         else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3456                 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
3457         else
3458                 ret = -EOPNOTSUPP;
3459 err:
3460         bch2_pagecache_block_put(inode);
3461         inode_unlock(&inode->v);
3462         bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
3463
3464         return bch2_err_class(ret);
3465 }
3466
3467 /*
3468  * Take a quota reservation for unallocated blocks in a given file range;
3469  * does not check the page cache.
3470  */
3471 static int quota_reserve_range(struct bch_inode_info *inode,
3472                                struct quota_res *res,
3473                                u64 start, u64 end)
3474 {
3475         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3476         struct btree_trans trans;
3477         struct btree_iter iter;
3478         struct bkey_s_c k;
3479         u32 snapshot;
3480         u64 sectors = end - start;
3481         u64 pos = start;
3482         int ret;
3483
3484         bch2_trans_init(&trans, c, 0, 0);
3485 retry:
3486         bch2_trans_begin(&trans);
3487
3488         ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
3489         if (ret)
3490                 goto err;
3491
3492         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3493                              SPOS(inode->v.i_ino, pos, snapshot), 0);
3494
3495         while (!(ret = btree_trans_too_many_iters(&trans)) &&
3496                (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
3497                !(ret = bkey_err(k))) {
3498                 if (bkey_extent_is_allocation(k.k)) {
3499                         u64 s = min(end, k.k->p.offset) -
3500                                 max(start, bkey_start_offset(k.k));
3501                         BUG_ON(s > sectors);
3502                         sectors -= s;
3503                 }
3504                 bch2_btree_iter_advance(&iter);
3505         }
3506         pos = iter.pos.offset;
3507         bch2_trans_iter_exit(&trans, &iter);
3508 err:
3509         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3510                 goto retry;
3511
3512         bch2_trans_exit(&trans);
3513
3514         if (ret)
3515                 return ret;
3516
3517         return bch2_quota_reservation_add(c, inode, res, sectors, true);
3518 }
3519
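/*
 * ->remap_file_range (FICLONE/FICLONERANGE and copy_file_range): offsets must
 * be block aligned and dedup is not supported. With both inodes locked, the
 * destination's page cache is flushed and invalidated and quota is reserved,
 * then the extents are reflinked with bch2_remap_range().
 */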
3520 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
3521                              struct file *file_dst, loff_t pos_dst,
3522                              loff_t len, unsigned remap_flags)
3523 {
3524         struct bch_inode_info *src = file_bch_inode(file_src);
3525         struct bch_inode_info *dst = file_bch_inode(file_dst);
3526         struct bch_fs *c = src->v.i_sb->s_fs_info;
3527         struct quota_res quota_res = { 0 };
3528         s64 i_sectors_delta = 0;
3529         u64 aligned_len;
3530         loff_t ret = 0;
3531
3532         if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
3533                 return -EINVAL;
3534
3535         if (remap_flags & REMAP_FILE_DEDUP)
3536                 return -EOPNOTSUPP;
3537
3538         if ((pos_src & (block_bytes(c) - 1)) ||
3539             (pos_dst & (block_bytes(c) - 1)))
3540                 return -EINVAL;
3541
3542         if (src == dst &&
3543             abs(pos_src - pos_dst) < len)
3544                 return -EINVAL;
3545
3546         bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3547
3548         inode_dio_wait(&src->v);
3549         inode_dio_wait(&dst->v);
3550
3551         ret = generic_remap_file_range_prep(file_src, pos_src,
3552                                             file_dst, pos_dst,
3553                                             &len, remap_flags);
3554         if (ret < 0 || len == 0)
3555                 goto err;
3556
3557         aligned_len = round_up((u64) len, block_bytes(c));
3558
3559         ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
3560                                 pos_dst, pos_dst + len - 1);
3561         if (ret)
3562                 goto err;
3563
3564         ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
3565                                   (pos_dst + aligned_len) >> 9);
3566         if (ret)
3567                 goto err;
3568
3569         file_update_time(file_dst);
3570
3571         mark_pagecache_unallocated(src, pos_src >> 9,
3572                                    (pos_src + aligned_len) >> 9);
3573
3574         ret = bch2_remap_range(c,
3575                                inode_inum(dst), pos_dst >> 9,
3576                                inode_inum(src), pos_src >> 9,
3577                                aligned_len >> 9,
3578                                pos_dst + len, &i_sectors_delta);
3579         if (ret < 0)
3580                 goto err;
3581
3582         /*
3583  * due to alignment, we might have remapped slightly more than requested
3584          */
3585         ret = min((u64) ret << 9, (u64) len);
3586
3587         i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
3588
3589         spin_lock(&dst->v.i_lock);
3590         if (pos_dst + ret > dst->v.i_size)
3591                 i_size_write(&dst->v, pos_dst + ret);
3592         spin_unlock(&dst->v.i_lock);
3593
3594         if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
3595             IS_SYNC(file_inode(file_dst)))
3596                 ret = bch2_flush_inode(c, dst);
3597 err:
3598         bch2_quota_reservation_put(c, dst, &quota_res);
3599         bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3600
3601         return bch2_err_class(ret);
3602 }
3603
3604 /* fseek: */
3605
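/*
 * Return the byte offset within @folio of the first sector at or after
 * @offset whose state is at least SECTOR_dirty (i.e. counts as data for
 * SEEK_DATA), or -1 if there is none.
 */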
3606 static int folio_data_offset(struct folio *folio, unsigned offset)
3607 {
3608         struct bch_folio *s = bch2_folio(folio);
3609         unsigned i, sectors = folio_sectors(folio);
3610
3611         if (s)
3612                 for (i = offset >> 9; i < sectors; i++)
3613                         if (s->s[i].state >= SECTOR_dirty)
3614                                 return i << 9;
3615
3616         return -1;
3617 }
3618
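/*
 * Scan the page cache in [start_offset, end_offset) for the first folio
 * containing dirty or allocated sectors; return that position, or end_offset
 * if nothing is found.
 */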
3619 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
3620                                        loff_t start_offset,
3621                                        loff_t end_offset)
3622 {
3623         struct folio_batch fbatch;
3624         pgoff_t start_index     = start_offset >> PAGE_SHIFT;
3625         pgoff_t end_index       = end_offset >> PAGE_SHIFT;
3626         pgoff_t index           = start_index;
3627         unsigned i;
3628         loff_t ret;
3629         int offset;
3630
3631         folio_batch_init(&fbatch);
3632
3633         while (filemap_get_folios(vinode->i_mapping,
3634                                   &index, end_index, &fbatch)) {
3635                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
3636                         struct folio *folio = fbatch.folios[i];
3637
3638                         folio_lock(folio);
3639                         offset = folio_data_offset(folio,
3640                                         max(folio_pos(folio), start_offset) -
3641                                         folio_pos(folio));
3642                         if (offset >= 0) {
3643                                 ret = clamp(folio_pos(folio) + offset,
3644                                             start_offset, end_offset);
3645                                 folio_unlock(folio);
3646                                 folio_batch_release(&fbatch);
3647                                 return ret;
3648                         }
3649                         folio_unlock(folio);
3650                 }
3651                 folio_batch_release(&fbatch);
3652                 cond_resched();
3653         }
3654
3655         return end_offset;
3656 }
3657
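/*
 * SEEK_DATA: find the first data extent at or after @offset in the extents
 * btree, then check the page cache in between for data that hasn't been
 * written back yet; returns -ENXIO if @offset is at or past EOF.
 */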
3658 static loff_t bch2_seek_data(struct file *file, u64 offset)
3659 {
3660         struct bch_inode_info *inode = file_bch_inode(file);
3661         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3662         struct btree_trans trans;
3663         struct btree_iter iter;
3664         struct bkey_s_c k;
3665         subvol_inum inum = inode_inum(inode);
3666         u64 isize, next_data = MAX_LFS_FILESIZE;
3667         u32 snapshot;
3668         int ret;
3669
3670         isize = i_size_read(&inode->v);
3671         if (offset >= isize)
3672                 return -ENXIO;
3673
3674         bch2_trans_init(&trans, c, 0, 0);
3675 retry:
3676         bch2_trans_begin(&trans);
3677
3678         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3679         if (ret)
3680                 goto err;
3681
3682         for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
3683                            SPOS(inode->v.i_ino, offset >> 9, snapshot),
3684                            POS(inode->v.i_ino, U64_MAX),
3685                            0, k, ret) {
3686                 if (bkey_extent_is_data(k.k)) {
3687                         next_data = max(offset, bkey_start_offset(k.k) << 9);
3688                         break;
3689                 } else if (k.k->p.offset << 9 > isize)
3690                         break;
3691         }
3692         bch2_trans_iter_exit(&trans, &iter);
3693 err:
3694         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3695                 goto retry;
3696
3697         bch2_trans_exit(&trans);
3698         if (ret)
3699                 return ret;
3700
3701         if (next_data > offset)
3702                 next_data = bch2_seek_pagecache_data(&inode->v,
3703                                                      offset, next_data);
3704
3705         if (next_data >= isize)
3706                 return -ENXIO;
3707
3708         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
3709 }
3710
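/*
 * Returns true if the folio covering *offset contains a hole (an absent
 * folio, or a sector that isn't dirty/allocated) at or after *offset,
 * updating *offset to point at it; otherwise advances *offset past the folio
 * and returns false.
 */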
3711 static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
3712 {
3713         struct folio *folio;
3714         struct bch_folio *s;
3715         unsigned i, sectors, f_offset;
3716         bool ret = true;
3717
3718         folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
3719         if (!folio)
3720                 return true;
3721
3722         s = bch2_folio(folio);
3723         if (!s)
3724                 goto unlock;
3725
3726         sectors = folio_sectors(folio);
3727         f_offset = *offset - folio_pos(folio);
3728
3729         for (i = f_offset >> 9; i < sectors; i++)
3730                 if (s->s[i].state < SECTOR_dirty) {
3731                         *offset = max(*offset, folio_pos(folio) + (i << 9));
3732                         goto unlock;
3733                 }
3734
3735         *offset = folio_end_pos(folio);
3736         ret = false;
3737 unlock:
3738         folio_unlock(folio);
3739         return ret;
3740 }
3741
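/*
 * Walk the page cache forward from start_offset, folio by folio, until
 * folio_hole_offset() reports a hole or end_offset is reached.
 */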
3742 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3743                                        loff_t start_offset,
3744                                        loff_t end_offset)
3745 {
3746         struct address_space *mapping = vinode->i_mapping;
3747         loff_t offset = start_offset;
3748
3749         while (offset < end_offset &&
3750                !folio_hole_offset(mapping, &offset))
3751                 ;
3752
3753         return min(offset, end_offset);
3754 }
3755
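/*
 * SEEK_HOLE: walk extent slots from @offset; for keys that aren't data, check
 * that the page cache doesn't have dirty data covering them before reporting
 * a hole. The result is clamped to i_size.
 */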
3756 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3757 {
3758         struct bch_inode_info *inode = file_bch_inode(file);
3759         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3760         struct btree_trans trans;
3761         struct btree_iter iter;
3762         struct bkey_s_c k;
3763         subvol_inum inum = inode_inum(inode);
3764         u64 isize, next_hole = MAX_LFS_FILESIZE;
3765         u32 snapshot;
3766         int ret;
3767
3768         isize = i_size_read(&inode->v);
3769         if (offset >= isize)
3770                 return -ENXIO;
3771
3772         bch2_trans_init(&trans, c, 0, 0);
3773 retry:
3774         bch2_trans_begin(&trans);
3775
3776         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3777         if (ret)
3778                 goto err;
3779
3780         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3781                            SPOS(inode->v.i_ino, offset >> 9, snapshot),
3782                            BTREE_ITER_SLOTS, k, ret) {
3783                 if (k.k->p.inode != inode->v.i_ino) {
3784                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3785                                         offset, MAX_LFS_FILESIZE);
3786                         break;
3787                 } else if (!bkey_extent_is_data(k.k)) {
3788                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3789                                         max(offset, bkey_start_offset(k.k) << 9),
3790                                         k.k->p.offset << 9);
3791
3792                         if (next_hole < k.k->p.offset << 9)
3793                                 break;
3794                 } else {
3795                         offset = max(offset, bkey_start_offset(k.k) << 9);
3796                 }
3797         }
3798         bch2_trans_iter_exit(&trans, &iter);
3799 err:
3800         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3801                 goto retry;
3802
3803         bch2_trans_exit(&trans);
3804         if (ret)
3805                 return ret;
3806
3807         if (next_hole > isize)
3808                 next_hole = isize;
3809
3810         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3811 }
3812
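/*
 * llseek: SEEK_SET/SEEK_CUR/SEEK_END are handled generically; SEEK_DATA and
 * SEEK_HOLE go through the btree and page cache searches above.
 */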
3813 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3814 {
3815         loff_t ret;
3816
3817         switch (whence) {
3818         case SEEK_SET:
3819         case SEEK_CUR:
3820         case SEEK_END:
3821                 ret = generic_file_llseek(file, offset, whence);
3822                 break;
3823         case SEEK_DATA:
3824                 ret = bch2_seek_data(file, offset);
3825                 break;
3826         case SEEK_HOLE:
3827                 ret = bch2_seek_hole(file, offset);
3828                 break;
3829         default:
3830                 ret = -EINVAL;
3831                 break;
3832         }
3833
3834         return bch2_err_class(ret);
3835 }
3836
3837 void bch2_fs_fsio_exit(struct bch_fs *c)
3838 {
3839         bioset_exit(&c->nocow_flush_bioset);
3840         bioset_exit(&c->dio_write_bioset);
3841         bioset_exit(&c->dio_read_bioset);
3842         bioset_exit(&c->writepage_bioset);
3843 }
3844
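/*
 * Allocate the biosets used by the buffered writeback, direct I/O and nocow
 * flush paths; bch2_fs_fsio_exit() frees them.
 */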
3845 int bch2_fs_fsio_init(struct bch_fs *c)
3846 {
3847         int ret = 0;
3848
3849         pr_verbose_init(c->opts, "");
3850
3851         if (bioset_init(&c->writepage_bioset,
3852                         4, offsetof(struct bch_writepage_io, op.wbio.bio),
3853                         BIOSET_NEED_BVECS))
3854                 return -BCH_ERR_ENOMEM_writepage_bioset_init;
3855
3856         if (bioset_init(&c->dio_read_bioset,
3857                         4, offsetof(struct dio_read, rbio.bio),
3858                         BIOSET_NEED_BVECS))
3859                 return -BCH_ERR_ENOMEM_dio_read_bioset_init;
3860
3861         if (bioset_init(&c->dio_write_bioset,
3862                         4, offsetof(struct dio_write, op.wbio.bio),
3863                         BIOSET_NEED_BVECS))
3864                 return -BCH_ERR_ENOMEM_dio_write_bioset_init;
3865
3866         if (bioset_init(&c->nocow_flush_bioset,
3867                         1, offsetof(struct nocow_flush, bio), 0))
3868                 return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
3869
3870         pr_verbose_init(c->opts, "ret %i", ret);
3871         return ret;
3872 }
3873
3874 #endif /* NO_BCACHEFS_FS */