1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_FS
3
4 #include "bcachefs.h"
5 #include "alloc_foreground.h"
6 #include "bkey_buf.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "clock.h"
10 #include "error.h"
11 #include "extents.h"
12 #include "extent_update.h"
13 #include "fs.h"
14 #include "fs-io.h"
15 #include "fsck.h"
16 #include "inode.h"
17 #include "journal.h"
18 #include "io.h"
19 #include "keylist.h"
20 #include "quota.h"
21 #include "reflink.h"
22 #include "trace.h"
23
24 #include <linux/aio.h>
25 #include <linux/backing-dev.h>
26 #include <linux/falloc.h>
27 #include <linux/migrate.h>
28 #include <linux/mmu_context.h>
29 #include <linux/pagevec.h>
30 #include <linux/rmap.h>
31 #include <linux/sched/signal.h>
32 #include <linux/task_io_accounting_ops.h>
33 #include <linux/uio.h>
34 #include <linux/writeback.h>
35
36 #include <trace/events/writeback.h>
37
38 /*
39  * Use u64 for the end pos and sector helpers because if the folio covers the
40  * max supported range of the mapping, the start offset of the next folio
41  * overflows loff_t. This breaks much of the range based processing in the
42  * buffered write path.
43  */
44 static inline u64 folio_end_pos(struct folio *folio)
45 {
46         return folio_pos(folio) + folio_size(folio);
47 }
48
49 static inline size_t folio_sectors(struct folio *folio)
50 {
51         return PAGE_SECTORS << folio_order(folio);
52 }
53
54 static inline loff_t folio_sector(struct folio *folio)
55 {
56         return folio_pos(folio) >> 9;
57 }
58
59 static inline u64 folio_end_sector(struct folio *folio)
60 {
61         return folio_end_pos(folio) >> 9;
62 }
63
64 typedef DARRAY(struct folio *) folios;
65
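/*
 * Get a contiguous run of folios for the byte range [start, end): stop at the
 * first gap or allocation failure, so the result is always a single contiguous
 * span (folio creation is only attempted within the first 1MB past @start).
 */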
66 static int filemap_get_contig_folios_d(struct address_space *mapping,
67                                        loff_t start, u64 end,
68                                        int fgp_flags, gfp_t gfp,
69                                        folios *folios)
70 {
71         struct folio *f;
72         u64 pos = start;
73         int ret = 0;
74
75         while (pos < end) {
76                 if ((u64) pos >= (u64) start + (1ULL << 20))
77                         fgp_flags &= ~FGP_CREAT;
78
79                 ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
80                 if (ret)
81                         break;
82
83                 f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
84                 if (!f)
85                         break;
86
87                 BUG_ON(folios->nr && folio_pos(f) != pos);
88
89                 pos = folio_end_pos(f);
90                 darray_push(folios, f);
91         }
92
93         if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
94                 ret = -ENOMEM;
95
96         return folios->nr ? 0 : ret;
97 }
98
99 struct nocow_flush {
100         struct closure  *cl;
101         struct bch_dev  *ca;
102         struct bio      bio;
103 };
104
105 static void nocow_flush_endio(struct bio *_bio)
106 {
107
108         struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
109
110         closure_put(bio->cl);
111         percpu_ref_put(&bio->ca->io_ref);
112         bio_put(&bio->bio);
113 }
114
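/*
 * Issue a REQ_OP_FLUSH to every device with outstanding nocow writes for this
 * inode (tracked in ei_devs_need_flush); each bio holds a ref on @cl and on
 * the device's io_ref, both dropped in nocow_flush_endio().
 */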
115 static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
116                                                 struct bch_inode_info *inode,
117                                                 struct closure *cl)
118 {
119         struct nocow_flush *bio;
120         struct bch_dev *ca;
121         struct bch_devs_mask devs;
122         unsigned dev;
123
124         dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
125         if (dev == BCH_SB_MEMBERS_MAX)
126                 return;
127
128         devs = inode->ei_devs_need_flush;
129         memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
130
131         for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
132                 rcu_read_lock();
133                 ca = rcu_dereference(c->devs[dev]);
134                 if (ca && !percpu_ref_tryget(&ca->io_ref))
135                         ca = NULL;
136                 rcu_read_unlock();
137
138                 if (!ca)
139                         continue;
140
141                 bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
142                                                     REQ_OP_FLUSH,
143                                                     GFP_KERNEL,
144                                                     &c->nocow_flush_bioset),
145                                    struct nocow_flush, bio);
146                 bio->cl                 = cl;
147                 bio->ca                 = ca;
148                 bio->bio.bi_end_io      = nocow_flush_endio;
149                 closure_bio_submit(&bio->bio, cl);
150         }
151 }
152
153 static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
154                                          struct bch_inode_info *inode)
155 {
156         struct closure cl;
157
158         closure_init_stack(&cl);
159         bch2_inode_flush_nocow_writes_async(c, inode, &cl);
160         closure_sync(&cl);
161
162         return 0;
163 }
164
165 static inline bool bio_full(struct bio *bio, unsigned len)
166 {
167         if (bio->bi_vcnt >= bio->bi_max_vecs)
168                 return true;
169         if (bio->bi_iter.bi_size > UINT_MAX - len)
170                 return true;
171         return false;
172 }
173
174 static inline struct address_space *faults_disabled_mapping(void)
175 {
176         return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
177 }
178
179 static inline void set_fdm_dropped_locks(void)
180 {
181         current->faults_disabled_mapping =
182                 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
183 }
184
185 static inline bool fdm_dropped_locks(void)
186 {
187         return ((unsigned long) current->faults_disabled_mapping) & 1;
188 }
189
190 struct quota_res {
191         u64                             sectors;
192 };
193
194 struct bch_writepage_io {
195         struct bch_inode_info           *inode;
196
197         /* must be last: */
198         struct bch_write_op             op;
199 };
200
201 struct dio_write {
202         struct kiocb                    *req;
203         struct address_space            *mapping;
204         struct bch_inode_info           *inode;
205         struct mm_struct                *mm;
206         unsigned                        loop:1,
207                                         extending:1,
208                                         sync:1,
209                                         flush:1,
210                                         free_iov:1;
211         struct quota_res                quota_res;
212         u64                             written;
213
214         struct iov_iter                 iter;
215         struct iovec                    inline_vecs[2];
216
217         /* must be last: */
218         struct bch_write_op             op;
219 };
220
221 struct dio_read {
222         struct closure                  cl;
223         struct kiocb                    *req;
224         long                            ret;
225         bool                            should_dirty;
226         struct bch_read_bio             rbio;
227 };
228
229 /* pagecache_block must be held */
230 static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
231                                               loff_t start, loff_t end)
232 {
233         int ret;
234
235         /*
236          * XXX: the way this is currently implemented, we can spin if a process
237          * is continually redirtying a specific page
238          */
239         do {
240                 if (!mapping->nrpages)
241                         return 0;
242
243                 ret = filemap_write_and_wait_range(mapping, start, end);
244                 if (ret)
245                         break;
246
247                 if (!mapping->nrpages)
248                         return 0;
249
250                 ret = invalidate_inode_pages2_range(mapping,
251                                 start >> PAGE_SHIFT,
252                                 end >> PAGE_SHIFT);
253         } while (ret == -EBUSY);
254
255         return ret;
256 }
257
258 /* quotas */
259
260 #ifdef CONFIG_BCACHEFS_QUOTA
261
262 static void __bch2_quota_reservation_put(struct bch_fs *c,
263                                          struct bch_inode_info *inode,
264                                          struct quota_res *res)
265 {
266         BUG_ON(res->sectors > inode->ei_quota_reserved);
267
268         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
269                         -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
270         inode->ei_quota_reserved -= res->sectors;
271         res->sectors = 0;
272 }
273
274 static void bch2_quota_reservation_put(struct bch_fs *c,
275                                        struct bch_inode_info *inode,
276                                        struct quota_res *res)
277 {
278         if (res->sectors) {
279                 mutex_lock(&inode->ei_quota_lock);
280                 __bch2_quota_reservation_put(c, inode, res);
281                 mutex_unlock(&inode->ei_quota_lock);
282         }
283 }
284
285 static int bch2_quota_reservation_add(struct bch_fs *c,
286                                       struct bch_inode_info *inode,
287                                       struct quota_res *res,
288                                       u64 sectors,
289                                       bool check_enospc)
290 {
291         int ret;
292
293         if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
294                 return 0;
295
296         mutex_lock(&inode->ei_quota_lock);
297         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
298                               check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
299         if (likely(!ret)) {
300                 inode->ei_quota_reserved += sectors;
301                 res->sectors += sectors;
302         }
303         mutex_unlock(&inode->ei_quota_lock);
304
305         return ret;
306 }
307
308 #else
309
310 static void __bch2_quota_reservation_put(struct bch_fs *c,
311                                          struct bch_inode_info *inode,
312                                          struct quota_res *res) {}
313
314 static void bch2_quota_reservation_put(struct bch_fs *c,
315                                        struct bch_inode_info *inode,
316                                        struct quota_res *res) {}
317
318 static int bch2_quota_reservation_add(struct bch_fs *c,
319                                       struct bch_inode_info *inode,
320                                       struct quota_res *res,
321                                       unsigned sectors,
322                                       bool check_enospc)
323 {
324         return 0;
325 }
326
327 #endif
328
329 /* i_size updates: */
330
331 struct inode_new_size {
332         loff_t          new_size;
333         u64             now;
334         unsigned        fields;
335 };
336
337 static int inode_set_size(struct bch_inode_info *inode,
338                           struct bch_inode_unpacked *bi,
339                           void *p)
340 {
341         struct inode_new_size *s = p;
342
343         bi->bi_size = s->new_size;
344         if (s->fields & ATTR_ATIME)
345                 bi->bi_atime = s->now;
346         if (s->fields & ATTR_MTIME)
347                 bi->bi_mtime = s->now;
348         if (s->fields & ATTR_CTIME)
349                 bi->bi_ctime = s->now;
350
351         return 0;
352 }
353
354 int __must_check bch2_write_inode_size(struct bch_fs *c,
355                                        struct bch_inode_info *inode,
356                                        loff_t new_size, unsigned fields)
357 {
358         struct inode_new_size s = {
359                 .new_size       = new_size,
360                 .now            = bch2_current_time(c),
361                 .fields         = fields,
362         };
363
364         return bch2_write_inode(c, inode, inode_set_size, &s, fields);
365 }
366
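/*
 * Update the vfs inode's i_blocks: consume a prior quota reservation when one
 * covers the change, otherwise account directly via bch2_quota_acct().
 */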
367 static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
368                            struct quota_res *quota_res, s64 sectors)
369 {
370         bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
371                                 "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
372                                 inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
373                                 inode->ei_inode.bi_sectors);
374         inode->v.i_blocks += sectors;
375
376 #ifdef CONFIG_BCACHEFS_QUOTA
377         if (quota_res &&
378             !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
379             sectors > 0) {
380                 BUG_ON(sectors > quota_res->sectors);
381                 BUG_ON(sectors > inode->ei_quota_reserved);
382
383                 quota_res->sectors -= sectors;
384                 inode->ei_quota_reserved -= sectors;
385         } else {
386                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
387         }
388 #endif
389 }
390
391 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
392                            struct quota_res *quota_res, s64 sectors)
393 {
394         if (sectors) {
395                 mutex_lock(&inode->ei_quota_lock);
396                 __i_sectors_acct(c, inode, quota_res, sectors);
397                 mutex_unlock(&inode->ei_quota_lock);
398         }
399 }
400
401 /* page state: */
402
403 /* stored in page->private: */
404
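/*
 * Per-sector pagecache state, kept in struct bch_folio (attached via
 * folio->private). The x-macro expands to both the enum below and the
 * bch2_folio_sector_states[] name table:
 *
 *  unallocated    - no extent or reservation backs this sector
 *  reserved       - covered by an on-disk reservation (e.g. fallocate)
 *  dirty          - dirty in the pagecache; writeback will allocate
 *  dirty_reserved - dirty and also covered by a reservation
 *  allocated      - a real extent exists on disk
 */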
405 #define BCH_FOLIO_SECTOR_STATE()        \
406         x(unallocated)                  \
407         x(reserved)                     \
408         x(dirty)                        \
409         x(dirty_reserved)               \
410         x(allocated)
411
412 enum bch_folio_sector_state {
413 #define x(n)    SECTOR_##n,
414         BCH_FOLIO_SECTOR_STATE()
415 #undef x
416 };
417
418 const char * const bch2_folio_sector_states[] = {
419 #define x(n)    #n,
420         BCH_FOLIO_SECTOR_STATE()
421 #undef x
422         NULL
423 };
424
425 static inline enum bch_folio_sector_state
426 folio_sector_dirty(enum bch_folio_sector_state state)
427 {
428         switch (state) {
429         case SECTOR_unallocated:
430                 return SECTOR_dirty;
431         case SECTOR_reserved:
432                 return SECTOR_dirty_reserved;
433         default:
434                 return state;
435         }
436 }
437
438 static inline enum bch_folio_sector_state
439 folio_sector_undirty(enum bch_folio_sector_state state)
440 {
441         switch (state) {
442         case SECTOR_dirty:
443                 return SECTOR_unallocated;
444         case SECTOR_dirty_reserved:
445                 return SECTOR_reserved;
446         default:
447                 return state;
448         }
449 }
450
451 static inline enum bch_folio_sector_state
452 folio_sector_reserve(enum bch_folio_sector_state state)
453 {
454         switch (state) {
455         case SECTOR_unallocated:
456                 return SECTOR_reserved;
457         case SECTOR_dirty:
458                 return SECTOR_dirty_reserved;
459         default:
460                 return state;
461         }
462 }
463
464 struct bch_folio_sector {
465         /* Uncompressed, fully allocated replicas (or on disk reservation): */
466         unsigned                nr_replicas:4;
467
468         /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
469         unsigned                replicas_reserved:4;
470
471         /* i_sectors: */
472         enum bch_folio_sector_state state:8;
473 };
474
475 struct bch_folio {
476         spinlock_t              lock;
477         atomic_t                write_count;
478         /*
479          * Is the sector state up to date with the btree?
480          * (Not the data itself)
481          */
482         bool                    uptodate;
483         struct bch_folio_sector s[];
484 };
485
486 static inline void folio_sector_set(struct folio *folio,
487                              struct bch_folio *s,
488                              unsigned i, unsigned n)
489 {
490         s->s[i].state = n;
491 }
492
493 /* file offset (to folio offset) to bch_folio_sector index */
494 static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
495 {
496         u64 f_offset = pos - folio_pos(folio);
497         BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
498         return f_offset >> SECTOR_SHIFT;
499 }
500
501 static inline struct bch_folio *__bch2_folio(struct folio *folio)
502 {
503         return folio_has_private(folio)
504                 ? (struct bch_folio *) folio_get_private(folio)
505                 : NULL;
506 }
507
508 static inline struct bch_folio *bch2_folio(struct folio *folio)
509 {
510         EBUG_ON(!folio_test_locked(folio));
511
512         return __bch2_folio(folio);
513 }
514
515 /* for newly allocated folios: */
516 static void __bch2_folio_release(struct folio *folio)
517 {
518         kfree(folio_detach_private(folio));
519 }
520
521 static void bch2_folio_release(struct folio *folio)
522 {
523         EBUG_ON(!folio_test_locked(folio));
524         __bch2_folio_release(folio);
525 }
526
527 /* for newly allocated folios: */
528 static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
529 {
530         struct bch_folio *s;
531
532         s = kzalloc(sizeof(*s) +
533                     sizeof(struct bch_folio_sector) *
534                     folio_sectors(folio), gfp);
535         if (!s)
536                 return NULL;
537
538         spin_lock_init(&s->lock);
539         folio_attach_private(folio, s);
540         return s;
541 }
542
543 static struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
544 {
545         return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
546 }
547
548 static unsigned bkey_to_sector_state(struct bkey_s_c k)
549 {
550         if (bkey_extent_is_reservation(k))
551                 return SECTOR_reserved;
552         if (bkey_extent_is_allocation(k.k))
553                 return SECTOR_allocated;
554         return SECTOR_unallocated;
555 }
556
557 static void __bch2_folio_set(struct folio *folio,
558                              unsigned pg_offset, unsigned pg_len,
559                              unsigned nr_ptrs, unsigned state)
560 {
561         struct bch_folio *s = bch2_folio(folio);
562         unsigned i, sectors = folio_sectors(folio);
563
564         BUG_ON(pg_offset >= sectors);
565         BUG_ON(pg_offset + pg_len > sectors);
566
567         spin_lock(&s->lock);
568
569         for (i = pg_offset; i < pg_offset + pg_len; i++) {
570                 s->s[i].nr_replicas     = nr_ptrs;
571                 folio_sector_set(folio, s, i, state);
572         }
573
574         if (i == sectors)
575                 s->uptodate = true;
576
577         spin_unlock(&s->lock);
578 }
579
580 /*
581  * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
582  * extents btree:
583  */
584 static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
585                           struct folio **folios, unsigned nr_folios)
586 {
587         struct btree_trans trans;
588         struct btree_iter iter;
589         struct bkey_s_c k;
590         struct bch_folio *s;
591         u64 offset = folio_sector(folios[0]);
592         unsigned folio_idx;
593         u32 snapshot;
594         bool need_set = false;
595         int ret;
596
597         for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
598                 s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
599                 if (!s)
600                         return -ENOMEM;
601
602                 need_set |= !s->uptodate;
603         }
604
605         if (!need_set)
606                 return 0;
607
608         folio_idx = 0;
609         bch2_trans_init(&trans, c, 0, 0);
610 retry:
611         bch2_trans_begin(&trans);
612
613         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
614         if (ret)
615                 goto err;
616
617         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
618                            SPOS(inum.inum, offset, snapshot),
619                            BTREE_ITER_SLOTS, k, ret) {
620                 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
621                 unsigned state = bkey_to_sector_state(k);
622
623                 while (folio_idx < nr_folios) {
624                         struct folio *folio = folios[folio_idx];
625                         u64 folio_start = folio_sector(folio);
626                         u64 folio_end   = folio_end_sector(folio);
627                         unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
628                         unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
629
630                         BUG_ON(k.k->p.offset < folio_start);
631                         BUG_ON(bkey_start_offset(k.k) > folio_end);
632
633                         if (!bch2_folio(folio)->uptodate)
634                                 __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
635
636                         if (k.k->p.offset < folio_end)
637                                 break;
638                         folio_idx++;
639                 }
640
641                 if (folio_idx == nr_folios)
642                         break;
643         }
644
645         offset = iter.pos.offset;
646         bch2_trans_iter_exit(&trans, &iter);
647 err:
648         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
649                 goto retry;
650         bch2_trans_exit(&trans);
651
652         return ret;
653 }
654
655 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
656 {
657         struct bvec_iter iter;
658         struct folio_vec fv;
659         unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
660                 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
661         unsigned state = bkey_to_sector_state(k);
662
663         bio_for_each_folio(fv, bio, iter)
664                 __bch2_folio_set(fv.fv_folio,
665                                  fv.fv_offset >> 9,
666                                  fv.fv_len >> 9,
667                                  nr_ptrs, state);
668 }
669
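/*
 * Forget cached allocation info (nr_replicas) for sectors [start, end) in the
 * pagecache, so that future writes to them take a fresh disk reservation.
 */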
670 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
671                                        u64 start, u64 end)
672 {
673         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
674         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
675         struct folio_batch fbatch;
676         unsigned i, j;
677
678         if (end <= start)
679                 return;
680
681         folio_batch_init(&fbatch);
682
683         while (filemap_get_folios(inode->v.i_mapping,
684                                   &index, end_index, &fbatch)) {
685                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
686                         struct folio *folio = fbatch.folios[i];
687                         u64 folio_start = folio_sector(folio);
688                         u64 folio_end = folio_end_sector(folio);
689                         unsigned folio_offset = max(start, folio_start) - folio_start;
690                         unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
691                         struct bch_folio *s;
692
693                         BUG_ON(end <= folio_start);
694
695                         folio_lock(folio);
696                         s = bch2_folio(folio);
697
698                         if (s) {
699                                 spin_lock(&s->lock);
700                                 for (j = folio_offset; j < folio_offset + folio_len; j++)
701                                         s->s[j].nr_replicas = 0;
702                                 spin_unlock(&s->lock);
703                         }
704
705                         folio_unlock(folio);
706                 }
707                 folio_batch_release(&fbatch);
708                 cond_resched();
709         }
710 }
711
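/*
 * Mark sectors [start, end) in the pagecache as covered by an on-disk
 * reservation; dirty sectors that get upgraded to dirty_reserved are
 * subtracted back out of i_blocks via i_sectors_acct().
 */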
712 static void mark_pagecache_reserved(struct bch_inode_info *inode,
713                                     u64 start, u64 end)
714 {
715         struct bch_fs *c = inode->v.i_sb->s_fs_info;
716         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
717         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
718         struct folio_batch fbatch;
719         s64 i_sectors_delta = 0;
720         unsigned i, j;
721
722         if (end <= start)
723                 return;
724
725         folio_batch_init(&fbatch);
726
727         while (filemap_get_folios(inode->v.i_mapping,
728                                   &index, end_index, &fbatch)) {
729                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
730                         struct folio *folio = fbatch.folios[i];
731                         u64 folio_start = folio_sector(folio);
732                         u64 folio_end = folio_end_sector(folio);
733                         unsigned folio_offset = max(start, folio_start) - folio_start;
734                         unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
735                         struct bch_folio *s;
736
737                         BUG_ON(end <= folio_start);
738
739                         folio_lock(folio);
740                         s = bch2_folio(folio);
741
742                         if (s) {
743                                 spin_lock(&s->lock);
744                                 for (j = folio_offset; j < folio_offset + folio_len; j++) {
745                                         i_sectors_delta -= s->s[j].state == SECTOR_dirty;
746                                         folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
747                                 }
748                                 spin_unlock(&s->lock);
749                         }
750
751                         folio_unlock(folio);
752                 }
753                 folio_batch_release(&fbatch);
754                 cond_resched();
755         }
756
757         i_sectors_acct(c, inode, NULL, i_sectors_delta);
758 }
759
760 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
761 {
762         /* XXX: this should not be open coded */
763         return inode->ei_inode.bi_data_replicas
764                 ? inode->ei_inode.bi_data_replicas - 1
765                 : c->opts.data_replicas;
766 }
767
768 static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
769                                           unsigned nr_replicas)
770 {
771         return max(0, (int) nr_replicas -
772                    s->nr_replicas -
773                    s->replicas_reserved);
774 }
775
776 static int bch2_get_folio_disk_reservation(struct bch_fs *c,
777                                 struct bch_inode_info *inode,
778                                 struct folio *folio, bool check_enospc)
779 {
780         struct bch_folio *s = bch2_folio_create(folio, 0);
781         unsigned nr_replicas = inode_nr_replicas(c, inode);
782         struct disk_reservation disk_res = { 0 };
783         unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
784         int ret;
785
786         if (!s)
787                 return -ENOMEM;
788
789         for (i = 0; i < sectors; i++)
790                 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
791
792         if (!disk_res_sectors)
793                 return 0;
794
795         ret = bch2_disk_reservation_get(c, &disk_res,
796                                         disk_res_sectors, 1,
797                                         !check_enospc
798                                         ? BCH_DISK_RESERVATION_NOFAIL
799                                         : 0);
800         if (unlikely(ret))
801                 return ret;
802
803         for (i = 0; i < sectors; i++)
804                 s->s[i].replicas_reserved +=
805                         sectors_to_reserve(&s->s[i], nr_replicas);
806
807         return 0;
808 }
809
810 struct bch2_folio_reservation {
811         struct disk_reservation disk;
812         struct quota_res        quota;
813 };
814
815 static void bch2_folio_reservation_init(struct bch_fs *c,
816                         struct bch_inode_info *inode,
817                         struct bch2_folio_reservation *res)
818 {
819         memset(res, 0, sizeof(*res));
820
821         res->disk.nr_replicas = inode_nr_replicas(c, inode);
822 }
823
824 static void bch2_folio_reservation_put(struct bch_fs *c,
825                         struct bch_inode_info *inode,
826                         struct bch2_folio_reservation *res)
827 {
828         bch2_disk_reservation_put(c, &res->disk);
829         bch2_quota_reservation_put(c, inode, &res->quota);
830 }
831
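/*
 * Reserve disk space and quota for a write to [offset, offset + len) within
 * @folio; if the quota reservation fails, the disk reservation just taken is
 * released again before returning the error.
 */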
832 static int bch2_folio_reservation_get(struct bch_fs *c,
833                         struct bch_inode_info *inode,
834                         struct folio *folio,
835                         struct bch2_folio_reservation *res,
836                         unsigned offset, unsigned len)
837 {
838         struct bch_folio *s = bch2_folio_create(folio, 0);
839         unsigned i, disk_sectors = 0, quota_sectors = 0;
840         int ret;
841
842         if (!s)
843                 return -ENOMEM;
844
845         BUG_ON(!s->uptodate);
846
847         for (i = round_down(offset, block_bytes(c)) >> 9;
848              i < round_up(offset + len, block_bytes(c)) >> 9;
849              i++) {
850                 disk_sectors += sectors_to_reserve(&s->s[i],
851                                                 res->disk.nr_replicas);
852                 quota_sectors += s->s[i].state == SECTOR_unallocated;
853         }
854
855         if (disk_sectors) {
856                 ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
857                 if (unlikely(ret))
858                         return ret;
859         }
860
861         if (quota_sectors) {
862                 ret = bch2_quota_reservation_add(c, inode, &res->quota,
863                                                  quota_sectors, true);
864                 if (unlikely(ret)) {
865                         struct disk_reservation tmp = {
866                                 .sectors = disk_sectors
867                         };
868
869                         bch2_disk_reservation_put(c, &tmp);
870                         res->disk.sectors -= disk_sectors;
871                         return ret;
872                 }
873         }
874
875         return 0;
876 }
877
878 static void bch2_clear_folio_bits(struct folio *folio)
879 {
880         struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
881         struct bch_fs *c = inode->v.i_sb->s_fs_info;
882         struct bch_folio *s = bch2_folio(folio);
883         struct disk_reservation disk_res = { 0 };
884         int i, sectors = folio_sectors(folio), dirty_sectors = 0;
885
886         if (!s)
887                 return;
888
889         EBUG_ON(!folio_test_locked(folio));
890         EBUG_ON(folio_test_writeback(folio));
891
892         for (i = 0; i < sectors; i++) {
893                 disk_res.sectors += s->s[i].replicas_reserved;
894                 s->s[i].replicas_reserved = 0;
895
896                 dirty_sectors -= s->s[i].state == SECTOR_dirty;
897                 folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
898         }
899
900         bch2_disk_reservation_put(c, &disk_res);
901
902         i_sectors_acct(c, inode, NULL, dirty_sectors);
903
904         bch2_folio_release(folio);
905 }
906
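/*
 * Move reservation from @res into per-sector state for [offset, offset + len),
 * mark those sectors dirty, and account newly dirtied (previously unallocated)
 * sectors in i_blocks.
 */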
907 static void bch2_set_folio_dirty(struct bch_fs *c,
908                         struct bch_inode_info *inode,
909                         struct folio *folio,
910                         struct bch2_folio_reservation *res,
911                         unsigned offset, unsigned len)
912 {
913         struct bch_folio *s = bch2_folio(folio);
914         unsigned i, dirty_sectors = 0;
915
916         WARN_ON((u64) folio_pos(folio) + offset + len >
917                 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
918
919         BUG_ON(!s->uptodate);
920
921         spin_lock(&s->lock);
922
923         for (i = round_down(offset, block_bytes(c)) >> 9;
924              i < round_up(offset + len, block_bytes(c)) >> 9;
925              i++) {
926                 unsigned sectors = sectors_to_reserve(&s->s[i],
927                                                 res->disk.nr_replicas);
928
929                 /*
930                  * This can happen if we race with the error path in
931                  * bch2_writepage_io_done():
932                  */
933                 sectors = min_t(unsigned, sectors, res->disk.sectors);
934
935                 s->s[i].replicas_reserved += sectors;
936                 res->disk.sectors -= sectors;
937
938                 dirty_sectors += s->s[i].state == SECTOR_unallocated;
939
940                 folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
941         }
942
943         spin_unlock(&s->lock);
944
945         i_sectors_acct(c, inode, &res->quota, dirty_sectors);
946
947         if (!folio_test_dirty(folio))
948                 filemap_dirty_folio(inode->v.i_mapping, folio);
949 }
950
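/*
 * faults_disabled_mapping is set by the dio write path while it blocks the
 * pagecache on its own mapping: a fault on that same mapping would deadlock,
 * so return SIGBUS; when a fault on a different mapping would violate lock
 * ordering, drop and retake the dio's blocking lock instead, and record that
 * via set_fdm_dropped_locks() so the dio path knows to recheck.
 */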
951 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
952 {
953         struct file *file = vmf->vma->vm_file;
954         struct address_space *mapping = file->f_mapping;
955         struct address_space *fdm = faults_disabled_mapping();
956         struct bch_inode_info *inode = file_bch_inode(file);
957         int ret;
958
959         if (fdm == mapping)
960                 return VM_FAULT_SIGBUS;
961
962         /* Lock ordering: */
963         if (fdm > mapping) {
964                 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
965
966                 if (bch2_pagecache_add_tryget(inode))
967                         goto got_lock;
968
969                 bch2_pagecache_block_put(fdm_host);
970
971                 bch2_pagecache_add_get(inode);
972                 bch2_pagecache_add_put(inode);
973
974                 bch2_pagecache_block_get(fdm_host);
975
976                 /* Signal that lock has been dropped: */
977                 set_fdm_dropped_locks();
978                 return VM_FAULT_SIGBUS;
979         }
980
981         bch2_pagecache_add_get(inode);
982 got_lock:
983         ret = filemap_fault(vmf);
984         bch2_pagecache_add_put(inode);
985
986         return ret;
987 }
988
989 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
990 {
991         struct folio *folio = page_folio(vmf->page);
992         struct file *file = vmf->vma->vm_file;
993         struct bch_inode_info *inode = file_bch_inode(file);
994         struct address_space *mapping = file->f_mapping;
995         struct bch_fs *c = inode->v.i_sb->s_fs_info;
996         struct bch2_folio_reservation res;
997         unsigned len;
998         loff_t isize;
999         int ret;
1000
1001         bch2_folio_reservation_init(c, inode, &res);
1002
1003         sb_start_pagefault(inode->v.i_sb);
1004         file_update_time(file);
1005
1006         /*
1007          * Not strictly necessary, but helps avoid dio writes livelocking in
1008          * write_invalidate_inode_pages_range() - can drop this if/when we get
1009          * a write_invalidate_inode_pages_range() that works without dropping
1010          * page lock before invalidating page
1011          */
1012         bch2_pagecache_add_get(inode);
1013
1014         folio_lock(folio);
1015         isize = i_size_read(&inode->v);
1016
1017         if (folio->mapping != mapping || folio_pos(folio) >= isize) {
1018                 folio_unlock(folio);
1019                 ret = VM_FAULT_NOPAGE;
1020                 goto out;
1021         }
1022
1023         len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
1024
1025         if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
1026             bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
1027                 folio_unlock(folio);
1028                 ret = VM_FAULT_SIGBUS;
1029                 goto out;
1030         }
1031
1032         bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
1033         bch2_folio_reservation_put(c, inode, &res);
1034
1035         folio_wait_stable(folio);
1036         ret = VM_FAULT_LOCKED;
1037 out:
1038         bch2_pagecache_add_put(inode);
1039         sb_end_pagefault(inode->v.i_sb);
1040
1041         return ret;
1042 }
1043
1044 void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1045 {
1046         if (offset || length < folio_size(folio))
1047                 return;
1048
1049         bch2_clear_folio_bits(folio);
1050 }
1051
1052 bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
1053 {
1054         if (folio_test_dirty(folio) || folio_test_writeback(folio))
1055                 return false;
1056
1057         bch2_clear_folio_bits(folio);
1058         return true;
1059 }
1060
1061 /* readpage(s): */
1062
1063 static void bch2_readpages_end_io(struct bio *bio)
1064 {
1065         struct bvec_iter_all iter;
1066         struct folio_vec fv;
1067
1068         bio_for_each_folio_all(fv, bio, iter) {
1069                 if (!bio->bi_status) {
1070                         folio_mark_uptodate(fv.fv_folio);
1071                 } else {
1072                         folio_clear_uptodate(fv.fv_folio);
1073                         folio_set_error(fv.fv_folio);
1074                 }
1075                 folio_unlock(fv.fv_folio);
1076         }
1077
1078         bio_put(bio);
1079 }
1080
1081 struct readpages_iter {
1082         struct address_space    *mapping;
1083         unsigned                idx;
1084         folios                  folios;
1085 };
1086
1087 static int readpages_iter_init(struct readpages_iter *iter,
1088                                struct readahead_control *ractl)
1089 {
1090         struct folio **fi;
1091         int ret;
1092
1093         memset(iter, 0, sizeof(*iter));
1094
1095         iter->mapping = ractl->mapping;
1096
1097         ret = filemap_get_contig_folios_d(iter->mapping,
1098                                 ractl->_index << PAGE_SHIFT,
1099                                 (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
1100                                 0, mapping_gfp_mask(iter->mapping),
1101                                 &iter->folios);
1102         if (ret)
1103                 return ret;
1104
1105         darray_for_each(iter->folios, fi) {
1106                 ractl->_nr_pages -= 1U << folio_order(*fi);
1107                 __bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
1108                 folio_put(*fi);
1109                 folio_put(*fi);
1110         }
1111
1112         return 0;
1113 }
1114
1115 static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
1116 {
1117         if (iter->idx >= iter->folios.nr)
1118                 return NULL;
1119         return iter->folios.data[iter->idx];
1120 }
1121
1122 static inline void readpage_iter_advance(struct readpages_iter *iter)
1123 {
1124         iter->idx++;
1125 }
1126
1127 static bool extent_partial_reads_expensive(struct bkey_s_c k)
1128 {
1129         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1130         struct bch_extent_crc_unpacked crc;
1131         const union bch_extent_entry *i;
1132
1133         bkey_for_each_crc(k.k, ptrs, crc, i)
1134                 if (crc.csum_type || crc.compression_type)
1135                         return true;
1136         return false;
1137 }
1138
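/*
 * Try to add more folios to the read bio - from the readahead batch, or
 * allocated here - so that a checksummed/compressed extent can be read with a
 * single IO rather than a more expensive partial read.
 */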
1139 static int readpage_bio_extend(struct btree_trans *trans,
1140                                struct readpages_iter *iter,
1141                                struct bio *bio,
1142                                unsigned sectors_this_extent,
1143                                bool get_more)
1144 {
1145         /* Don't hold btree locks while allocating memory: */
1146         bch2_trans_unlock(trans);
1147
1148         while (bio_sectors(bio) < sectors_this_extent &&
1149                bio->bi_vcnt < bio->bi_max_vecs) {
1150                 struct folio *folio = readpage_iter_peek(iter);
1151                 int ret;
1152
1153                 if (folio) {
1154                         readpage_iter_advance(iter);
1155                 } else {
1156                         pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
1157
1158                         if (!get_more)
1159                                 break;
1160
1161                         folio = xa_load(&iter->mapping->i_pages, folio_offset);
1162                         if (folio && !xa_is_value(folio))
1163                                 break;
1164
1165                         folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
1166                         if (!folio)
1167                                 break;
1168
1169                         if (!__bch2_folio_create(folio, GFP_KERNEL)) {
1170                                 folio_put(folio);
1171                                 break;
1172                         }
1173
1174                         ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
1175                         if (ret) {
1176                                 __bch2_folio_release(folio);
1177                                 folio_put(folio);
1178                                 break;
1179                         }
1180
1181                         folio_put(folio);
1182                 }
1183
1184                 BUG_ON(folio_sector(folio) != bio_end_sector(bio));
1185
1186                 BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
1187         }
1188
1189         return bch2_trans_relock(trans);
1190 }
1191
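/*
 * Core buffered read path: walk the extents btree from the bio's start sector,
 * resolve indirect (reflink) extents, and split the bio so each piece is
 * submitted with bch2_read_extent().
 */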
1192 static void bchfs_read(struct btree_trans *trans,
1193                        struct bch_read_bio *rbio,
1194                        subvol_inum inum,
1195                        struct readpages_iter *readpages_iter)
1196 {
1197         struct bch_fs *c = trans->c;
1198         struct btree_iter iter;
1199         struct bkey_buf sk;
1200         int flags = BCH_READ_RETRY_IF_STALE|
1201                 BCH_READ_MAY_PROMOTE;
1202         u32 snapshot;
1203         int ret = 0;
1204
1205         rbio->c = c;
1206         rbio->start_time = local_clock();
1207         rbio->subvol = inum.subvol;
1208
1209         bch2_bkey_buf_init(&sk);
1210 retry:
1211         bch2_trans_begin(trans);
1212         iter = (struct btree_iter) { NULL };
1213
1214         ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1215         if (ret)
1216                 goto err;
1217
1218         bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1219                              SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1220                              BTREE_ITER_SLOTS);
1221         while (1) {
1222                 struct bkey_s_c k;
1223                 unsigned bytes, sectors, offset_into_extent;
1224                 enum btree_id data_btree = BTREE_ID_extents;
1225
1226                 /*
1227                  * read_extent -> io_time_reset may cause a transaction restart
1228                  * without returning an error, we need to check for that here:
1229                  */
1230                 ret = bch2_trans_relock(trans);
1231                 if (ret)
1232                         break;
1233
1234                 bch2_btree_iter_set_pos(&iter,
1235                                 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1236
1237                 k = bch2_btree_iter_peek_slot(&iter);
1238                 ret = bkey_err(k);
1239                 if (ret)
1240                         break;
1241
1242                 offset_into_extent = iter.pos.offset -
1243                         bkey_start_offset(k.k);
1244                 sectors = k.k->size - offset_into_extent;
1245
1246                 bch2_bkey_buf_reassemble(&sk, c, k);
1247
1248                 ret = bch2_read_indirect_extent(trans, &data_btree,
1249                                         &offset_into_extent, &sk);
1250                 if (ret)
1251                         break;
1252
1253                 k = bkey_i_to_s_c(sk.k);
1254
1255                 sectors = min(sectors, k.k->size - offset_into_extent);
1256
1257                 if (readpages_iter) {
1258                         ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
1259                                                   extent_partial_reads_expensive(k));
1260                         if (ret)
1261                                 break;
1262                 }
1263
1264                 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1265                 swap(rbio->bio.bi_iter.bi_size, bytes);
1266
1267                 if (rbio->bio.bi_iter.bi_size == bytes)
1268                         flags |= BCH_READ_LAST_FRAGMENT;
1269
1270                 bch2_bio_page_state_set(&rbio->bio, k);
1271
1272                 bch2_read_extent(trans, rbio, iter.pos,
1273                                  data_btree, k, offset_into_extent, flags);
1274
1275                 if (flags & BCH_READ_LAST_FRAGMENT)
1276                         break;
1277
1278                 swap(rbio->bio.bi_iter.bi_size, bytes);
1279                 bio_advance(&rbio->bio, bytes);
1280
1281                 ret = btree_trans_too_many_iters(trans);
1282                 if (ret)
1283                         break;
1284         }
1285 err:
1286         bch2_trans_iter_exit(trans, &iter);
1287
1288         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1289                 goto retry;
1290
1291         if (ret) {
1292                 bch_err_inum_offset_ratelimited(c,
1293                                 iter.pos.inode,
1294                                 iter.pos.offset << 9,
1295                                 "read error %i from btree lookup", ret);
1296                 rbio->bio.bi_status = BLK_STS_IOERR;
1297                 bio_endio(&rbio->bio);
1298         }
1299
1300         bch2_bkey_buf_exit(&sk, c);
1301 }
1302
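/*
 * ->readahead: grab the folios up front (readpages_iter_init()), then per
 * batch allocate a read bio, add the first folio, and let bchfs_read() /
 * readpage_bio_extend() fill in the rest.
 */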
1303 void bch2_readahead(struct readahead_control *ractl)
1304 {
1305         struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1306         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1307         struct bch_io_opts opts;
1308         struct btree_trans trans;
1309         struct folio *folio;
1310         struct readpages_iter readpages_iter;
1311         int ret;
1312
1313         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1314
1315         ret = readpages_iter_init(&readpages_iter, ractl);
1316         BUG_ON(ret);
1317
1318         bch2_trans_init(&trans, c, 0, 0);
1319
1320         bch2_pagecache_add_get(inode);
1321
1322         while ((folio = readpage_iter_peek(&readpages_iter))) {
1323                 unsigned n = min_t(unsigned,
1324                                    readpages_iter.folios.nr -
1325                                    readpages_iter.idx,
1326                                    BIO_MAX_VECS);
1327                 struct bch_read_bio *rbio =
1328                         rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
1329                                                    GFP_KERNEL, &c->bio_read),
1330                                   opts);
1331
1332                 readpage_iter_advance(&readpages_iter);
1333
1334                 rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1335                 rbio->bio.bi_end_io = bch2_readpages_end_io;
1336                 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1337
1338                 bchfs_read(&trans, rbio, inode_inum(inode),
1339                            &readpages_iter);
1340                 bch2_trans_unlock(&trans);
1341         }
1342
1343         bch2_pagecache_add_put(inode);
1344
1345         bch2_trans_exit(&trans);
1346         darray_exit(&readpages_iter.folios);
1347 }
1348
1349 static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
1350                              subvol_inum inum, struct folio *folio)
1351 {
1352         struct btree_trans trans;
1353
1354         bch2_folio_create(folio, __GFP_NOFAIL);
1355
1356         rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
1357         rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1358         BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1359
1360         bch2_trans_init(&trans, c, 0, 0);
1361         bchfs_read(&trans, rbio, inum, NULL);
1362         bch2_trans_exit(&trans);
1363 }
1364
1365 static void bch2_read_single_folio_end_io(struct bio *bio)
1366 {
1367         complete(bio->bi_private);
1368 }
1369
1370 static int bch2_read_single_folio(struct folio *folio,
1371                                   struct address_space *mapping)
1372 {
1373         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1374         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1375         struct bch_read_bio *rbio;
1376         struct bch_io_opts opts;
1377         int ret;
1378         DECLARE_COMPLETION_ONSTACK(done);
1379
1380         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1381
1382         rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
1383                          opts);
1384         rbio->bio.bi_private = &done;
1385         rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
1386
1387         __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
1388         wait_for_completion(&done);
1389
1390         ret = blk_status_to_errno(rbio->bio.bi_status);
1391         bio_put(&rbio->bio);
1392
1393         if (ret < 0)
1394                 return ret;
1395
1396         folio_mark_uptodate(folio);
1397         return 0;
1398 }
1399
1400 int bch2_read_folio(struct file *file, struct folio *folio)
1401 {
1402         int ret;
1403
1404         ret = bch2_read_single_folio(folio, folio->mapping);
1405         folio_unlock(folio);
1406         return bch2_err_class(ret);
1407 }
1408
1409 /* writepages: */
1410
1411 struct bch_writepage_state {
1412         struct bch_writepage_io *io;
1413         struct bch_io_opts      opts;
1414         struct bch_folio_sector *tmp;
1415         unsigned                tmp_sectors;
1416 };
1417
1418 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1419                                                                   struct bch_inode_info *inode)
1420 {
1421         struct bch_writepage_state ret = { 0 };
1422
1423         bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
1424         return ret;
1425 }
1426
1427 static void bch2_writepage_io_done(struct bch_write_op *op)
1428 {
1429         struct bch_writepage_io *io =
1430                 container_of(op, struct bch_writepage_io, op);
1431         struct bch_fs *c = io->op.c;
1432         struct bio *bio = &io->op.wbio.bio;
1433         struct bvec_iter_all iter;
1434         struct folio_vec fv;
1435         unsigned i;
1436
1437         if (io->op.error) {
1438                 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1439
1440                 bio_for_each_folio_all(fv, bio, iter) {
1441                         struct bch_folio *s;
1442
1443                         folio_set_error(fv.fv_folio);
1444                         mapping_set_error(fv.fv_folio->mapping, -EIO);
1445
1446                         s = __bch2_folio(fv.fv_folio);
1447                         spin_lock(&s->lock);
1448                         for (i = 0; i < folio_sectors(fv.fv_folio); i++)
1449                                 s->s[i].nr_replicas = 0;
1450                         spin_unlock(&s->lock);
1451                 }
1452         }
1453
1454         if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1455                 bio_for_each_folio_all(fv, bio, iter) {
1456                         struct bch_folio *s;
1457
1458                         s = __bch2_folio(fv.fv_folio);
1459                         spin_lock(&s->lock);
1460                         for (i = 0; i < folio_sectors(fv.fv_folio); i++)
1461                                 s->s[i].nr_replicas = 0;
1462                         spin_unlock(&s->lock);
1463                 }
1464         }
1465
1466         /*
1467          * racing with fallocate can cause us to add fewer sectors than
1468          * expected - but we shouldn't add more sectors than expected:
1469          */
1470         WARN_ON_ONCE(io->op.i_sectors_delta > 0);
1471
1472         /*
1473          * (error (due to going RO) halfway through a page can screw that up
1474          * slightly)
1475          * XXX wtf?
1476            BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
1477          */
1478
1479         /*
1480          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1481          * before calling end_page_writeback:
1482          */
1483         i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1484
1485         bio_for_each_folio_all(fv, bio, iter) {
1486                 struct bch_folio *s = __bch2_folio(fv.fv_folio);
1487
1488                 if (atomic_dec_and_test(&s->write_count))
1489                         folio_end_writeback(fv.fv_folio);
1490         }
1491
1492         bio_put(&io->op.wbio.bio);
1493 }
1494
1495 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1496 {
1497         struct bch_writepage_io *io = w->io;
1498
1499         w->io = NULL;
1500         closure_call(&io->op.cl, bch2_write, NULL, NULL);
1501 }
1502
1503 /*
1504  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1505  * possible, else allocating a new one:
1506  */
1507 static void bch2_writepage_io_alloc(struct bch_fs *c,
1508                                     struct writeback_control *wbc,
1509                                     struct bch_writepage_state *w,
1510                                     struct bch_inode_info *inode,
1511                                     u64 sector,
1512                                     unsigned nr_replicas)
1513 {
1514         struct bch_write_op *op;
1515
1516         w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
1517                                               REQ_OP_WRITE,
1518                                               GFP_KERNEL,
1519                                               &c->writepage_bioset),
1520                              struct bch_writepage_io, op.wbio.bio);
1521
1522         w->io->inode            = inode;
1523         op                      = &w->io->op;
1524         bch2_write_op_init(op, c, w->opts);
1525         op->target              = w->opts.foreground_target;
1526         op->nr_replicas         = nr_replicas;
1527         op->res.nr_replicas     = nr_replicas;
1528         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1529         op->subvol              = inode->ei_subvol;
1530         op->pos                 = POS(inode->v.i_ino, sector);
1531         op->end_io              = bch2_writepage_io_done;
1532         op->devs_need_flush     = &inode->ei_devs_need_flush;
1533         op->wbio.bio.bi_iter.bi_sector = sector;
1534         op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
1535 }
1536
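/*
 * Write out one folio: snapshot per-sector reservations into w->tmp, mark the
 * sectors allocated, then submit each contiguous run of dirty sectors,
 * appending to the current bch_writepage_io when possible.
 */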
1537 static int __bch2_writepage(struct folio *folio,
1538                             struct writeback_control *wbc,
1539                             void *data)
1540 {
1541         struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
1542         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1543         struct bch_writepage_state *w = data;
1544         struct bch_folio *s;
1545         unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
1546         loff_t i_size = i_size_read(&inode->v);
1547         int ret;
1548
1549         EBUG_ON(!folio_test_uptodate(folio));
1550
1551         /* Is the folio fully inside i_size? */
1552         if (folio_end_pos(folio) <= i_size)
1553                 goto do_io;
1554
1555         /* Is the folio fully outside i_size? (truncate in progress) */
1556         if (folio_pos(folio) >= i_size) {
1557                 folio_unlock(folio);
1558                 return 0;
1559         }
1560
1561         /*
1562          * The folio straddles i_size.  It must be zeroed out on each and every
1563          * writepage invocation because it may be mmapped.  "A file is mapped
1564          * in multiples of the folio size.  For a file that is not a multiple of
1565          * the folio size, the remaining memory is zeroed when mapped, and
1566          * writes to that region are not written out to the file."
1567          */
1568         folio_zero_segment(folio,
1569                            i_size - folio_pos(folio),
1570                            folio_size(folio));
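        /*
         * E.g. with a 4096-byte folio and i_size ending 1000 bytes into it,
         * the call above zeroes bytes 1000..4095, so data written through a
         * mapping beyond EOF is never written back to the file.
         */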
1571 do_io:
1572         f_sectors = folio_sectors(folio);
1573         s = bch2_folio(folio);
1574
1575         if (f_sectors > w->tmp_sectors) {
1576                 kfree(w->tmp);
1577                 w->tmp = kzalloc(sizeof(struct bch_folio_sector) *
1578                                  f_sectors, GFP_NOFS|__GFP_NOFAIL);
1579                 w->tmp_sectors = f_sectors;
1580         }
1581
1582         /*
1583          * Things get really hairy with errors during writeback:
1584          */
1585         ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
1586         BUG_ON(ret);
1587
1588         /* Before unlocking the folio, get a copy of the reservations: */
1589         spin_lock(&s->lock);
1590         memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
1591
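        /*
         * The whole write goes out at a single replication factor: take the
         * minimum over all dirty sectors of replicas already on disk plus
         * replicas reserved, then mark each of those sectors allocated at that
         * factor (when compressing, nr_replicas is simply set to 0):
         */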
1592         for (i = 0; i < f_sectors; i++) {
1593                 if (s->s[i].state < SECTOR_dirty)
1594                         continue;
1595
1596                 nr_replicas_this_write =
1597                         min_t(unsigned, nr_replicas_this_write,
1598                               s->s[i].nr_replicas +
1599                               s->s[i].replicas_reserved);
1600         }
1601
1602         for (i = 0; i < f_sectors; i++) {
1603                 if (s->s[i].state < SECTOR_dirty)
1604                         continue;
1605
1606                 s->s[i].nr_replicas = w->opts.compression
1607                         ? 0 : nr_replicas_this_write;
1608
1609                 s->s[i].replicas_reserved = 0;
1610                 folio_sector_set(folio, s, i, SECTOR_allocated);
1611         }
1612         spin_unlock(&s->lock);
1613
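        /*
         * write_count starts with a bias ref held while we carve the folio up
         * into bios below; each chunk added takes another ref, and writeback
         * state is only cleared once the last ref is dropped - either after
         * the loop below or in bch2_writepage_io_done.
         */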
1614         BUG_ON(atomic_read(&s->write_count));
1615         atomic_set(&s->write_count, 1);
1616
1617         BUG_ON(folio_test_writeback(folio));
1618         folio_start_writeback(folio);
1619
1620         folio_unlock(folio);
1621
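        /*
         * Walk the per-sector state we copied out above, finding runs of
         * contiguous dirty sectors; each run is added to the current
         * writepage_io bio, and a new io is started when the replication
         * factor changes, the bio is full, or the run isn't contiguous with
         * what's already queued.
         */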
1622         offset = 0;
1623         while (1) {
1624                 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1625                 u64 sector;
1626
1627                 while (offset < f_sectors &&
1628                        w->tmp[offset].state < SECTOR_dirty)
1629                         offset++;
1630
1631                 if (offset == f_sectors)
1632                         break;
1633
1634                 while (offset + sectors < f_sectors &&
1635                        w->tmp[offset + sectors].state >= SECTOR_dirty) {
1636                         reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
1637                         dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
1638                         sectors++;
1639                 }
1640                 BUG_ON(!sectors);
1641
1642                 sector = folio_sector(folio) + offset;
1643
1644                 if (w->io &&
1645                     (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1646                      bio_full(&w->io->op.wbio.bio, sectors << 9) ||
1647                      w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1648                      (BIO_MAX_VECS * PAGE_SIZE) ||
1649                      bio_end_sector(&w->io->op.wbio.bio) != sector))
1650                         bch2_writepage_do_io(w);
1651
1652                 if (!w->io)
1653                         bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1654                                                 nr_replicas_this_write);
1655
1656                 atomic_inc(&s->write_count);
1657
1658                 BUG_ON(inode != w->io->inode);
1659                 BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
1660                                      sectors << 9, offset << 9));
1661
1662                 /* Check for writing past i_size: */
1663                 WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1664                           round_up(i_size, block_bytes(c)) &&
1665                           !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
1666                           "writing past i_size: %llu > %llu (unrounded %llu)\n",
1667                           bio_end_sector(&w->io->op.wbio.bio) << 9,
1668                           round_up(i_size, block_bytes(c)),
1669                           i_size);
1670
1671                 w->io->op.res.sectors += reserved_sectors;
1672                 w->io->op.i_sectors_delta -= dirty_sectors;
1673                 w->io->op.new_i_size = i_size;
1674
1675                 offset += sectors;
1676         }
1677
1678         if (atomic_dec_and_test(&s->write_count))
1679                 folio_end_writeback(folio);
1680
1681         return 0;
1682 }
1683
1684 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1685 {
1686         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1687         struct bch_writepage_state w =
1688                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1689         struct blk_plug plug;
1690         int ret;
1691
1692         blk_start_plug(&plug);
1693         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1694         if (w.io)
1695                 bch2_writepage_do_io(&w);
1696         blk_finish_plug(&plug);
1697         kfree(w.tmp);
1698         return bch2_err_class(ret);
1699 }
1700
1701 /* buffered writes: */
1702
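/*
 * bch2_write_begin()/bch2_write_end() prepare and commit a write to a single
 * folio: write_begin returns a locked folio (read in first when only part of
 * it is being overwritten and it isn't already uptodate) with a disk/quota
 * reservation stashed in *fsdata, and write_end marks the copied range dirty
 * and updates i_size. These are presumably wired up as the address_space
 * ->write_begin/->write_end ops; the main buffered write path below
 * (__bch2_buffered_write) batches multiple folios itself.
 */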
1703 int bch2_write_begin(struct file *file, struct address_space *mapping,
1704                      loff_t pos, unsigned len,
1705                      struct page **pagep, void **fsdata)
1706 {
1707         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1708         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1709         struct bch2_folio_reservation *res;
1710         struct folio *folio;
1711         unsigned offset;
1712         int ret = -ENOMEM;
1713
1714         res = kmalloc(sizeof(*res), GFP_KERNEL);
1715         if (!res)
1716                 return -ENOMEM;
1717
1718         bch2_folio_reservation_init(c, inode, res);
1719         *fsdata = res;
1720
1721         bch2_pagecache_add_get(inode);
1722
1723         folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
1724                                 FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
1725                                 mapping_gfp_mask(mapping));
1726         if (!folio)
1727                 goto err_unlock;
1728
1729         if (folio_test_uptodate(folio))
1730                 goto out;
1731
1732         offset = pos - folio_pos(folio);
1733         len = min_t(size_t, len, folio_end_pos(folio) - pos);
1734
1735         /* If we're writing the entire folio, we don't need to read it in first: */
1736         if (!offset && len == folio_size(folio))
1737                 goto out;
1738
1739         if (!offset && pos + len >= inode->v.i_size) {
1740                 folio_zero_segment(folio, len, folio_size(folio));
1741                 flush_dcache_folio(folio);
1742                 goto out;
1743         }
1744
1745         if (folio_pos(folio) >= inode->v.i_size) {
1746                 folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
1747                 flush_dcache_folio(folio);
1748                 goto out;
1749         }
1750 readpage:
1751         ret = bch2_read_single_folio(folio, mapping);
1752         if (ret)
1753                 goto err;
1754 out:
1755         ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
1756         if (ret)
1757                 goto err;
1758
1759         ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
1760         if (ret) {
1761                 if (!folio_test_uptodate(folio)) {
1762                         /*
1763                          * If the folio hasn't been read in, we won't know if we
1764                          * actually need a reservation - we don't actually need
1765                          * to read here, we just need to check if the folio is
1766                          * fully backed by uncompressed data:
1767                          */
1768                         goto readpage;
1769                 }
1770
1771                 goto err;
1772         }
1773
1774         *pagep = &folio->page;
1775         return 0;
1776 err:
1777         folio_unlock(folio);
1778         folio_put(folio);
1779         *pagep = NULL;
1780 err_unlock:
1781         bch2_pagecache_add_put(inode);
1782         kfree(res);
1783         *fsdata = NULL;
1784         return bch2_err_class(ret);
1785 }
1786
1787 int bch2_write_end(struct file *file, struct address_space *mapping,
1788                    loff_t pos, unsigned len, unsigned copied,
1789                    struct page *page, void *fsdata)
1790 {
1791         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1792         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1793         struct bch2_folio_reservation *res = fsdata;
1794         struct folio *folio = page_folio(page);
1795         unsigned offset = pos - folio_pos(folio);
1796
1797         lockdep_assert_held(&inode->v.i_rwsem);
1798         BUG_ON(offset + copied > folio_size(folio));
1799
1800         if (unlikely(copied < len && !folio_test_uptodate(folio))) {
1801                 /*
1802                  * The folio needs to be read in, but that would destroy
1803                  * our partial write - simplest thing is to just force
1804                  * userspace to redo the write:
1805                  */
1806                 folio_zero_range(folio, 0, folio_size(folio));
1807                 flush_dcache_folio(folio);
1808                 copied = 0;
1809         }
1810
1811         spin_lock(&inode->v.i_lock);
1812         if (pos + copied > inode->v.i_size)
1813                 i_size_write(&inode->v, pos + copied);
1814         spin_unlock(&inode->v.i_lock);
1815
1816         if (copied) {
1817                 if (!folio_test_uptodate(folio))
1818                         folio_mark_uptodate(folio);
1819
1820                 bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
1821
1822                 inode->ei_last_dirtied = (unsigned long) current;
1823         }
1824
1825         folio_unlock(folio);
1826         folio_put(folio);
1827         bch2_pagecache_add_put(inode);
1828
1829         bch2_folio_reservation_put(c, inode, res);
1830         kfree(res);
1831
1832         return copied;
1833 }
1834
1835 static noinline void folios_trunc(folios *folios, struct folio **fi)
1836 {
1837         while (folios->data + folios->nr > fi) {
1838                 struct folio *f = darray_pop(folios);
1839
1840                 folio_unlock(f);
1841                 folio_put(f);
1842         }
1843 }
1844
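/*
 * Core of the buffered write path: pin a contiguous run of folios covering
 * [pos, pos + len), read in any partially overwritten folio that isn't
 * uptodate, take reservations, copy from the iov_iter, then dirty the folios
 * and update i_size. Returns the number of bytes copied, which may be short
 * if we run out of space or fault while copying from userspace.
 */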
1845 static int __bch2_buffered_write(struct bch_inode_info *inode,
1846                                  struct address_space *mapping,
1847                                  struct iov_iter *iter,
1848                                  loff_t pos, unsigned len)
1849 {
1850         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1851         struct bch2_folio_reservation res;
1852         folios folios;
1853         struct folio **fi, *f;
1854         unsigned copied = 0, f_offset;
1855         u64 end = pos + len, f_pos;
1856         loff_t last_folio_pos = inode->v.i_size;
1857         int ret = 0;
1858
1859         BUG_ON(!len);
1860
1861         bch2_folio_reservation_init(c, inode, &res);
1862         darray_init(&folios);
1863
1864         ret = filemap_get_contig_folios_d(mapping, pos, end,
1865                                    FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
1866                                    mapping_gfp_mask(mapping),
1867                                    &folios);
1868         if (ret)
1869                 goto out;
1870
1871         BUG_ON(!folios.nr);
1872
1873         f = darray_first(folios);
1874         if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
1875                 ret = bch2_read_single_folio(f, mapping);
1876                 if (ret)
1877                         goto out;
1878         }
1879
1880         f = darray_last(folios);
1881         end = min(end, folio_end_pos(f));
1882         last_folio_pos = folio_pos(f);
1883         if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
1884                 if (end >= inode->v.i_size) {
1885                         folio_zero_range(f, 0, folio_size(f));
1886                 } else {
1887                         ret = bch2_read_single_folio(f, mapping);
1888                         if (ret)
1889                                 goto out;
1890                 }
1891         }
1892
1893         ret = bch2_folio_set(c, inode_inum(inode), folios.data, folios.nr);
1894         if (ret)
1895                 goto out;
1896
1897         f_pos = pos;
1898         f_offset = pos - folio_pos(darray_first(folios));
1899         darray_for_each(folios, fi) {
1900                 struct folio *f = *fi;
1901                 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
1902
1903                 /*
1904                  * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
1905                  * supposed to write as much as we have disk space for.
1906                  *
1907                  * On failure here we should still write out a partial page if
1908                  * we aren't completely out of disk space - we don't do that
1909                  * yet:
1910                  */
1911                 ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
1912                 if (unlikely(ret)) {
1913                         folios_trunc(&folios, fi);
1914                         if (!folios.nr)
1915                                 goto out;
1916
1917                         end = min(end, folio_end_pos(darray_last(folios)));
1918                         break;
1919                 }
1920
1921                 f_pos = folio_end_pos(f);
1922                 f_offset = 0;
1923         }
1924
1925         if (mapping_writably_mapped(mapping))
1926                 darray_for_each(folios, fi)
1927                         flush_dcache_folio(*fi);
1928
1929         f_pos = pos;
1930         f_offset = pos - folio_pos(darray_first(folios));
1931         darray_for_each(folios, fi) {
1932                 struct folio *f = *fi;
1933                 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
1934                 unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
1935
1936                 if (!f_copied) {
1937                         folios_trunc(&folios, fi);
1938                         break;
1939                 }
1940
1941                 if (!folio_test_uptodate(f) &&
1942                     f_copied != folio_size(f) &&
1943                     pos + copied + f_copied < inode->v.i_size) {
1944                         folio_zero_range(f, 0, folio_size(f));
1945                         folios_trunc(&folios, fi);
1946                         break;
1947                 }
1948
1949                 flush_dcache_folio(f);
1950                 copied += f_copied;
1951
1952                 if (f_copied != f_len) {
1953                         folios_trunc(&folios, fi + 1);
1954                         break;
1955                 }
1956
1957                 f_pos = folio_end_pos(f);
1958                 f_offset = 0;
1959         }
1960
1961         if (!copied)
1962                 goto out;
1963
1964         end = pos + copied;
1965
1966         spin_lock(&inode->v.i_lock);
1967         if (end > inode->v.i_size)
1968                 i_size_write(&inode->v, end);
1969         spin_unlock(&inode->v.i_lock);
1970
1971         f_pos = pos;
1972         f_offset = pos - folio_pos(darray_first(folios));
1973         darray_for_each(folios, fi) {
1974                 struct folio *f = *fi;
1975                 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
1976
1977                 if (!folio_test_uptodate(f))
1978                         folio_mark_uptodate(f);
1979
1980                 bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
1981
1982                 f_pos = folio_end_pos(f);
1983                 f_offset = 0;
1984         }
1985
1986         inode->ei_last_dirtied = (unsigned long) current;
1987 out:
1988         darray_for_each(folios, fi) {
1989                 folio_unlock(*fi);
1990                 folio_put(*fi);
1991         }
1992
1993         /*
1994          * If the last folio added to the mapping starts beyond current EOF, we
1995          * performed a short write but left around at least one post-EOF folio.
1996          * Clean up the mapping before we return.
1997          */
1998         if (last_folio_pos >= inode->v.i_size)
1999                 truncate_pagecache(&inode->v, inode->v.i_size);
2000
2001         darray_exit(&folios);
2002         bch2_folio_reservation_put(c, inode, &res);
2003
2004         return copied ?: ret;
2005 }
2006
2007 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
2008 {
2009         struct file *file = iocb->ki_filp;
2010         struct address_space *mapping = file->f_mapping;
2011         struct bch_inode_info *inode = file_bch_inode(file);
2012         loff_t pos = iocb->ki_pos;
2013         ssize_t written = 0;
2014         int ret = 0;
2015
2016         bch2_pagecache_add_get(inode);
2017
2018         do {
2019                 unsigned offset = pos & (PAGE_SIZE - 1);
2020                 unsigned bytes = iov_iter_count(iter);
2021 again:
2022                 /*
2023                  * Bring in the user page that we will copy from _first_.
2024                  * Otherwise there's a nasty deadlock on copying from the
2025                  * same page as we're writing to, without it being marked
2026                  * up-to-date.
2027                  *
2028                  * Not only is this an optimisation, but it is also required
2029                  * to check that the address is actually valid, when atomic
2030                  * usercopies are used, below.
2031                  */
2032                 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2033                         bytes = min_t(unsigned long, iov_iter_count(iter),
2034                                       PAGE_SIZE - offset);
2035
2036                         if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2037                                 ret = -EFAULT;
2038                                 break;
2039                         }
2040                 }
2041
2042                 if (unlikely(fatal_signal_pending(current))) {
2043                         ret = -EINTR;
2044                         break;
2045                 }
2046
2047                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
2048                 if (unlikely(ret < 0))
2049                         break;
2050
2051                 cond_resched();
2052
2053                 if (unlikely(ret == 0)) {
2054                         /*
2055                          * If we were unable to copy any data at all, we must
2056                          * fall back to a single segment length write.
2057                          *
2058                          * If we didn't fall back here, we could livelock
2059                          * because not all segments in the iov can be copied at
2060                          * once without a pagefault.
2061                          */
2062                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
2063                                       iov_iter_single_seg_count(iter));
2064                         goto again;
2065                 }
2066                 pos += ret;
2067                 written += ret;
2068                 ret = 0;
2069
2070                 balance_dirty_pages_ratelimited(mapping);
2071         } while (iov_iter_count(iter));
2072
2073         bch2_pagecache_add_put(inode);
2074
2075         return written ? written : ret;
2076 }
2077
2078 /* O_DIRECT reads */
2079
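/*
 * O_DIRECT reads are issued as one or more bios pinning the user pages
 * directly; a single dio_read (embedded in the first bio, from
 * dio_read_bioset) tracks completion via a closure. Synchronous requests wait
 * on the closure, asynchronous ones complete the kiocb from
 * bch2_dio_read_complete.
 */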
2080 static void bio_check_or_release(struct bio *bio, bool check_dirty)
2081 {
2082         if (check_dirty) {
2083                 bio_check_pages_dirty(bio);
2084         } else {
2085                 bio_release_pages(bio, false);
2086                 bio_put(bio);
2087         }
2088 }
2089
2090 static void bch2_dio_read_complete(struct closure *cl)
2091 {
2092         struct dio_read *dio = container_of(cl, struct dio_read, cl);
2093
2094         dio->req->ki_complete(dio->req, dio->ret);
2095         bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2096 }
2097
2098 static void bch2_direct_IO_read_endio(struct bio *bio)
2099 {
2100         struct dio_read *dio = bio->bi_private;
2101
2102         if (bio->bi_status)
2103                 dio->ret = blk_status_to_errno(bio->bi_status);
2104
2105         closure_put(&dio->cl);
2106 }
2107
2108 static void bch2_direct_IO_read_split_endio(struct bio *bio)
2109 {
2110         struct dio_read *dio = bio->bi_private;
2111         bool should_dirty = dio->should_dirty;
2112
2113         bch2_direct_IO_read_endio(bio);
2114         bio_check_or_release(bio, should_dirty);
2115 }
2116
2117 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
2118 {
2119         struct file *file = req->ki_filp;
2120         struct bch_inode_info *inode = file_bch_inode(file);
2121         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2122         struct bch_io_opts opts;
2123         struct dio_read *dio;
2124         struct bio *bio;
2125         loff_t offset = req->ki_pos;
2126         bool sync = is_sync_kiocb(req);
2127         size_t shorten;
2128         ssize_t ret;
2129
2130         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2131
2132         if ((offset|iter->count) & (block_bytes(c) - 1))
2133                 return -EINVAL;
2134
2135         ret = min_t(loff_t, iter->count,
2136                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
2137
2138         if (!ret)
2139                 return ret;
2140
2141         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
2142         iter->count -= shorten;
2143
2144         bio = bio_alloc_bioset(NULL,
2145                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2146                                REQ_OP_READ,
2147                                GFP_KERNEL,
2148                                &c->dio_read_bioset);
2149
2150         bio->bi_end_io = bch2_direct_IO_read_endio;
2151
2152         dio = container_of(bio, struct dio_read, rbio.bio);
2153         closure_init(&dio->cl, NULL);
2154
2155         /*
2156          * this is a _really_ horrible hack just to avoid an atomic sub at the
2157          * end:
2158          */
2159         if (!sync) {
2160                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
2161                 atomic_set(&dio->cl.remaining,
2162                            CLOSURE_REMAINING_INITIALIZER -
2163                            CLOSURE_RUNNING +
2164                            CLOSURE_DESTRUCTOR);
2165         } else {
2166                 atomic_set(&dio->cl.remaining,
2167                            CLOSURE_REMAINING_INITIALIZER + 1);
2168         }
2169
2170         dio->req        = req;
2171         dio->ret        = ret;
2172         /*
2173          * This is one of the sketchier things I've encountered: we have to skip
2174          * the dirtying of requests that are internal to the kernel (i.e. from
2175          * loopback), because we'll deadlock on the page lock.
2176          */
2177         dio->should_dirty = iter_is_iovec(iter);
2178
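        /*
         * The first bio was allocated from dio_read_bioset and embeds the
         * dio_read itself, so jump into the middle of the loop to submit it;
         * any further bios come from c->bio_read and use the split endio,
         * which also releases their page refs.
         */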
2179         goto start;
2180         while (iter->count) {
2181                 bio = bio_alloc_bioset(NULL,
2182                                        bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2183                                        REQ_OP_READ,
2184                                        GFP_KERNEL,
2185                                        &c->bio_read);
2186                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
2187 start:
2188                 bio->bi_opf             = REQ_OP_READ|REQ_SYNC;
2189                 bio->bi_iter.bi_sector  = offset >> 9;
2190                 bio->bi_private         = dio;
2191
2192                 ret = bio_iov_iter_get_pages(bio, iter);
2193                 if (ret < 0) {
2194                         /* XXX: fault inject this path */
2195                         bio->bi_status = BLK_STS_RESOURCE;
2196                         bio_endio(bio);
2197                         break;
2198                 }
2199
2200                 offset += bio->bi_iter.bi_size;
2201
2202                 if (dio->should_dirty)
2203                         bio_set_pages_dirty(bio);
2204
2205                 if (iter->count)
2206                         closure_get(&dio->cl);
2207
2208                 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
2209         }
2210
2211         iter->count += shorten;
2212
2213         if (sync) {
2214                 closure_sync(&dio->cl);
2215                 closure_debug_destroy(&dio->cl);
2216                 ret = dio->ret;
2217                 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2218                 return ret;
2219         } else {
2220                 return -EIOCBQUEUED;
2221         }
2222 }
2223
2224 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2225 {
2226         struct file *file = iocb->ki_filp;
2227         struct bch_inode_info *inode = file_bch_inode(file);
2228         struct address_space *mapping = file->f_mapping;
2229         size_t count = iov_iter_count(iter);
2230         ssize_t ret;
2231
2232         if (!count)
2233                 return 0; /* skip atime */
2234
2235         if (iocb->ki_flags & IOCB_DIRECT) {
2236                 struct blk_plug plug;
2237
2238                 if (unlikely(mapping->nrpages)) {
2239                         ret = filemap_write_and_wait_range(mapping,
2240                                                 iocb->ki_pos,
2241                                                 iocb->ki_pos + count - 1);
2242                         if (ret < 0)
2243                                 goto out;
2244                 }
2245
2246                 file_accessed(file);
2247
2248                 blk_start_plug(&plug);
2249                 ret = bch2_direct_IO_read(iocb, iter);
2250                 blk_finish_plug(&plug);
2251
2252                 if (ret >= 0)
2253                         iocb->ki_pos += ret;
2254         } else {
2255                 bch2_pagecache_add_get(inode);
2256                 ret = generic_file_read_iter(iocb, iter);
2257                 bch2_pagecache_add_put(inode);
2258         }
2259 out:
2260         return bch2_err_class(ret);
2261 }
2262
2263 /* O_DIRECT writes */
2264
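/*
 * Returns true if every existing extent in [offset, offset + size) already
 * has at least nr_replicas replicas and (unless this write is itself
 * compressed) contains no compressed data - i.e. the overwrite can proceed
 * without taking a new disk reservation.
 */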
2265 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2266                                        u64 offset, u64 size,
2267                                        unsigned nr_replicas, bool compressed)
2268 {
2269         struct btree_trans trans;
2270         struct btree_iter iter;
2271         struct bkey_s_c k;
2272         u64 end = offset + size;
2273         u32 snapshot;
2274         bool ret = true;
2275         int err;
2276
2277         bch2_trans_init(&trans, c, 0, 0);
2278 retry:
2279         bch2_trans_begin(&trans);
2280
2281         err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2282         if (err)
2283                 goto err;
2284
2285         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2286                            SPOS(inum.inum, offset, snapshot),
2287                            BTREE_ITER_SLOTS, k, err) {
2288                 if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
2289                         break;
2290
2291                 if (k.k->p.snapshot != snapshot ||
2292                     nr_replicas > bch2_bkey_replicas(c, k) ||
2293                     (!compressed && bch2_bkey_sectors_compressed(k))) {
2294                         ret = false;
2295                         break;
2296                 }
2297         }
2298
2299         offset = iter.pos.offset;
2300         bch2_trans_iter_exit(&trans, &iter);
2301 err:
2302         if (bch2_err_matches(err, BCH_ERR_transaction_restart))
2303                 goto retry;
2304         bch2_trans_exit(&trans);
2305
2306         return err ? false : ret;
2307 }
2308
2309 static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
2310 {
2311         struct bch_fs *c = dio->op.c;
2312         struct bch_inode_info *inode = dio->inode;
2313         struct bio *bio = &dio->op.wbio.bio;
2314
2315         return bch2_check_range_allocated(c, inode_inum(inode),
2316                                 dio->op.pos.offset, bio_sectors(bio),
2317                                 dio->op.opts.data_replicas,
2318                                 dio->op.opts.compression != 0);
2319 }
2320
2321 static void bch2_dio_write_loop_async(struct bch_write_op *);
2322 static __always_inline long bch2_dio_write_done(struct dio_write *dio);
2323
2324 static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
2325 {
2326         struct iovec *iov = dio->inline_vecs;
2327
2328         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2329                 iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
2330                                     GFP_KERNEL);
2331                 if (unlikely(!iov))
2332                         return -ENOMEM;
2333
2334                 dio->free_iov = true;
2335         }
2336
2337         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2338         dio->iter.iov = iov;
2339         return 0;
2340 }
2341
2342 static void bch2_dio_write_flush_done(struct closure *cl)
2343 {
2344         struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
2345         struct bch_fs *c = dio->op.c;
2346
2347         closure_debug_destroy(cl);
2348
2349         dio->op.error = bch2_journal_error(&c->journal);
2350
2351         bch2_dio_write_done(dio);
2352 }
2353
2354 static noinline void bch2_dio_write_flush(struct dio_write *dio)
2355 {
2356         struct bch_fs *c = dio->op.c;
2357         struct bch_inode_unpacked inode;
2358         int ret;
2359
2360         dio->flush = 0;
2361
2362         closure_init(&dio->op.cl, NULL);
2363
2364         if (!dio->op.error) {
2365                 ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
2366                 if (ret) {
2367                         dio->op.error = ret;
2368                 } else {
2369                         bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
2370                         bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
2371                 }
2372         }
2373
2374         if (dio->sync) {
2375                 closure_sync(&dio->op.cl);
2376                 closure_debug_destroy(&dio->op.cl);
2377         } else {
2378                 continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
2379         }
2380 }
2381
2382 static __always_inline long bch2_dio_write_done(struct dio_write *dio)
2383 {
2384         struct kiocb *req = dio->req;
2385         struct bch_inode_info *inode = dio->inode;
2386         bool sync = dio->sync;
2387         long ret;
2388
2389         if (unlikely(dio->flush)) {
2390                 bch2_dio_write_flush(dio);
2391                 if (!sync)
2392                         return -EIOCBQUEUED;
2393         }
2394
2395         bch2_pagecache_block_put(inode);
2396
2397         if (dio->free_iov)
2398                 kfree(dio->iter.iov);
2399
2400         ret = dio->op.error ?: ((long) dio->written << 9);
2401         bio_put(&dio->op.wbio.bio);
2402
2403         /* inode->i_dio_count is our ref on inode and thus bch_fs */
2404         inode_dio_end(&inode->v);
2405
2406         if (ret < 0)
2407                 ret = bch2_err_class(ret);
2408
2409         if (!sync) {
2410                 req->ki_complete(req, ret);
2411                 ret = -EIOCBQUEUED;
2412         }
2413         return ret;
2414 }
2415
2416 static __always_inline void bch2_dio_write_end(struct dio_write *dio)
2417 {
2418         struct bch_fs *c = dio->op.c;
2419         struct kiocb *req = dio->req;
2420         struct bch_inode_info *inode = dio->inode;
2421         struct bio *bio = &dio->op.wbio.bio;
2422
2423         req->ki_pos     += (u64) dio->op.written << 9;
2424         dio->written    += dio->op.written;
2425
2426         if (dio->extending) {
2427                 spin_lock(&inode->v.i_lock);
2428                 if (req->ki_pos > inode->v.i_size)
2429                         i_size_write(&inode->v, req->ki_pos);
2430                 spin_unlock(&inode->v.i_lock);
2431         }
2432
2433         if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
2434                 mutex_lock(&inode->ei_quota_lock);
2435                 __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
2436                 __bch2_quota_reservation_put(c, inode, &dio->quota_res);
2437                 mutex_unlock(&inode->ei_quota_lock);
2438         }
2439
2440         if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
2441                 struct bvec_iter_all iter;
2442                 struct folio_vec fv;
2443
2444                 bio_for_each_folio_all(fv, bio, iter)
2445                         folio_put(fv.fv_folio);
2446         }
2447
2448         if (unlikely(dio->op.error))
2449                 set_bit(EI_INODE_ERROR, &inode->ei_flags);
2450 }
2451
2452 static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
2453 {
2454         struct bch_fs *c = dio->op.c;
2455         struct kiocb *req = dio->req;
2456         struct address_space *mapping = dio->mapping;
2457         struct bch_inode_info *inode = dio->inode;
2458         struct bch_io_opts opts;
2459         struct bio *bio = &dio->op.wbio.bio;
2460         unsigned unaligned, iter_count;
2461         bool sync = dio->sync, dropped_locks;
2462         long ret;
2463
2464         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2465
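        /*
         * Each pass pins the next chunk of the user buffer into the bio, sets
         * up a write op for it and submits; synchronous writes loop here until
         * the iter is drained, asynchronous ones return -EIOCBQUEUED after the
         * first submission and continue from bch2_dio_write_loop_async().
         *
         * faults_disabled_mapping is set around bio_iov_iter_get_pages() so
         * that faulting in a user buffer that is itself mmapped from this file
         * doesn't deadlock against ei_pagecache_lock (taken for block in
         * bch2_direct_write()) - the fault path drops and retakes it instead,
         * which we detect via fdm_dropped_locks().
         */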
2466         while (1) {
2467                 iter_count = dio->iter.count;
2468
2469                 EBUG_ON(current->faults_disabled_mapping);
2470                 current->faults_disabled_mapping = mapping;
2471
2472                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2473
2474                 dropped_locks = fdm_dropped_locks();
2475
2476                 current->faults_disabled_mapping = NULL;
2477
2478                 /*
2479                  * If the fault handler returned an error but also signalled
2480                  * that it dropped & retook ei_pagecache_lock, we just need to
2481                  * re-shoot down the page cache and retry:
2482                  */
2483                 if (dropped_locks && ret)
2484                         ret = 0;
2485
2486                 if (unlikely(ret < 0))
2487                         goto err;
2488
2489                 if (unlikely(dropped_locks)) {
2490                         ret = write_invalidate_inode_pages_range(mapping,
2491                                         req->ki_pos,
2492                                         req->ki_pos + iter_count - 1);
2493                         if (unlikely(ret))
2494                                 goto err;
2495
2496                         if (!bio->bi_iter.bi_size)
2497                                 continue;
2498                 }
2499
2500                 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2501                 bio->bi_iter.bi_size -= unaligned;
2502                 iov_iter_revert(&dio->iter, unaligned);
2503
2504                 if (!bio->bi_iter.bi_size) {
2505                         /*
2506                          * bio_iov_iter_get_pages was only able to get <
2507                          * blocksize worth of pages:
2508                          */
2509                         ret = -EFAULT;
2510                         goto err;
2511                 }
2512
2513                 bch2_write_op_init(&dio->op, c, opts);
2514                 dio->op.end_io          = sync
2515                         ? NULL
2516                         : bch2_dio_write_loop_async;
2517                 dio->op.target          = dio->op.opts.foreground_target;
2518                 dio->op.write_point     = writepoint_hashed((unsigned long) current);
2519                 dio->op.nr_replicas     = dio->op.opts.data_replicas;
2520                 dio->op.subvol          = inode->ei_subvol;
2521                 dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2522                 dio->op.devs_need_flush = &inode->ei_devs_need_flush;
2523
2524                 if (sync)
2525                         dio->op.flags |= BCH_WRITE_SYNC;
2526                 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2527
2528                 ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2529                                                  bio_sectors(bio), true);
2530                 if (unlikely(ret))
2531                         goto err;
2532
2533                 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2534                                                 dio->op.opts.data_replicas, 0);
2535                 if (unlikely(ret) &&
2536                     !bch2_dio_write_check_allocated(dio))
2537                         goto err;
2538
2539                 task_io_account_write(bio->bi_iter.bi_size);
2540
2541                 if (unlikely(dio->iter.count) &&
2542                     !dio->sync &&
2543                     !dio->loop &&
2544                     bch2_dio_write_copy_iov(dio))
2545                         dio->sync = sync = true;
2546
2547                 dio->loop = true;
2548                 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2549
2550                 if (!sync)
2551                         return -EIOCBQUEUED;
2552
2553                 bch2_dio_write_end(dio);
2554
2555                 if (likely(!dio->iter.count) || dio->op.error)
2556                         break;
2557
2558                 bio_reset(bio, NULL, REQ_OP_WRITE);
2559         }
2560 out:
2561         return bch2_dio_write_done(dio);
2562 err:
2563         dio->op.error = ret;
2564
2565         if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
2566                 struct bvec_iter_all iter;
2567                 struct folio_vec fv;
2568
2569                 bio_for_each_folio_all(fv, bio, iter)
2570                         folio_put(fv.fv_folio);
2571         }
2572
2573         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2574         goto out;
2575 }
2576
2577 static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
2578 {
2579         struct mm_struct *mm = dio->mm;
2580
2581         bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
2582
2583         if (mm)
2584                 kthread_use_mm(mm);
2585         bch2_dio_write_loop(dio);
2586         if (mm)
2587                 kthread_unuse_mm(mm);
2588 }
2589
2590 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2591 {
2592         struct dio_write *dio = container_of(op, struct dio_write, op);
2593
2594         bch2_dio_write_end(dio);
2595
2596         if (likely(!dio->iter.count) || dio->op.error)
2597                 bch2_dio_write_done(dio);
2598         else
2599                 bch2_dio_write_continue(dio);
2600 }
2601
2602 static noinline
2603 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2604 {
2605         struct file *file = req->ki_filp;
2606         struct address_space *mapping = file->f_mapping;
2607         struct bch_inode_info *inode = file_bch_inode(file);
2608         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2609         struct dio_write *dio;
2610         struct bio *bio;
2611         bool locked = true, extending;
2612         ssize_t ret;
2613
2614         prefetch(&c->opts);
2615         prefetch((void *) &c->opts + 64);
2616         prefetch(&inode->ei_inode);
2617         prefetch((void *) &inode->ei_inode + 64);
2618
2619         inode_lock(&inode->v);
2620
2621         ret = generic_write_checks(req, iter);
2622         if (unlikely(ret <= 0))
2623                 goto err;
2624
2625         ret = file_remove_privs(file);
2626         if (unlikely(ret))
2627                 goto err;
2628
2629         ret = file_update_time(file);
2630         if (unlikely(ret))
2631                 goto err;
2632
2633         if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
                 ret = -EINVAL;
2634                 goto err;
         }
2635
2636         inode_dio_begin(&inode->v);
2637         bch2_pagecache_block_get(inode);
2638
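        /*
         * Appending writes have to update i_size when they complete, so they
         * run synchronously and keep the inode lock held; non-extending
         * O_DIRECT writes can drop it here and proceed in parallel.
         */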
2639         extending = req->ki_pos + iter->count > inode->v.i_size;
2640         if (!extending) {
2641                 inode_unlock(&inode->v);
2642                 locked = false;
2643         }
2644
2645         bio = bio_alloc_bioset(NULL,
2646                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2647                                REQ_OP_WRITE,
2648                                GFP_KERNEL,
2649                                &c->dio_write_bioset);
2650         dio = container_of(bio, struct dio_write, op.wbio.bio);
2651         dio->req                = req;
2652         dio->mapping            = mapping;
2653         dio->inode              = inode;
2654         dio->mm                 = current->mm;
2655         dio->loop               = false;
2656         dio->extending          = extending;
2657         dio->sync               = is_sync_kiocb(req) || extending;
2658         dio->flush              = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
2659         dio->free_iov           = false;
2660         dio->quota_res.sectors  = 0;
2661         dio->written            = 0;
2662         dio->iter               = *iter;
2663         dio->op.c               = c;
2664
2665         if (unlikely(mapping->nrpages)) {
2666                 ret = write_invalidate_inode_pages_range(mapping,
2667                                                 req->ki_pos,
2668                                                 req->ki_pos + iter->count - 1);
2669                 if (unlikely(ret))
2670                         goto err_put_bio;
2671         }
2672
2673         ret = bch2_dio_write_loop(dio);
2674 err:
2675         if (locked)
2676                 inode_unlock(&inode->v);
2677         return ret;
2678 err_put_bio:
2679         bch2_pagecache_block_put(inode);
2680         bio_put(bio);
2681         inode_dio_end(&inode->v);
2682         goto err;
2683 }
2684
2685 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2686 {
2687         struct file *file = iocb->ki_filp;
2688         struct bch_inode_info *inode = file_bch_inode(file);
2689         ssize_t ret;
2690
2691         if (iocb->ki_flags & IOCB_DIRECT) {
2692                 ret = bch2_direct_write(iocb, from);
2693                 goto out;
2694         }
2695
2696         /* We can write back this queue in page reclaim */
2697         current->backing_dev_info = inode_to_bdi(&inode->v);
2698         inode_lock(&inode->v);
2699
2700         ret = generic_write_checks(iocb, from);
2701         if (ret <= 0)
2702                 goto unlock;
2703
2704         ret = file_remove_privs(file);
2705         if (ret)
2706                 goto unlock;
2707
2708         ret = file_update_time(file);
2709         if (ret)
2710                 goto unlock;
2711
2712         ret = bch2_buffered_write(iocb, from);
2713         if (likely(ret > 0))
2714                 iocb->ki_pos += ret;
2715 unlock:
2716         inode_unlock(&inode->v);
2717         current->backing_dev_info = NULL;
2718
2719         if (ret > 0)
2720                 ret = generic_write_sync(iocb, ret);
2721 out:
2722         return bch2_err_class(ret);
2723 }
2724
2725 /* fsync: */
2726
2727 /*
2728  * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2729  * insert trigger: look up the btree inode instead
2730  */
2731 static int bch2_flush_inode(struct bch_fs *c,
2732                             struct bch_inode_info *inode)
2733 {
2734         struct bch_inode_unpacked u;
2735         int ret;
2736
2737         if (c->opts.journal_flush_disabled)
2738                 return 0;
2739
2740         ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
2741         if (ret)
2742                 return ret;
2743
2744         return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
2745                 bch2_inode_flush_nocow_writes(c, inode);
2746 }
2747
2748 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2749 {
2750         struct bch_inode_info *inode = file_bch_inode(file);
2751         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2752         int ret, ret2, ret3;
2753
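        /*
         * Flush dirty pages, then the inode metadata, then the journal (and
         * any pending nocow writes); attempt all three even if an earlier
         * step failed, and report the first error.
         */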
2754         ret = file_write_and_wait_range(file, start, end);
2755         ret2 = sync_inode_metadata(&inode->v, 1);
2756         ret3 = bch2_flush_inode(c, inode);
2757
2758         return bch2_err_class(ret ?: ret2 ?: ret3);
2759 }
2760
2761 /* truncate: */
2762
2763 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2764                                  struct bpos start,
2765                                  struct bpos end)
2766 {
2767         struct btree_trans trans;
2768         struct btree_iter iter;
2769         struct bkey_s_c k;
2770         int ret = 0;
2771
2772         bch2_trans_init(&trans, c, 0, 0);
2773 retry:
2774         bch2_trans_begin(&trans);
2775
2776         ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2777         if (ret)
2778                 goto err;
2779
2780         for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
2781                 if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
2782                         ret = 1;
2783                         break;
2784                 }
2785         start = iter.pos;
2786         bch2_trans_iter_exit(&trans, &iter);
2787 err:
2788         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2789                 goto retry;
2790
2791         bch2_trans_exit(&trans);
2792         return ret;
2793 }
2794
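/*
 * Zero the part of a folio overlapping [start, end), fixing up the per-sector
 * state and i_sectors accounting, and redirty it so the zeroes get written
 * back. Returns > 0 if the folio straddles i_size and writeback will be doing
 * the i_size update, 0 if the caller is responsible for it, or a negative
 * error code.
 */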
2795 static int __bch2_truncate_folio(struct bch_inode_info *inode,
2796                                  pgoff_t index, loff_t start, loff_t end)
2797 {
2798         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2799         struct address_space *mapping = inode->v.i_mapping;
2800         struct bch_folio *s;
2801         unsigned start_offset = start & (PAGE_SIZE - 1);
2802         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2803         unsigned i;
2804         struct folio *folio;
2805         s64 i_sectors_delta = 0;
2806         int ret = 0;
2807         u64 end_pos;
2808
2809         folio = filemap_lock_folio(mapping, index);
2810         if (!folio) {
2811                 /*
2812                  * XXX: we're doing two index lookups when we end up reading the
2813                  * folio
2814                  */
2815                 ret = range_has_data(c, inode->ei_subvol,
2816                                 POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
2817                                 POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
2818                 if (ret <= 0)
2819                         return ret;
2820
2821                 folio = __filemap_get_folio(mapping, index,
2822                                             FGP_LOCK|FGP_CREAT, GFP_KERNEL);
2823                 if (unlikely(!folio)) {
2824                         ret = -ENOMEM;
2825                         goto out;
2826                 }
2827         }
2828
2829         BUG_ON(start    >= folio_end_pos(folio));
2830         BUG_ON(end      <= folio_pos(folio));
2831
2832         start_offset    = max(start, folio_pos(folio)) - folio_pos(folio);
2833         end_offset      = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
2834
2835         /* Folio boundary? Nothing to do */
2836         if (start_offset == 0 &&
2837             end_offset == folio_size(folio)) {
2838                 ret = 0;
2839                 goto unlock;
2840         }
2841
2842         s = bch2_folio_create(folio, 0);
2843         if (!s) {
2844                 ret = -ENOMEM;
2845                 goto unlock;
2846         }
2847
2848         if (!folio_test_uptodate(folio)) {
2849                 ret = bch2_read_single_folio(folio, mapping);
2850                 if (ret)
2851                         goto unlock;
2852         }
2853
2854         ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
2855         if (ret)
2856                 goto unlock;
2857
2858         for (i = round_up(start_offset, block_bytes(c)) >> 9;
2859              i < round_down(end_offset, block_bytes(c)) >> 9;
2860              i++) {
2861                 s->s[i].nr_replicas     = 0;
2862
2863                 i_sectors_delta -= s->s[i].state == SECTOR_dirty;
2864                 folio_sector_set(folio, s, i, SECTOR_unallocated);
2865         }
2866
2867         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2868
2869         /*
2870          * Caller needs to know whether this folio will be written out by
2871          * writeback - doing an i_size update if necessary - or whether the caller
2872          * will be responsible for the i_size update.
2873          *
2874          * Note that we shouldn't ever see a folio beyond EOF, but check and
2875          * warn if so. This has been observed when we fail to clean up folios
2876          * after a short write, and there's still a chance reclaim will fix
2877          * things up.
2878          */
2879         WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
2880         end_pos = folio_end_pos(folio);
2881         if (inode->v.i_size > folio_pos(folio))
2882                 end_pos = min_t(u64, inode->v.i_size, end_pos);
2883         ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;
2884
2885         folio_zero_segment(folio, start_offset, end_offset);
2886
2887         /*
2888          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2889          *
2890          * XXX: because we aren't currently tracking whether the folio has actual
2891          * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2892          */
2893         BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
2894
2895         /*
2896          * This removes any writeable userspace mappings; we need to force
2897          * .page_mkwrite to be called again before any mmapped writes, to
2898          * redirty the full page:
2899          */
2900         folio_mkclean(folio);
2901         filemap_dirty_folio(mapping, folio);
2902 unlock:
2903         folio_unlock(folio);
2904         folio_put(folio);
2905 out:
2906         return ret;
2907 }
2908
2909 static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
2910 {
2911         return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
2912                                      from, ANYSINT_MAX(loff_t));
2913 }
2914
2915 static int bch2_truncate_folios(struct bch_inode_info *inode,
2916                                 loff_t start, loff_t end)
2917 {
2918         int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
2919                                         start, end);
2920
2921         if (ret >= 0 &&
2922             start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2923                 ret = __bch2_truncate_folio(inode,
2924                                         (end - 1) >> PAGE_SHIFT,
2925                                         start, end);
2926         return ret;
2927 }
2928
2929 static int bch2_extend(struct mnt_idmap *idmap,
2930                        struct bch_inode_info *inode,
2931                        struct bch_inode_unpacked *inode_u,
2932                        struct iattr *iattr)
2933 {
2934         struct address_space *mapping = inode->v.i_mapping;
2935         int ret;
2936
2937         /*
2938          * sync appends:
2939          *
2940          * this has to be done _before_ extending i_size:
2941          */
2942         ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2943         if (ret)
2944                 return ret;
2945
2946         truncate_setsize(&inode->v, iattr->ia_size);
2947
2948         return bch2_setattr_nonsize(idmap, inode, iattr);
2949 }
2950
2951 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2952                                    struct bch_inode_unpacked *bi,
2953                                    void *p)
2954 {
2955         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2956         return 0;
2957 }
2958
2959 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2960                                   struct bch_inode_unpacked *bi, void *p)
2961 {
2962         u64 *new_i_size = p;
2963
2964         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2965         bi->bi_size = *new_i_size;
2966         return 0;
2967 }
2968
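/*
 * A truncate that shrinks the file happens in two phases: the inode is first
 * written with BCH_INODE_I_SIZE_DIRTY set and the new size, then the page
 * cache is truncated and extents past the new EOF are deleted, and finally
 * the flag is cleared - so an interrupted truncate can be finished on
 * recovery. Truncates that grow the file are handled by bch2_extend() instead.
 */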
2969 int bch2_truncate(struct mnt_idmap *idmap,
2970                   struct bch_inode_info *inode, struct iattr *iattr)
2971 {
2972         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2973         struct address_space *mapping = inode->v.i_mapping;
2974         struct bch_inode_unpacked inode_u;
2975         u64 new_i_size = iattr->ia_size;
2976         s64 i_sectors_delta = 0;
2977         int ret = 0;
2978
2979         /*
2980          * If the truncate call will change the size of the file, the
2981          * cmtimes should be updated. If the size will not change, we
2982          * do not need to update the cmtimes.
2983          */
2984         if (iattr->ia_size != inode->v.i_size) {
2985                 if (!(iattr->ia_valid & ATTR_MTIME))
2986                         ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2987                 if (!(iattr->ia_valid & ATTR_CTIME))
2988                         ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2989                 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2990         }
2991
2992         inode_dio_wait(&inode->v);
2993         bch2_pagecache_block_get(inode);
2994
2995         ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2996         if (ret)
2997                 goto err;
2998
2999         /*
3000          * check this before the next assertion; on filesystem error our normal
3001          * invariants are a bit broken (truncate has to truncate the page cache
3002          * before the inode).
3003          */
3004         ret = bch2_journal_error(&c->journal);
3005         if (ret)
3006                 goto err;
3007
3008         WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
3009                   inode->v.i_size < inode_u.bi_size,
3010                   "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
3011                   (u64) inode->v.i_size, inode_u.bi_size);
3012
3013         if (iattr->ia_size > inode->v.i_size) {
3014                 ret = bch2_extend(idmap, inode, &inode_u, iattr);
3015                 goto err;
3016         }
3017
3018         iattr->ia_valid &= ~ATTR_SIZE;
3019
3020         ret = bch2_truncate_folio(inode, iattr->ia_size);
3021         if (unlikely(ret < 0))
3022                 goto err;
3023
3024         /*
3025          * When extending, we're going to write the new i_size to disk
3026          * immediately so we need to flush anything above the current on disk
3027          * i_size first:
3028          *
3029          * Also, when extending we need to flush the page that i_size currently
3030          * straddles - if it's mapped to userspace, we need to ensure that
3031          * userspace has to redirty it and call .mkwrite -> set_page_dirty
3032          * again to allocate the part of the page that was extended.
3033          */
3034         if (iattr->ia_size > inode_u.bi_size)
3035                 ret = filemap_write_and_wait_range(mapping,
3036                                 inode_u.bi_size,
3037                                 iattr->ia_size - 1);
3038         else if (iattr->ia_size & (PAGE_SIZE - 1))
3039                 ret = filemap_write_and_wait_range(mapping,
3040                                 round_down(iattr->ia_size, PAGE_SIZE),
3041                                 iattr->ia_size - 1);
3042         if (ret)
3043                 goto err;
3044
3045         mutex_lock(&inode->ei_update_lock);
3046         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
3047                                &new_i_size, 0);
3048         mutex_unlock(&inode->ei_update_lock);
3049
3050         if (unlikely(ret))
3051                 goto err;
3052
3053         truncate_setsize(&inode->v, iattr->ia_size);
3054
3055         ret = bch2_fpunch(c, inode_inum(inode),
3056                         round_up(iattr->ia_size, block_bytes(c)) >> 9,
3057                         U64_MAX, &i_sectors_delta);
3058         i_sectors_acct(c, inode, NULL, i_sectors_delta);
3059
3060         bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
3061                                 !bch2_journal_error(&c->journal), c,
3062                                 "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
3063                                 inode->v.i_ino, (u64) inode->v.i_blocks,
3064                                 inode->ei_inode.bi_sectors);
3065         if (unlikely(ret))
3066                 goto err;
3067
3068         mutex_lock(&inode->ei_update_lock);
3069         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
3070         mutex_unlock(&inode->ei_update_lock);
3071
3072         ret = bch2_setattr_nonsize(idmap, inode, iattr);
3073 err:
3074         bch2_pagecache_block_put(inode);
3075         return bch2_err_class(ret);
3076 }
3077
3078 /* fallocate: */
3079
3080 static int inode_update_times_fn(struct bch_inode_info *inode,
3081                                  struct bch_inode_unpacked *bi, void *p)
3082 {
3083         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3084
3085         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
3086         return 0;
3087 }
3088
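     /*
      * FALLOC_FL_PUNCH_HOLE: truncate the partial folios at each end of the
      * range, drop the cached pages in between, and punch the block-aligned
      * middle out of the extents btree, then update the inode.
      */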
3089 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
3090 {
3091         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3092         u64 end         = offset + len;
3093         u64 block_start = round_up(offset, block_bytes(c));
3094         u64 block_end   = round_down(end, block_bytes(c));
3095         bool truncated_last_page;
3096         int ret = 0;
3097
3098         ret = bch2_truncate_folios(inode, offset, end);
3099         if (unlikely(ret < 0))
3100                 goto err;
3101
3102         truncated_last_page = ret;
3103
3104         truncate_pagecache_range(&inode->v, offset, end - 1);
3105
3106         if (block_start < block_end) {
3107                 s64 i_sectors_delta = 0;
3108
3109                 ret = bch2_fpunch(c, inode_inum(inode),
3110                                   block_start >> 9, block_end >> 9,
3111                                   &i_sectors_delta);
3112                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3113         }
3114
3115         mutex_lock(&inode->ei_update_lock);
3116         if (end >= inode->v.i_size && !truncated_last_page) {
3117                 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
3118                                             ATTR_MTIME|ATTR_CTIME);
3119         } else {
3120                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
3121                                        ATTR_MTIME|ATTR_CTIME);
3122         }
3123         mutex_unlock(&inode->ei_update_lock);
3124 err:
3125         return ret;
3126 }
3127
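     /*
      * Shared implementation of FALLOC_FL_COLLAPSE_RANGE and
      * FALLOC_FL_INSERT_RANGE: shifts extents above the affected range down
      * (collapse) or up (insert) by @len and adjusts i_size to match.
      */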
3128 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
3129                                    loff_t offset, loff_t len,
3130                                    bool insert)
3131 {
3132         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3133         struct address_space *mapping = inode->v.i_mapping;
3134         struct bkey_buf copy;
3135         struct btree_trans trans;
3136         struct btree_iter src, dst, del;
3137         loff_t shift, new_size;
3138         u64 src_start;
3139         int ret = 0;
3140
3141         if ((offset | len) & (block_bytes(c) - 1))
3142                 return -EINVAL;
3143
3144         if (insert) {
3145                 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
3146                         return -EFBIG;
3147
3148                 if (offset >= inode->v.i_size)
3149                         return -EINVAL;
3150
3151                 src_start       = U64_MAX;
3152                 shift           = len;
3153         } else {
3154                 if (offset + len >= inode->v.i_size)
3155                         return -EINVAL;
3156
3157                 src_start       = offset + len;
3158                 shift           = -len;
3159         }
3160
3161         new_size = inode->v.i_size + shift;
3162
3163         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
3164         if (ret)
3165                 return ret;
3166
3167         if (insert) {
3168                 i_size_write(&inode->v, new_size);
3169                 mutex_lock(&inode->ei_update_lock);
3170                 ret = bch2_write_inode_size(c, inode, new_size,
3171                                             ATTR_MTIME|ATTR_CTIME);
3172                 mutex_unlock(&inode->ei_update_lock);
3173         } else {
3174                 s64 i_sectors_delta = 0;
3175
3176                 ret = bch2_fpunch(c, inode_inum(inode),
3177                                   offset >> 9, (offset + len) >> 9,
3178                                   &i_sectors_delta);
3179                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3180
3181                 if (ret)
3182                         return ret;
3183         }
3184
3185         bch2_bkey_buf_init(&copy);
3186         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
3187         bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
3188                         POS(inode->v.i_ino, src_start >> 9),
3189                         BTREE_ITER_INTENT);
3190         bch2_trans_copy_iter(&dst, &src);
3191         bch2_trans_copy_iter(&del, &src);
3192
3193         while (ret == 0 ||
3194                bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
3195                 struct disk_reservation disk_res =
3196                         bch2_disk_reservation_init(c, 0);
3197                 struct bkey_i delete;
3198                 struct bkey_s_c k;
3199                 struct bpos next_pos;
3200                 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
3201                 struct bpos atomic_end;
3202                 unsigned trigger_flags = 0;
3203                 u32 snapshot;
3204
3205                 bch2_trans_begin(&trans);
3206
3207                 ret = bch2_subvolume_get_snapshot(&trans,
3208                                         inode->ei_subvol, &snapshot);
3209                 if (ret)
3210                         continue;
3211
3212                 bch2_btree_iter_set_snapshot(&src, snapshot);
3213                 bch2_btree_iter_set_snapshot(&dst, snapshot);
3214                 bch2_btree_iter_set_snapshot(&del, snapshot);
3215
3216                 bch2_trans_begin(&trans);
3217
3218                 k = insert
3219                         ? bch2_btree_iter_peek_prev(&src)
3220                         : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
3221                 if ((ret = bkey_err(k)))
3222                         continue;
3223
3224                 if (!k.k || k.k->p.inode != inode->v.i_ino)
3225                         break;
3226
3227                 if (insert &&
3228                     bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
3229                         break;
3230 reassemble:
3231                 bch2_bkey_buf_reassemble(&copy, c, k);
3232
3233                 if (insert &&
3234                     bkey_lt(bkey_start_pos(k.k), move_pos))
3235                         bch2_cut_front(move_pos, copy.k);
3236
3237                 copy.k->k.p.offset += shift >> 9;
3238                 bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
3239
3240                 ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
3241                 if (ret)
3242                         continue;
3243
3244                 if (!bkey_eq(atomic_end, copy.k->k.p)) {
3245                         if (insert) {
3246                                 move_pos = atomic_end;
3247                                 move_pos.offset -= shift >> 9;
3248                                 goto reassemble;
3249                         } else {
3250                                 bch2_cut_back(atomic_end, copy.k);
3251                         }
3252                 }
3253
3254                 bkey_init(&delete.k);
3255                 delete.k.p = copy.k->k.p;
3256                 delete.k.size = copy.k->k.size;
3257                 delete.k.p.offset -= shift >> 9;
3258                 bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
3259
3260                 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
3261
3262                 if (copy.k->k.size != k.k->size) {
3263                         /* We might end up splitting compressed extents: */
3264                         unsigned nr_ptrs =
3265                                 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
3266
3267                         ret = bch2_disk_reservation_get(c, &disk_res,
3268                                         copy.k->k.size, nr_ptrs,
3269                                         BCH_DISK_RESERVATION_NOFAIL);
3270                         BUG_ON(ret);
3271                 }
3272
3273                 ret =   bch2_btree_iter_traverse(&del) ?:
3274                         bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
3275                         bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
3276                         bch2_trans_commit(&trans, &disk_res, NULL,
3277                                           BTREE_INSERT_NOFAIL);
3278                 bch2_disk_reservation_put(c, &disk_res);
3279
3280                 if (!ret)
3281                         bch2_btree_iter_set_pos(&src, next_pos);
3282         }
3283         bch2_trans_iter_exit(&trans, &del);
3284         bch2_trans_iter_exit(&trans, &dst);
3285         bch2_trans_iter_exit(&trans, &src);
3286         bch2_trans_exit(&trans);
3287         bch2_bkey_buf_exit(&copy, c);
3288
3289         if (ret)
3290                 return ret;
3291
3292         mutex_lock(&inode->ei_update_lock);
3293         if (!insert) {
3294                 i_size_write(&inode->v, new_size);
3295                 ret = bch2_write_inode_size(c, inode, new_size,
3296                                             ATTR_MTIME|ATTR_CTIME);
3297         } else {
3298                 /* We need an inode update to update bi_journal_seq for fsync: */
3299                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
3300                                        ATTR_MTIME|ATTR_CTIME);
3301         }
3302         mutex_unlock(&inode->ei_update_lock);
3303         return ret;
3304 }
3305
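     /*
      * Core of fallocate: walk the extents btree over
      * [start_sector, end_sector) and reserve or allocate space for each slot
      * that isn't already allocated; with FALLOC_FL_ZERO_RANGE existing data
      * extents are overwritten as well.
      */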
3306 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
3307                              u64 start_sector, u64 end_sector)
3308 {
3309         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3310         struct btree_trans trans;
3311         struct btree_iter iter;
3312         struct bpos end_pos = POS(inode->v.i_ino, end_sector);
3313         struct bch_io_opts opts;
3314         int ret = 0;
3315
3316         bch2_inode_opts_get(&opts, c, &inode->ei_inode);
3317         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
3318
3319         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3320                         POS(inode->v.i_ino, start_sector),
3321                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
3322
3323         while (!ret && bkey_lt(iter.pos, end_pos)) {
3324                 s64 i_sectors_delta = 0;
3325                 struct quota_res quota_res = { 0 };
3326                 struct bkey_s_c k;
3327                 unsigned sectors;
3328                 u32 snapshot;
3329
3330                 bch2_trans_begin(&trans);
3331
3332                 ret = bch2_subvolume_get_snapshot(&trans,
3333                                         inode->ei_subvol, &snapshot);
3334                 if (ret)
3335                         goto bkey_err;
3336
3337                 bch2_btree_iter_set_snapshot(&iter, snapshot);
3338
3339                 k = bch2_btree_iter_peek_slot(&iter);
3340                 if ((ret = bkey_err(k)))
3341                         goto bkey_err;
3342
3343                 /* already reserved */
3344                 if (bkey_extent_is_reservation(k) &&
3345                     bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
3346                         bch2_btree_iter_advance(&iter);
3347                         continue;
3348                 }
3349
3350                 if (bkey_extent_is_data(k.k) &&
3351                     !(mode & FALLOC_FL_ZERO_RANGE)) {
3352                         bch2_btree_iter_advance(&iter);
3353                         continue;
3354                 }
3355
3356                 /*
3357                  * XXX: for nocow mode, we should promote shared extents to
3358                  * unshared here
3359                  */
3360
3361                 sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
3362
3363                 if (!bkey_extent_is_allocation(k.k)) {
3364                         ret = bch2_quota_reservation_add(c, inode,
3365                                         &quota_res,
3366                                         sectors, true);
3367                         if (unlikely(ret))
3368                                 goto bkey_err;
3369                 }
3370
3371                 ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
3372                                             sectors, opts, &i_sectors_delta,
3373                                             writepoint_hashed((unsigned long) current));
3374                 if (ret)
3375                         goto bkey_err;
3376
3377                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3378 bkey_err:
3379                 bch2_quota_reservation_put(c, inode, &quota_res);
3380                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3381                         ret = 0;
3382         }
3383
3384         bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
3385         mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
3386
3387         if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
3388                 struct quota_res quota_res = { 0 };
3389                 s64 i_sectors_delta = 0;
3390
3391                 bch2_fpunch_at(&trans, &iter, inode_inum(inode),
3392                                end_sector, &i_sectors_delta);
3393                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3394                 bch2_quota_reservation_put(c, inode, &quota_res);
3395         }
3396
3397         bch2_trans_iter_exit(&trans, &iter);
3398         bch2_trans_exit(&trans);
3399         return ret;
3400 }
3401
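     /*
      * Plain fallocate and FALLOC_FL_ZERO_RANGE: prepare the pagecache,
      * allocate the block-aligned range via __bchfs_fallocate(), then update
      * the in-memory and on-disk i_size where required.
      */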
3402 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
3403                             loff_t offset, loff_t len)
3404 {
3405         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3406         u64 end         = offset + len;
3407         u64 block_start = round_down(offset,    block_bytes(c));
3408         u64 block_end   = round_up(end,         block_bytes(c));
3409         bool truncated_last_page = false;
3410         int ret, ret2 = 0;
3411
3412         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
3413                 ret = inode_newsize_ok(&inode->v, end);
3414                 if (ret)
3415                         return ret;
3416         }
3417
3418         if (mode & FALLOC_FL_ZERO_RANGE) {
3419                 ret = bch2_truncate_folios(inode, offset, end);
3420                 if (unlikely(ret < 0))
3421                         return ret;
3422
3423                 truncated_last_page = ret;
3424
3425                 truncate_pagecache_range(&inode->v, offset, end - 1);
3426
3427                 block_start     = round_up(offset,      block_bytes(c));
3428                 block_end       = round_down(end,       block_bytes(c));
3429         }
3430
3431         ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
3432
3433         /*
3434          * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
3435          * so that the VFS cache i_size is consistent with the btree i_size:
3436          */
3437         if (ret &&
3438             !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
3439                 return ret;
3440
3441         if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
3442                 end = inode->v.i_size;
3443
3444         if (end >= inode->v.i_size &&
3445             (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
3446              !(mode & FALLOC_FL_KEEP_SIZE))) {
3447                 spin_lock(&inode->v.i_lock);
3448                 i_size_write(&inode->v, end);
3449                 spin_unlock(&inode->v.i_lock);
3450
3451                 mutex_lock(&inode->ei_update_lock);
3452                 ret2 = bch2_write_inode_size(c, inode, end, 0);
3453                 mutex_unlock(&inode->ei_update_lock);
3454         }
3455
3456         return ret ?: ret2;
3457 }
3458
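     /*
      * fallocate() entry point: takes the fallocate write ref, the inode lock
      * and the pagecache block, then dispatches on @mode to the allocate,
      * punch, insert-range or collapse-range implementation.
      */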
3459 long bch2_fallocate_dispatch(struct file *file, int mode,
3460                              loff_t offset, loff_t len)
3461 {
3462         struct bch_inode_info *inode = file_bch_inode(file);
3463         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3464         long ret;
3465
3466         if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
3467                 return -EROFS;
3468
3469         inode_lock(&inode->v);
3470         inode_dio_wait(&inode->v);
3471         bch2_pagecache_block_get(inode);
3472
3473         ret = file_modified(file);
3474         if (ret)
3475                 goto err;
3476
3477         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
3478                 ret = bchfs_fallocate(inode, mode, offset, len);
3479         else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
3480                 ret = bchfs_fpunch(inode, offset, len);
3481         else if (mode == FALLOC_FL_INSERT_RANGE)
3482                 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
3483         else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3484                 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
3485         else
3486                 ret = -EOPNOTSUPP;
3487 err:
3488         bch2_pagecache_block_put(inode);
3489         inode_unlock(&inode->v);
3490         bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
3491
3492         return bch2_err_class(ret);
3493 }
3494
3495 /*
3496  * Take a quota reservation for unallocated blocks in a given file range.
3497  * Does not check the pagecache.
3498  */
3499 static int quota_reserve_range(struct bch_inode_info *inode,
3500                                struct quota_res *res,
3501                                u64 start, u64 end)
3502 {
3503         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3504         struct btree_trans trans;
3505         struct btree_iter iter;
3506         struct bkey_s_c k;
3507         u32 snapshot;
3508         u64 sectors = end - start;
3509         u64 pos = start;
3510         int ret;
3511
3512         bch2_trans_init(&trans, c, 0, 0);
3513 retry:
3514         bch2_trans_begin(&trans);
3515
3516         ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
3517         if (ret)
3518                 goto err;
3519
3520         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3521                              SPOS(inode->v.i_ino, pos, snapshot), 0);
3522
3523         while (!(ret = btree_trans_too_many_iters(&trans)) &&
3524                (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
3525                !(ret = bkey_err(k))) {
3526                 if (bkey_extent_is_allocation(k.k)) {
3527                         u64 s = min(end, k.k->p.offset) -
3528                                 max(start, bkey_start_offset(k.k));
3529                         BUG_ON(s > sectors);
3530                         sectors -= s;
3531                 }
3532                 bch2_btree_iter_advance(&iter);
3533         }
3534         pos = iter.pos.offset;
3535         bch2_trans_iter_exit(&trans, &iter);
3536 err:
3537         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3538                 goto retry;
3539
3540         bch2_trans_exit(&trans);
3541
3542         if (ret)
3543                 return ret;
3544
3545         return bch2_quota_reservation_add(c, inode, res, sectors, true);
3546 }
3547
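     /*
      * remap_file_range() entry point (reflink): after generic prep and
      * pagecache invalidation, clones block-aligned extents from the source
      * to the destination with bch2_remap_range() and grows the destination
      * i_size if needed.
      */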
3548 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
3549                              struct file *file_dst, loff_t pos_dst,
3550                              loff_t len, unsigned remap_flags)
3551 {
3552         struct bch_inode_info *src = file_bch_inode(file_src);
3553         struct bch_inode_info *dst = file_bch_inode(file_dst);
3554         struct bch_fs *c = src->v.i_sb->s_fs_info;
3555         struct quota_res quota_res = { 0 };
3556         s64 i_sectors_delta = 0;
3557         u64 aligned_len;
3558         loff_t ret = 0;
3559
3560         if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
3561                 return -EINVAL;
3562
3563         if (remap_flags & REMAP_FILE_DEDUP)
3564                 return -EOPNOTSUPP;
3565
3566         if ((pos_src & (block_bytes(c) - 1)) ||
3567             (pos_dst & (block_bytes(c) - 1)))
3568                 return -EINVAL;
3569
3570         if (src == dst &&
3571             abs(pos_src - pos_dst) < len)
3572                 return -EINVAL;
3573
3574         bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3575
3576         inode_dio_wait(&src->v);
3577         inode_dio_wait(&dst->v);
3578
3579         ret = generic_remap_file_range_prep(file_src, pos_src,
3580                                             file_dst, pos_dst,
3581                                             &len, remap_flags);
3582         if (ret < 0 || len == 0)
3583                 goto err;
3584
3585         aligned_len = round_up((u64) len, block_bytes(c));
3586
3587         ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
3588                                 pos_dst, pos_dst + len - 1);
3589         if (ret)
3590                 goto err;
3591
3592         ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
3593                                   (pos_dst + aligned_len) >> 9);
3594         if (ret)
3595                 goto err;
3596
3597         file_update_time(file_dst);
3598
3599         mark_pagecache_unallocated(src, pos_src >> 9,
3600                                    (pos_src + aligned_len) >> 9);
3601
3602         ret = bch2_remap_range(c,
3603                                inode_inum(dst), pos_dst >> 9,
3604                                inode_inum(src), pos_src >> 9,
3605                                aligned_len >> 9,
3606                                pos_dst + len, &i_sectors_delta);
3607         if (ret < 0)
3608                 goto err;
3609
3610         /*
3611          * Due to alignment, we might have remapped slightly more than requested.
3612          */
3613         ret = min((u64) ret << 9, (u64) len);
3614
3615         i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
3616
3617         spin_lock(&dst->v.i_lock);
3618         if (pos_dst + ret > dst->v.i_size)
3619                 i_size_write(&dst->v, pos_dst + ret);
3620         spin_unlock(&dst->v.i_lock);
3621
3622         if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
3623             IS_SYNC(file_inode(file_dst)))
3624                 ret = bch2_flush_inode(c, dst);
3625 err:
3626         bch2_quota_reservation_put(c, dst, &quota_res);
3627         bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3628
3629         return bch2_err_class(ret);
3630 }
3631
3632 /* fseek: */
3633
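     /*
      * Return the byte offset within @folio of the first sector at or after
      * @pos that holds data (state >= SECTOR_dirty), or -1 if there is none.
      */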
3634 static int folio_data_offset(struct folio *folio, loff_t pos)
3635 {
3636         struct bch_folio *s = bch2_folio(folio);
3637         unsigned i, sectors = folio_sectors(folio);
3638
3639         if (s)
3640                 for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
3641                         if (s->s[i].state >= SECTOR_dirty)
3642                                 return i << SECTOR_SHIFT;
3643
3644         return -1;
3645 }
3646
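     /*
      * Scan the pagecache in [start_offset, end_offset) and return the
      * position of the first data found, or end_offset if there is none.
      */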
3647 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
3648                                        loff_t start_offset,
3649                                        loff_t end_offset)
3650 {
3651         struct folio_batch fbatch;
3652         pgoff_t start_index     = start_offset >> PAGE_SHIFT;
3653         pgoff_t end_index       = end_offset >> PAGE_SHIFT;
3654         pgoff_t index           = start_index;
3655         unsigned i;
3656         loff_t ret;
3657         int offset;
3658
3659         folio_batch_init(&fbatch);
3660
3661         while (filemap_get_folios(vinode->i_mapping,
3662                                   &index, end_index, &fbatch)) {
3663                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
3664                         struct folio *folio = fbatch.folios[i];
3665
3666                         folio_lock(folio);
3667                         offset = folio_data_offset(folio,
3668                                         max(folio_pos(folio), start_offset));
3669                         if (offset >= 0) {
3670                                 ret = clamp(folio_pos(folio) + offset,
3671                                             start_offset, end_offset);
3672                                 folio_unlock(folio);
3673                                 folio_batch_release(&fbatch);
3674                                 return ret;
3675                         }
3676                         folio_unlock(folio);
3677                 }
3678                 folio_batch_release(&fbatch);
3679                 cond_resched();
3680         }
3681
3682         return end_offset;
3683 }
3684
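     /*
      * SEEK_DATA: find the first data extent at or after @offset in the
      * extents btree, then let the pagecache move the result earlier if it
      * holds data before that point.
      */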
3685 static loff_t bch2_seek_data(struct file *file, u64 offset)
3686 {
3687         struct bch_inode_info *inode = file_bch_inode(file);
3688         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3689         struct btree_trans trans;
3690         struct btree_iter iter;
3691         struct bkey_s_c k;
3692         subvol_inum inum = inode_inum(inode);
3693         u64 isize, next_data = MAX_LFS_FILESIZE;
3694         u32 snapshot;
3695         int ret;
3696
3697         isize = i_size_read(&inode->v);
3698         if (offset >= isize)
3699                 return -ENXIO;
3700
3701         bch2_trans_init(&trans, c, 0, 0);
3702 retry:
3703         bch2_trans_begin(&trans);
3704
3705         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3706         if (ret)
3707                 goto err;
3708
3709         for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
3710                            SPOS(inode->v.i_ino, offset >> 9, snapshot),
3711                            POS(inode->v.i_ino, U64_MAX),
3712                            0, k, ret) {
3713                 if (bkey_extent_is_data(k.k)) {
3714                         next_data = max(offset, bkey_start_offset(k.k) << 9);
3715                         break;
3716                 } else if (k.k->p.offset >> 9 > isize)
3717                         break;
3718         }
3719         bch2_trans_iter_exit(&trans, &iter);
3720 err:
3721         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3722                 goto retry;
3723
3724         bch2_trans_exit(&trans);
3725         if (ret)
3726                 return ret;
3727
3728         if (next_data > offset)
3729                 next_data = bch2_seek_pagecache_data(&inode->v,
3730                                                      offset, next_data);
3731
3732         if (next_data >= isize)
3733                 return -ENXIO;
3734
3735         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
3736 }
3737
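     /*
      * If the folio covering *offset has a hole at or after that position
      * (or there is no folio at all), point *offset at it and return true;
      * otherwise advance *offset past the folio and return false.
      */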
3738 static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
3739 {
3740         struct folio *folio;
3741         struct bch_folio *s;
3742         unsigned i, sectors;
3743         bool ret = true;
3744
3745         folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
3746         if (!folio)
3747                 return true;
3748
3749         s = bch2_folio(folio);
3750         if (!s)
3751                 goto unlock;
3752
3753         sectors = folio_sectors(folio);
3754         for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
3755                 if (s->s[i].state < SECTOR_dirty) {
3756                         *offset = max(*offset,
3757                                       folio_pos(folio) + (i << SECTOR_SHIFT));
3758                         goto unlock;
3759                 }
3760
3761         *offset = folio_end_pos(folio);
3762         ret = false;
3763 unlock:
3764         folio_unlock(folio);
3765         return ret;
3766 }
3767
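     /*
      * Walk the pagecache from start_offset looking for the first hole;
      * returns end_offset if everything up to it is cached data.
      */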
3768 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3769                                        loff_t start_offset,
3770                                        loff_t end_offset)
3771 {
3772         struct address_space *mapping = vinode->i_mapping;
3773         loff_t offset = start_offset;
3774
3775         while (offset < end_offset &&
3776                !folio_hole_offset(mapping, &offset))
3777                 ;
3778
3779         return min(offset, end_offset);
3780 }
3781
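     /*
      * SEEK_HOLE: walk the extents btree from @offset; for each gap or
      * non-data extent, check the pagecache for data that would fill it
      * before reporting the hole.
      */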
3782 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3783 {
3784         struct bch_inode_info *inode = file_bch_inode(file);
3785         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3786         struct btree_trans trans;
3787         struct btree_iter iter;
3788         struct bkey_s_c k;
3789         subvol_inum inum = inode_inum(inode);
3790         u64 isize, next_hole = MAX_LFS_FILESIZE;
3791         u32 snapshot;
3792         int ret;
3793
3794         isize = i_size_read(&inode->v);
3795         if (offset >= isize)
3796                 return -ENXIO;
3797
3798         bch2_trans_init(&trans, c, 0, 0);
3799 retry:
3800         bch2_trans_begin(&trans);
3801
3802         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3803         if (ret)
3804                 goto err;
3805
3806         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3807                            SPOS(inode->v.i_ino, offset >> 9, snapshot),
3808                            BTREE_ITER_SLOTS, k, ret) {
3809                 if (k.k->p.inode != inode->v.i_ino) {
3810                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3811                                         offset, MAX_LFS_FILESIZE);
3812                         break;
3813                 } else if (!bkey_extent_is_data(k.k)) {
3814                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3815                                         max(offset, bkey_start_offset(k.k) << 9),
3816                                         k.k->p.offset << 9);
3817
3818                         if (next_hole < k.k->p.offset << 9)
3819                                 break;
3820                 } else {
3821                         offset = max(offset, bkey_start_offset(k.k) << 9);
3822                 }
3823         }
3824         bch2_trans_iter_exit(&trans, &iter);
3825 err:
3826         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3827                 goto retry;
3828
3829         bch2_trans_exit(&trans);
3830         if (ret)
3831                 return ret;
3832
3833         if (next_hole > isize)
3834                 next_hole = isize;
3835
3836         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3837 }
3838
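     /*
      * llseek: SEEK_SET/CUR/END go through generic_file_llseek(); SEEK_DATA
      * and SEEK_HOLE use the btree- and pagecache-aware helpers above.
      */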
3839 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3840 {
3841         loff_t ret;
3842
3843         switch (whence) {
3844         case SEEK_SET:
3845         case SEEK_CUR:
3846         case SEEK_END:
3847                 ret = generic_file_llseek(file, offset, whence);
3848                 break;
3849         case SEEK_DATA:
3850                 ret = bch2_seek_data(file, offset);
3851                 break;
3852         case SEEK_HOLE:
3853                 ret = bch2_seek_hole(file, offset);
3854                 break;
3855         default:
3856                 ret = -EINVAL;
3857                 break;
3858         }
3859
3860         return bch2_err_class(ret);
3861 }
3862
3863 void bch2_fs_fsio_exit(struct bch_fs *c)
3864 {
3865         bioset_exit(&c->nocow_flush_bioset);
3866         bioset_exit(&c->dio_write_bioset);
3867         bioset_exit(&c->dio_read_bioset);
3868         bioset_exit(&c->writepage_bioset);
3869 }
3870
3871 int bch2_fs_fsio_init(struct bch_fs *c)
3872 {
3873         int ret = 0;
3874
3875         pr_verbose_init(c->opts, "");
3876
3877         if (bioset_init(&c->writepage_bioset,
3878                         4, offsetof(struct bch_writepage_io, op.wbio.bio),
3879                         BIOSET_NEED_BVECS))
3880                 return -BCH_ERR_ENOMEM_writepage_bioset_init;
3881
3882         if (bioset_init(&c->dio_read_bioset,
3883                         4, offsetof(struct dio_read, rbio.bio),
3884                         BIOSET_NEED_BVECS))
3885                 return -BCH_ERR_ENOMEM_dio_read_bioset_init;
3886
3887         if (bioset_init(&c->dio_write_bioset,
3888                         4, offsetof(struct dio_write, op.wbio.bio),
3889                         BIOSET_NEED_BVECS))
3890                 return -BCH_ERR_ENOMEM_dio_write_bioset_init;
3891
3892         if (bioset_init(&c->nocow_flush_bioset,
3893                         1, offsetof(struct nocow_flush, bio), 0))
3894                 return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
3895
3896         pr_verbose_init(c->opts, "ret %i", ret);
3897         return ret;
3898 }
3899
3900 #endif /* NO_BCACHEFS_FS */