// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "fs.h"
#include "fs-io.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "io.h"
#include "keylist.h"
#include "quota.h"
#include "reflink.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>

static inline bool bio_full(struct bio *bio, unsigned len)
{
        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return true;
        if (bio->bi_iter.bi_size > UINT_MAX - len)
                return true;
        return false;
}

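/*
 * The low bit of current->faults_disabled_mapping is borrowed as a flag:
 * set_fdm_dropped_locks() tags the pointer to record that the page fault path
 * had to drop locks, and fdm_dropped_locks() tests for that. The accessor
 * below masks the bit back out when the actual mapping pointer is wanted.
 */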
static inline struct address_space *faults_disabled_mapping(void)
{
        return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
}

static inline void set_fdm_dropped_locks(void)
{
        current->faults_disabled_mapping =
                (void *) (((unsigned long) current->faults_disabled_mapping)|1);
}

static inline bool fdm_dropped_locks(void)
{
        return ((unsigned long) current->faults_disabled_mapping) & 1;
}

struct quota_res {
        u64                             sectors;
};

struct bch_writepage_io {
        struct closure                  cl;
        struct bch_inode_info           *inode;

        /* must be last: */
        struct bch_write_op             op;
};

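/*
 * State for an in-flight direct write: the originating kiocb, the iov_iter
 * still to be consumed, quota reservation and running sector count. Flag
 * bits: loop - we've completed at least one pass of the write loop; sync -
 * the caller is waiting on @done; free_iov - the iter's iovec array was heap
 * allocated (presumably because it didn't fit in inline_vecs) and must be
 * freed.
 */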
struct dio_write {
        struct completion               done;
        struct kiocb                    *req;
        struct mm_struct                *mm;
        unsigned                        loop:1,
                                        sync:1,
                                        free_iov:1;
        struct quota_res                quota_res;
        u64                             written;

        struct iov_iter                 iter;
        struct iovec                    inline_vecs[2];

        /* must be last: */
        struct bch_write_op             op;
};

struct dio_read {
        struct closure                  cl;
        struct kiocb                    *req;
        long                            ret;
        bool                            should_dirty;
        struct bch_read_bio             rbio;
};

/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
                                              loff_t start, loff_t end)
{
        int ret;

        /*
         * XXX: the way this is currently implemented, we can spin if a process
         * is continually redirtying a specific page
         */
        do {
                if (!mapping->nrpages)
                        return 0;

                ret = filemap_write_and_wait_range(mapping, start, end);
                if (ret)
                        break;

                if (!mapping->nrpages)
                        return 0;

                ret = invalidate_inode_pages2_range(mapping,
                                start >> PAGE_SHIFT,
                                end >> PAGE_SHIFT);
        } while (ret == -EBUSY);

        return ret;
}

/* quotas */

#ifdef CONFIG_BCACHEFS_QUOTA

static void bch2_quota_reservation_put(struct bch_fs *c,
                                       struct bch_inode_info *inode,
                                       struct quota_res *res)
{
        if (!res->sectors)
                return;

        mutex_lock(&inode->ei_quota_lock);
        BUG_ON(res->sectors > inode->ei_quota_reserved);

        bch2_quota_acct(c, inode->ei_qid, Q_SPC,
                        -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
        inode->ei_quota_reserved -= res->sectors;
        mutex_unlock(&inode->ei_quota_lock);

        res->sectors = 0;
}

static int bch2_quota_reservation_add(struct bch_fs *c,
                                      struct bch_inode_info *inode,
                                      struct quota_res *res,
                                      u64 sectors,
                                      bool check_enospc)
{
        int ret;

        mutex_lock(&inode->ei_quota_lock);
        ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
                              check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
        if (likely(!ret)) {
                inode->ei_quota_reserved += sectors;
                res->sectors += sectors;
        }
        mutex_unlock(&inode->ei_quota_lock);

        return ret;
}

#else

static void bch2_quota_reservation_put(struct bch_fs *c,
                                       struct bch_inode_info *inode,
                                       struct quota_res *res)
{
}

static int bch2_quota_reservation_add(struct bch_fs *c,
                                      struct bch_inode_info *inode,
                                      struct quota_res *res,
                                      unsigned sectors,
                                      bool check_enospc)
{
        return 0;
}

#endif

/* i_size updates: */

struct inode_new_size {
        loff_t          new_size;
        u64             now;
        unsigned        fields;
};

static int inode_set_size(struct bch_inode_info *inode,
                          struct bch_inode_unpacked *bi,
                          void *p)
{
        struct inode_new_size *s = p;

        bi->bi_size = s->new_size;
        if (s->fields & ATTR_ATIME)
                bi->bi_atime = s->now;
        if (s->fields & ATTR_MTIME)
                bi->bi_mtime = s->now;
        if (s->fields & ATTR_CTIME)
                bi->bi_ctime = s->now;

        return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
                                       struct bch_inode_info *inode,
                                       loff_t new_size, unsigned fields)
{
        struct inode_new_size s = {
                .new_size       = new_size,
                .now            = bch2_current_time(c),
                .fields         = fields,
        };

        return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}

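/*
 * Adjust the vfs inode's i_blocks and keep quota in sync: if @quota_res is
 * non-NULL, newly allocated sectors come out of an existing reservation;
 * otherwise they're accounted directly with KEY_TYPE_QUOTA_WARN, since
 * failing isn't an option at this point.
 */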
static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
                           struct quota_res *quota_res, s64 sectors)
{
        if (!sectors)
                return;

        mutex_lock(&inode->ei_quota_lock);
        bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
                                "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
                                inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
                                inode->ei_inode.bi_sectors);
        inode->v.i_blocks += sectors;

#ifdef CONFIG_BCACHEFS_QUOTA
        if (quota_res && sectors > 0) {
                BUG_ON(sectors > quota_res->sectors);
                BUG_ON(sectors > inode->ei_quota_reserved);

                quota_res->sectors -= sectors;
                inode->ei_quota_reserved -= sectors;
        } else {
                bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
        }
#endif
        mutex_unlock(&inode->ei_quota_lock);
}

/* page state: */

/* stored in page->private: */

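/*
 * Per-sector state machine, roughly:
 *
 *   UNALLOCATED --(write)--> DIRTY --(writeback)--> ALLOCATED
 *   RESERVED  --(write)--> DIRTY_RESERVED --(writeback)--> ALLOCATED
 *
 * RESERVED means an on-disk reservation already exists (e.g. from fallocate),
 * so dirtying the sector must not charge i_blocks a second time.
 */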
struct bch_page_sector {
        /* Uncompressed, fully allocated replicas (or on disk reservation): */
        unsigned                nr_replicas:4;

        /* Owns PAGE_SECTORS * replicas_reserved sized in memory reservation: */
        unsigned                replicas_reserved:4;

        /* i_sectors: */
        enum {
                SECTOR_UNALLOCATED,
                SECTOR_RESERVED,
                SECTOR_DIRTY,
                SECTOR_DIRTY_RESERVED,
                SECTOR_ALLOCATED,
        }                       state:8;
};

struct bch_page_state {
        spinlock_t              lock;
        atomic_t                write_count;
        bool                    uptodate;
        struct bch_page_sector  s[PAGE_SECTORS];
};

static inline struct bch_page_state *__bch2_page_state(struct page *page)
{
        return page_has_private(page)
                ? (struct bch_page_state *) page_private(page)
                : NULL;
}

static inline struct bch_page_state *bch2_page_state(struct page *page)
{
        EBUG_ON(!PageLocked(page));

        return __bch2_page_state(page);
}

/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
        kfree(detach_page_private(page));
}

static void bch2_page_state_release(struct page *page)
{
        EBUG_ON(!PageLocked(page));
        __bch2_page_state_release(page);
}

/* for newly allocated pages: */
static struct bch_page_state *__bch2_page_state_create(struct page *page,
                                                       gfp_t gfp)
{
        struct bch_page_state *s;

        s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
        if (!s)
                return NULL;

        spin_lock_init(&s->lock);
        attach_page_private(page, s);
        return s;
}

static struct bch_page_state *bch2_page_state_create(struct page *page,
                                                     gfp_t gfp)
{
        return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}

static unsigned bkey_to_sector_state(const struct bkey *k)
{
        if (k->type == KEY_TYPE_reservation)
                return SECTOR_RESERVED;
        if (bkey_extent_is_allocation(k))
                return SECTOR_ALLOCATED;
        return SECTOR_UNALLOCATED;
}

static void __bch2_page_state_set(struct page *page,
                                  unsigned pg_offset, unsigned pg_len,
                                  unsigned nr_ptrs, unsigned state)
{
        struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
        unsigned i;

        BUG_ON(pg_offset >= PAGE_SECTORS);
        BUG_ON(pg_offset + pg_len > PAGE_SECTORS);

        spin_lock(&s->lock);

        for (i = pg_offset; i < pg_offset + pg_len; i++) {
                s->s[i].nr_replicas = nr_ptrs;
                s->s[i].state = state;
        }

        if (i == PAGE_SECTORS)
                s->uptodate = true;

        spin_unlock(&s->lock);
}

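/*
 * Walk the extents btree over the range covered by @pages and seed each
 * page's bch_page_state from the keys found, so that later reservation
 * decisions know which sectors are already allocated on disk.
 */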
static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
                               struct page **pages, unsigned nr_pages)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
        unsigned pg_idx = 0;
        u32 snapshot;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);
retry:
        bch2_trans_begin(&trans);

        ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
        if (ret)
                goto err;

        for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
                           SPOS(inum.inum, offset, snapshot),
                           BTREE_ITER_SLOTS, k, ret) {
                unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
                unsigned state = bkey_to_sector_state(k.k);

                while (pg_idx < nr_pages) {
                        struct page *page = pages[pg_idx];
                        u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
                        u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
                        unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
                        unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;

                        BUG_ON(k.k->p.offset < pg_start);
                        BUG_ON(bkey_start_offset(k.k) > pg_end);

                        if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
                                __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);

                        if (k.k->p.offset < pg_end)
                                break;
                        pg_idx++;
                }

                if (pg_idx == nr_pages)
                        break;
        }

        offset = iter.pos.offset;
        bch2_trans_iter_exit(&trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;
        bch2_trans_exit(&trans);

        return ret;
}

static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
                ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
        unsigned state = bkey_to_sector_state(k.k);

        bio_for_each_segment(bv, bio, iter)
                __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
                                      bv.bv_len >> 9, nr_ptrs, state);
}

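/*
 * Zero out the cached nr_replicas for every page cache sector in
 * [start, end): presumably for when extents in that range are being dropped,
 * so that stale allocation info isn't trusted by future reservations.
 */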
static void mark_pagecache_unallocated(struct bch_inode_info *inode,
                                       u64 start, u64 end)
{
        pgoff_t index = start >> PAGE_SECTORS_SHIFT;
        pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
        struct folio_batch fbatch;
        unsigned i, j;

        if (end <= start)
                return;

        folio_batch_init(&fbatch);

        while (filemap_get_folios(inode->v.i_mapping,
                                  &index, end_index, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
                        u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
                        unsigned pg_offset = max(start, pg_start) - pg_start;
                        unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
                        struct bch_page_state *s;

                        BUG_ON(end <= pg_start);
                        BUG_ON(pg_offset >= PAGE_SECTORS);
                        BUG_ON(pg_offset + pg_len > PAGE_SECTORS);

                        folio_lock(folio);
                        s = bch2_page_state(&folio->page);

                        if (s) {
                                spin_lock(&s->lock);
                                for (j = pg_offset; j < pg_offset + pg_len; j++)
                                        s->s[j].nr_replicas = 0;
                                spin_unlock(&s->lock);
                        }

                        folio_unlock(folio);
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }
}

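/*
 * Fold a new on-disk reservation covering [start, end) into cached page
 * state: UNALLOCATED sectors become RESERVED, DIRTY ones become
 * DIRTY_RESERVED (giving back the i_sectors they were charged, since the
 * reservation now accounts for them).
 */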
static void mark_pagecache_reserved(struct bch_inode_info *inode,
                                    u64 start, u64 end)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        pgoff_t index = start >> PAGE_SECTORS_SHIFT;
        pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
        struct folio_batch fbatch;
        s64 i_sectors_delta = 0;
        unsigned i, j;

        if (end <= start)
                return;

        folio_batch_init(&fbatch);

        while (filemap_get_folios(inode->v.i_mapping,
                                  &index, end_index, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
                        u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
                        unsigned pg_offset = max(start, pg_start) - pg_start;
                        unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
                        struct bch_page_state *s;

                        BUG_ON(end <= pg_start);
                        BUG_ON(pg_offset >= PAGE_SECTORS);
                        BUG_ON(pg_offset + pg_len > PAGE_SECTORS);

                        folio_lock(folio);
                        s = bch2_page_state(&folio->page);

                        if (s) {
                                spin_lock(&s->lock);
                                for (j = pg_offset; j < pg_offset + pg_len; j++)
                                        switch (s->s[j].state) {
                                        case SECTOR_UNALLOCATED:
                                                s->s[j].state = SECTOR_RESERVED;
                                                break;
                                        case SECTOR_DIRTY:
                                                s->s[j].state = SECTOR_DIRTY_RESERVED;
                                                i_sectors_delta--;
                                                break;
                                        default:
                                                break;
                                        }
                                spin_unlock(&s->lock);
                        }

                        folio_unlock(folio);
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }

        i_sectors_acct(c, inode, NULL, i_sectors_delta);
}

static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
        /* XXX: this should not be open coded */
        return inode->ei_inode.bi_data_replicas
                ? inode->ei_inode.bi_data_replicas - 1
                : c->opts.data_replicas;
}

static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
                                                  unsigned nr_replicas)
{
        return max(0, (int) nr_replicas -
                   s->nr_replicas -
                   s->replicas_reserved);
}

static int bch2_get_page_disk_reservation(struct bch_fs *c,
                                struct bch_inode_info *inode,
                                struct page *page, bool check_enospc)
{
        struct bch_page_state *s = bch2_page_state_create(page, 0);
        unsigned nr_replicas = inode_nr_replicas(c, inode);
        struct disk_reservation disk_res = { 0 };
        unsigned i, disk_res_sectors = 0;
        int ret;

        if (!s)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(s->s); i++)
                disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

        if (!disk_res_sectors)
                return 0;

        ret = bch2_disk_reservation_get(c, &disk_res,
                                        disk_res_sectors, 1,
                                        !check_enospc
                                        ? BCH_DISK_RESERVATION_NOFAIL
                                        : 0);
        if (unlikely(ret))
                return ret;

        for (i = 0; i < ARRAY_SIZE(s->s); i++)
                s->s[i].replicas_reserved +=
                        sectors_to_reserve(&s->s[i], nr_replicas);

        return 0;
}

struct bch2_page_reservation {
        struct disk_reservation disk;
        struct quota_res        quota;
};

static void bch2_page_reservation_init(struct bch_fs *c,
                        struct bch_inode_info *inode,
                        struct bch2_page_reservation *res)
{
        memset(res, 0, sizeof(*res));

        res->disk.nr_replicas = inode_nr_replicas(c, inode);
}

static void bch2_page_reservation_put(struct bch_fs *c,
                        struct bch_inode_info *inode,
                        struct bch2_page_reservation *res)
{
        bch2_disk_reservation_put(c, &res->disk);
        bch2_quota_reservation_put(c, inode, &res->quota);
}

static int bch2_page_reservation_get(struct bch_fs *c,
                        struct bch_inode_info *inode, struct page *page,
                        struct bch2_page_reservation *res,
                        unsigned offset, unsigned len, bool check_enospc)
{
        struct bch_page_state *s = bch2_page_state_create(page, 0);
        unsigned i, disk_sectors = 0, quota_sectors = 0;
        int ret;

        if (!s)
                return -ENOMEM;

        BUG_ON(!s->uptodate);

        for (i = round_down(offset, block_bytes(c)) >> 9;
             i < round_up(offset + len, block_bytes(c)) >> 9;
             i++) {
                disk_sectors += sectors_to_reserve(&s->s[i],
                                                res->disk.nr_replicas);
                quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
        }

        if (disk_sectors) {
                ret = bch2_disk_reservation_add(c, &res->disk,
                                                disk_sectors,
                                                !check_enospc
                                                ? BCH_DISK_RESERVATION_NOFAIL
                                                : 0);
                if (unlikely(ret))
                        return ret;
        }

        if (quota_sectors) {
                ret = bch2_quota_reservation_add(c, inode, &res->quota,
                                                 quota_sectors,
                                                 check_enospc);
                if (unlikely(ret)) {
                        struct disk_reservation tmp = {
                                .sectors = disk_sectors
                        };

                        bch2_disk_reservation_put(c, &tmp);
                        res->disk.sectors -= disk_sectors;
                        return ret;
                }
        }

        return 0;
}

static void bch2_clear_page_bits(struct page *page)
{
        struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bch_page_state *s = bch2_page_state(page);
        struct disk_reservation disk_res = { 0 };
        int i, dirty_sectors = 0;

        if (!s)
                return;

        EBUG_ON(!PageLocked(page));
        EBUG_ON(PageWriteback(page));

        for (i = 0; i < ARRAY_SIZE(s->s); i++) {
                disk_res.sectors += s->s[i].replicas_reserved;
                s->s[i].replicas_reserved = 0;

                switch (s->s[i].state) {
                case SECTOR_DIRTY:
                        s->s[i].state = SECTOR_UNALLOCATED;
                        --dirty_sectors;
                        break;
                case SECTOR_DIRTY_RESERVED:
                        s->s[i].state = SECTOR_RESERVED;
                        break;
                default:
                        break;
                }
        }

        bch2_disk_reservation_put(c, &disk_res);

        i_sectors_acct(c, inode, NULL, dirty_sectors);

        bch2_page_state_release(page);
}

static void bch2_set_page_dirty(struct bch_fs *c,
                        struct bch_inode_info *inode, struct page *page,
                        struct bch2_page_reservation *res,
                        unsigned offset, unsigned len)
{
        struct bch_page_state *s = bch2_page_state(page);
        unsigned i, dirty_sectors = 0;

        WARN_ON((u64) page_offset(page) + offset + len >
                round_up((u64) i_size_read(&inode->v), block_bytes(c)));

        spin_lock(&s->lock);

        for (i = round_down(offset, block_bytes(c)) >> 9;
             i < round_up(offset + len, block_bytes(c)) >> 9;
             i++) {
                unsigned sectors = sectors_to_reserve(&s->s[i],
                                                res->disk.nr_replicas);

                /*
                 * This can happen if we race with the error path in
                 * bch2_writepage_io_done():
                 */
                sectors = min_t(unsigned, sectors, res->disk.sectors);

                s->s[i].replicas_reserved += sectors;
                res->disk.sectors -= sectors;

                switch (s->s[i].state) {
                case SECTOR_UNALLOCATED:
                        s->s[i].state = SECTOR_DIRTY;
                        dirty_sectors++;
                        break;
                case SECTOR_RESERVED:
                        s->s[i].state = SECTOR_DIRTY_RESERVED;
                        break;
                default:
                        break;
                }
        }

        spin_unlock(&s->lock);

        i_sectors_acct(c, inode, &res->quota, dirty_sectors);

        if (!PageDirty(page))
                __set_page_dirty_nobuffers(page);
}

vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct address_space *fdm = faults_disabled_mapping();
        struct bch_inode_info *inode = file_bch_inode(file);
        int ret;

        if (fdm == mapping)
                return VM_FAULT_SIGBUS;

        /* Lock ordering: */
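        /*
         * We're in a page fault while a dio write to @fdm holds that
         * mapping's pagecache_block lock. The locks are evidently ordered by
         * address: if @fdm sorts after @mapping, taking our pagecache_add
         * lock here could deadlock, so trylock first; on failure, drop fdm's
         * block lock, cycle our add lock to wait out the conflict, re-take
         * the block lock, and return SIGBUS with the dropped-locks flag set
         * so the dio path knows to retry the fault.
         */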
        if (fdm > mapping) {
                struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);

                if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
                        goto got_lock;

                bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);

                bch2_pagecache_add_get(&inode->ei_pagecache_lock);
                bch2_pagecache_add_put(&inode->ei_pagecache_lock);

                bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);

                /* Signal that lock has been dropped: */
                set_fdm_dropped_locks();
                return VM_FAULT_SIGBUS;
        }

        bch2_pagecache_add_get(&inode->ei_pagecache_lock);
got_lock:
        ret = filemap_fault(vmf);
        bch2_pagecache_add_put(&inode->ei_pagecache_lock);

        return ret;
}

vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct file *file = vmf->vma->vm_file;
        struct bch_inode_info *inode = file_bch_inode(file);
        struct address_space *mapping = file->f_mapping;
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bch2_page_reservation res;
        unsigned len;
        loff_t isize;
        int ret;

        bch2_page_reservation_init(c, inode, &res);

        sb_start_pagefault(inode->v.i_sb);
        file_update_time(file);

        /*
         * Not strictly necessary, but helps avoid dio writes livelocking in
         * write_invalidate_inode_pages_range() - can drop this if/when we get
         * a write_invalidate_inode_pages_range() that works without dropping
         * page lock before invalidating page
         */
        bch2_pagecache_add_get(&inode->ei_pagecache_lock);

        lock_page(page);
        isize = i_size_read(&inode->v);

        if (page->mapping != mapping || page_offset(page) >= isize) {
                unlock_page(page);
                ret = VM_FAULT_NOPAGE;
                goto out;
        }

        len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));

        if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
                if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
                        unlock_page(page);
                        ret = VM_FAULT_SIGBUS;
                        goto out;
                }
        }

        if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
                unlock_page(page);
                ret = VM_FAULT_SIGBUS;
                goto out;
        }

        bch2_set_page_dirty(c, inode, page, &res, 0, len);
        bch2_page_reservation_put(c, inode, &res);

        wait_for_stable_page(page);
        ret = VM_FAULT_LOCKED;
out:
        bch2_pagecache_add_put(&inode->ei_pagecache_lock);
        sb_end_pagefault(inode->v.i_sb);

        return ret;
}

void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
        if (offset || length < folio_size(folio))
                return;

        bch2_clear_page_bits(&folio->page);
}

bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
        if (folio_test_dirty(folio) || folio_test_writeback(folio))
                return false;

        bch2_clear_page_bits(&folio->page);
        return true;
}

/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
        struct bvec_iter_all iter;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, bio, iter) {
                struct page *page = bv->bv_page;

                if (!bio->bi_status) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        }

        bio_put(bio);
}

struct readpages_iter {
        struct address_space    *mapping;
        struct page             **pages;
        unsigned                nr_pages;
        unsigned                idx;
        pgoff_t                 offset;
};

static int readpages_iter_init(struct readpages_iter *iter,
                               struct readahead_control *ractl)
{
        unsigned i, nr_pages = readahead_count(ractl);

        memset(iter, 0, sizeof(*iter));

        iter->mapping   = ractl->mapping;
        iter->offset    = readahead_index(ractl);
        iter->nr_pages  = nr_pages;

        iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
        if (!iter->pages)
                return -ENOMEM;

        nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
        for (i = 0; i < nr_pages; i++) {
                __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
                put_page(iter->pages[i]);
        }

        return 0;
}

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
        if (iter->idx >= iter->nr_pages)
                return NULL;

        EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);

        return iter->pages[iter->idx];
}

static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        struct bch_extent_crc_unpacked crc;
        const union bch_extent_entry *i;

        bkey_for_each_crc(k.k, ptrs, crc, i)
                if (crc.csum_type || crc.compression_type)
                        return true;
        return false;
}

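/*
 * Extend a readpage bio to cover more of the current extent: first consume
 * pages already in the readahead batch, then, if @get_more, allocate pages
 * past the end of the batch, stopping at the first page that's already
 * present in the page cache.
 */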
static void readpage_bio_extend(struct readpages_iter *iter,
                                struct bio *bio,
                                unsigned sectors_this_extent,
                                bool get_more)
{
        while (bio_sectors(bio) < sectors_this_extent &&
               bio->bi_vcnt < bio->bi_max_vecs) {
                pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
                struct page *page = readpage_iter_next(iter);
                int ret;

                if (page) {
                        if (iter->offset + iter->idx != page_offset)
                                break;

                        iter->idx++;
                } else {
                        if (!get_more)
                                break;

                        page = xa_load(&iter->mapping->i_pages, page_offset);
                        if (page && !xa_is_value(page))
                                break;

                        page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
                        if (!page)
                                break;

                        if (!__bch2_page_state_create(page, 0)) {
                                put_page(page);
                                break;
                        }

                        ret = add_to_page_cache_lru(page, iter->mapping,
                                                    page_offset, GFP_NOFS);
                        if (ret) {
                                __bch2_page_state_release(page);
                                put_page(page);
                                break;
                        }

                        put_page(page);
                }

                BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
        }
}

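/*
 * Core buffered read loop: for each extent overlapping the bio, resolve
 * indirect (reflink) extents, record allocation state in the page cache via
 * bch2_bio_page_state_set(), and issue a read for just that fragment; the
 * swap()/bio_advance() pair temporarily clamps the bio's iterator to the
 * fragment being submitted.
 */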
static void bchfs_read(struct btree_trans *trans,
                       struct bch_read_bio *rbio,
                       subvol_inum inum,
                       struct readpages_iter *readpages_iter)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_buf sk;
        int flags = BCH_READ_RETRY_IF_STALE|
                BCH_READ_MAY_PROMOTE;
        u32 snapshot;
        int ret = 0;

        rbio->c = c;
        rbio->start_time = local_clock();
        rbio->subvol = inum.subvol;

        bch2_bkey_buf_init(&sk);
retry:
        bch2_trans_begin(trans);
        iter = (struct btree_iter) { NULL };

        ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
        if (ret)
                goto err;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                             SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
                             BTREE_ITER_SLOTS);
        while (1) {
                struct bkey_s_c k;
                unsigned bytes, sectors, offset_into_extent;
                enum btree_id data_btree = BTREE_ID_extents;

                /*
                 * read_extent -> io_time_reset may cause a transaction restart
                 * without returning an error, we need to check for that here:
                 */
                ret = bch2_trans_relock(trans);
                if (ret)
                        break;

                bch2_btree_iter_set_pos(&iter,
                                POS(inum.inum, rbio->bio.bi_iter.bi_sector));

                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        break;

                offset_into_extent = iter.pos.offset -
                        bkey_start_offset(k.k);
                sectors = k.k->size - offset_into_extent;

                bch2_bkey_buf_reassemble(&sk, c, k);

                ret = bch2_read_indirect_extent(trans, &data_btree,
                                        &offset_into_extent, &sk);
                if (ret)
                        break;

                k = bkey_i_to_s_c(sk.k);

                sectors = min(sectors, k.k->size - offset_into_extent);

                if (readpages_iter)
                        readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
                                            extent_partial_reads_expensive(k));

                bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
                swap(rbio->bio.bi_iter.bi_size, bytes);

                if (rbio->bio.bi_iter.bi_size == bytes)
                        flags |= BCH_READ_LAST_FRAGMENT;

                bch2_bio_page_state_set(&rbio->bio, k);

                bch2_read_extent(trans, rbio, iter.pos,
                                 data_btree, k, offset_into_extent, flags);

                if (flags & BCH_READ_LAST_FRAGMENT)
                        break;

                swap(rbio->bio.bi_iter.bi_size, bytes);
                bio_advance(&rbio->bio, bytes);

                ret = btree_trans_too_many_iters(trans);
                if (ret)
                        break;
        }
err:
        bch2_trans_iter_exit(trans, &iter);

        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        if (ret) {
                bch_err_inum_ratelimited(c, inum.inum,
                                "read error %i from btree lookup", ret);
                rbio->bio.bi_status = BLK_STS_IOERR;
                bio_endio(&rbio->bio);
        }

        bch2_bkey_buf_exit(&sk, c);
}

void bch2_readahead(struct readahead_control *ractl)
{
        struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
        struct btree_trans trans;
        struct page *page;
        struct readpages_iter readpages_iter;
        int ret;

        ret = readpages_iter_init(&readpages_iter, ractl);
        BUG_ON(ret);

        bch2_trans_init(&trans, c, 0, 0);

        bch2_pagecache_add_get(&inode->ei_pagecache_lock);

        while ((page = readpage_iter_next(&readpages_iter))) {
                pgoff_t index = readpages_iter.offset + readpages_iter.idx;
                unsigned n = min_t(unsigned,
                                   readpages_iter.nr_pages -
                                   readpages_iter.idx,
                                   BIO_MAX_VECS);
                struct bch_read_bio *rbio =
                        rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
                                                   GFP_NOFS, &c->bio_read),
                                  opts);

                readpages_iter.idx++;

                rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
                rbio->bio.bi_end_io = bch2_readpages_end_io;
                BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

                bchfs_read(&trans, rbio, inode_inum(inode),
                           &readpages_iter);
        }

        bch2_pagecache_add_put(&inode->ei_pagecache_lock);

        bch2_trans_exit(&trans);
        kfree(readpages_iter.pages);
}

static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
                             subvol_inum inum, struct page *page)
{
        struct btree_trans trans;

        bch2_page_state_create(page, __GFP_NOFAIL);

        bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
        rbio->bio.bi_iter.bi_sector =
                (sector_t) page->index << PAGE_SECTORS_SHIFT;
        BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

        bch2_trans_init(&trans, c, 0, 0);
        bchfs_read(&trans, rbio, inum, NULL);
        bch2_trans_exit(&trans);
}

static void bch2_read_single_page_end_io(struct bio *bio)
{
        complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
                                 struct address_space *mapping)
{
        struct bch_inode_info *inode = to_bch_ei(mapping->host);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bch_read_bio *rbio;
        int ret;
        DECLARE_COMPLETION_ONSTACK(done);

        rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
                         io_opts(c, &inode->ei_inode));
        rbio->bio.bi_private = &done;
        rbio->bio.bi_end_io = bch2_read_single_page_end_io;

        __bchfs_readpage(c, rbio, inode_inum(inode), page);
        wait_for_completion(&done);

        ret = blk_status_to_errno(rbio->bio.bi_status);
        bio_put(&rbio->bio);

        if (ret < 0)
                return ret;

        SetPageUptodate(page);
        return 0;
}

int bch2_read_folio(struct file *file, struct folio *folio)
{
        struct page *page = &folio->page;
        int ret;

        ret = bch2_read_single_page(page, page->mapping);
        folio_unlock(folio);
        return bch2_err_class(ret);
}

/* writepages: */

struct bch_writepage_state {
        struct bch_writepage_io *io;
        struct bch_io_opts      opts;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
                                                                  struct bch_inode_info *inode)
{
        return (struct bch_writepage_state) {
                .opts = io_opts(c, &inode->ei_inode)
        };
}

static void bch2_writepage_io_free(struct closure *cl)
{
        struct bch_writepage_io *io = container_of(cl,
                                        struct bch_writepage_io, cl);

        bio_put(&io->op.wbio.bio);
}

static void bch2_writepage_io_done(struct closure *cl)
{
        struct bch_writepage_io *io = container_of(cl,
                                        struct bch_writepage_io, cl);
        struct bch_fs *c = io->op.c;
        struct bio *bio = &io->op.wbio.bio;
        struct bvec_iter_all iter;
        struct bio_vec *bvec;
        unsigned i;

        if (io->op.error) {
                set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

                bio_for_each_segment_all(bvec, bio, iter) {
                        struct bch_page_state *s;

                        SetPageError(bvec->bv_page);
                        mapping_set_error(bvec->bv_page->mapping, -EIO);

                        s = __bch2_page_state(bvec->bv_page);
                        spin_lock(&s->lock);
                        for (i = 0; i < PAGE_SECTORS; i++)
                                s->s[i].nr_replicas = 0;
                        spin_unlock(&s->lock);
                }
        }

        if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
                bio_for_each_segment_all(bvec, bio, iter) {
                        struct bch_page_state *s;

                        s = __bch2_page_state(bvec->bv_page);
                        spin_lock(&s->lock);
                        for (i = 0; i < PAGE_SECTORS; i++)
                                s->s[i].nr_replicas = 0;
                        spin_unlock(&s->lock);
                }
        }

        /*
         * racing with fallocate can cause us to add fewer sectors than
         * expected - but we shouldn't add more sectors than expected:
         */
        WARN_ON_ONCE(io->op.i_sectors_delta > 0);

        /*
         * (error (due to going RO) halfway through a page can screw that up
         * slightly)
         * XXX wtf?
           BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
         */

        /*
         * PageWriteback is effectively our ref on the inode - fixup i_blocks
         * before calling end_page_writeback:
         */
        i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

        bio_for_each_segment_all(bvec, bio, iter) {
                struct bch_page_state *s = __bch2_page_state(bvec->bv_page);

                if (atomic_dec_and_test(&s->write_count))
                        end_page_writeback(bvec->bv_page);
        }

        closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}

static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
        struct bch_writepage_io *io = w->io;

        w->io = NULL;
        closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
        continue_at(&io->cl, bch2_writepage_io_done, NULL);
}

/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
                                    struct writeback_control *wbc,
                                    struct bch_writepage_state *w,
                                    struct bch_inode_info *inode,
                                    u64 sector,
                                    unsigned nr_replicas)
{
        struct bch_write_op *op;

        w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
                                              REQ_OP_WRITE,
                                              GFP_NOFS,
                                              &c->writepage_bioset),
                             struct bch_writepage_io, op.wbio.bio);

        closure_init(&w->io->cl, NULL);
        w->io->inode            = inode;

        op                      = &w->io->op;
        bch2_write_op_init(op, c, w->opts);
        op->target              = w->opts.foreground_target;
        op->nr_replicas         = nr_replicas;
        op->res.nr_replicas     = nr_replicas;
        op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
        op->subvol              = inode->ei_subvol;
        op->pos                 = POS(inode->v.i_ino, sector);
        op->wbio.bio.bi_iter.bi_sector = sector;
        op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
}

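/*
 * Write out one page: take a stable snapshot of its per-sector state, mark
 * the dirty sectors allocated, then emit each contiguous run of dirty
 * sectors, appending to the current bch_writepage_io and flushing it
 * whenever replicas, size or contiguity no longer match.
 */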
static int __bch2_writepage(struct page *page,
                            struct writeback_control *wbc,
                            void *data)
{
        struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct bch_writepage_state *w = data;
        struct bch_page_state *s, orig;
        unsigned i, offset, nr_replicas_this_write = U32_MAX;
        loff_t i_size = i_size_read(&inode->v);
        pgoff_t end_index = i_size >> PAGE_SHIFT;
        int ret;

        EBUG_ON(!PageUptodate(page));

        /* Is the page fully inside i_size? */
        if (page->index < end_index)
                goto do_io;

        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_SIZE - 1);
        if (page->index > end_index || !offset) {
                unlock_page(page);
                return 0;
        }

        /*
         * The page straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        zero_user_segment(page, offset, PAGE_SIZE);
do_io:
        s = bch2_page_state_create(page, __GFP_NOFAIL);

        /*
         * Things get really hairy with errors during writeback:
         */
        ret = bch2_get_page_disk_reservation(c, inode, page, false);
        BUG_ON(ret);

        /* Before unlocking the page, get copy of reservations: */
        spin_lock(&s->lock);
        orig = *s;
        spin_unlock(&s->lock);

        for (i = 0; i < PAGE_SECTORS; i++) {
                if (s->s[i].state < SECTOR_DIRTY)
                        continue;

                nr_replicas_this_write =
                        min_t(unsigned, nr_replicas_this_write,
                              s->s[i].nr_replicas +
                              s->s[i].replicas_reserved);
        }

        for (i = 0; i < PAGE_SECTORS; i++) {
                if (s->s[i].state < SECTOR_DIRTY)
                        continue;

                s->s[i].nr_replicas = w->opts.compression
                        ? 0 : nr_replicas_this_write;

                s->s[i].replicas_reserved = 0;
                s->s[i].state = SECTOR_ALLOCATED;
        }

        BUG_ON(atomic_read(&s->write_count));
        atomic_set(&s->write_count, 1);

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        unlock_page(page);

        offset = 0;
        while (1) {
                unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
                u64 sector;

                while (offset < PAGE_SECTORS &&
                       orig.s[offset].state < SECTOR_DIRTY)
                        offset++;

                if (offset == PAGE_SECTORS)
                        break;

                while (offset + sectors < PAGE_SECTORS &&
                       orig.s[offset + sectors].state >= SECTOR_DIRTY) {
                        reserved_sectors += orig.s[offset + sectors].replicas_reserved;
                        dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
                        sectors++;
                }
                BUG_ON(!sectors);

                sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;

                if (w->io &&
                    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
                     bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
                     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
                     (BIO_MAX_VECS * PAGE_SIZE) ||
                     bio_end_sector(&w->io->op.wbio.bio) != sector))
                        bch2_writepage_do_io(w);

                if (!w->io)
                        bch2_writepage_io_alloc(c, wbc, w, inode, sector,
                                                nr_replicas_this_write);

                atomic_inc(&s->write_count);

                BUG_ON(inode != w->io->inode);
                BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
                                     sectors << 9, offset << 9));

                /* Check for writing past i_size: */
                WARN_ON_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
                             round_up(i_size, block_bytes(c)));

                w->io->op.res.sectors += reserved_sectors;
                w->io->op.i_sectors_delta -= dirty_sectors;
                w->io->op.new_i_size = i_size;

                offset += sectors;
        }

        if (atomic_dec_and_test(&s->write_count))
                end_page_writeback(page);

        return 0;
}

int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        struct bch_fs *c = mapping->host->i_sb->s_fs_info;
        struct bch_writepage_state w =
                bch_writepage_state_init(c, to_bch_ei(mapping->host));
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
        if (w.io)
                bch2_writepage_do_io(&w);
        blk_finish_plug(&plug);
        return bch2_err_class(ret);
}

/* buffered writes: */

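/*
 * ->write_begin/->write_end pair: write_begin pins the page, ensures its
 * bch_page_state is known (reading the page in first if a partial write
 * requires it) and takes disk + quota reservations, handing them to
 * write_end through *fsdata.
 */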
1479 int bch2_write_begin(struct file *file, struct address_space *mapping,
1480                      loff_t pos, unsigned len,
1481                      struct page **pagep, void **fsdata)
1482 {
1483         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1484         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1485         struct bch2_page_reservation *res;
1486         pgoff_t index = pos >> PAGE_SHIFT;
1487         unsigned offset = pos & (PAGE_SIZE - 1);
1488         struct page *page;
1489         int ret = -ENOMEM;
1490
1491         res = kmalloc(sizeof(*res), GFP_KERNEL);
1492         if (!res)
1493                 return -ENOMEM;
1494
1495         bch2_page_reservation_init(c, inode, res);
1496         *fsdata = res;
1497
1498         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1499
1500         page = grab_cache_page_write_begin(mapping, index);
1501         if (!page)
1502                 goto err_unlock;
1503
1504         if (PageUptodate(page))
1505                 goto out;
1506
1507         /* If we're writing entire page, don't need to read it in first: */
1508         if (len == PAGE_SIZE)
1509                 goto out;
1510
1511         if (!offset && pos + len >= inode->v.i_size) {
1512                 zero_user_segment(page, len, PAGE_SIZE);
1513                 flush_dcache_page(page);
1514                 goto out;
1515         }
1516
1517         if (index > inode->v.i_size >> PAGE_SHIFT) {
1518                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1519                 flush_dcache_page(page);
1520                 goto out;
1521         }
1522 readpage:
1523         ret = bch2_read_single_page(page, mapping);
1524         if (ret)
1525                 goto err;
1526 out:
1527         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1528                 ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
1529                 if (ret)
1530                         goto err;
1531         }
1532
1533         ret = bch2_page_reservation_get(c, inode, page, res,
1534                                         offset, len, true);
1535         if (ret) {
1536                 if (!PageUptodate(page)) {
1537                         /*
1538                          * If the page hasn't been read in, we won't know if we
1539                          * actually need a reservation - we don't need to read it
1540                          * here, just check whether the page is fully backed by
1541                          * uncompressed data:
1542                          */
1543                         goto readpage;
1544                 }
1545
1546                 goto err;
1547         }
1548
1549         *pagep = page;
1550         return 0;
1551 err:
1552         unlock_page(page);
1553         put_page(page);
1554         *pagep = NULL;
1555 err_unlock:
1556         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1557         kfree(res);
1558         *fsdata = NULL;
1559         return bch2_err_class(ret);
1560 }
1561
1562 int bch2_write_end(struct file *file, struct address_space *mapping,
1563                    loff_t pos, unsigned len, unsigned copied,
1564                    struct page *page, void *fsdata)
1565 {
1566         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1567         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1568         struct bch2_page_reservation *res = fsdata;
1569         unsigned offset = pos & (PAGE_SIZE - 1);
1570
1571         lockdep_assert_held(&inode->v.i_rwsem);
1572
1573         if (unlikely(copied < len && !PageUptodate(page))) {
1574                 /*
1575                  * The page needs to be read in, but that would destroy
1576                  * our partial write - simplest thing is to just force
1577                  * userspace to redo the write:
1578                  */
1579                 zero_user(page, 0, PAGE_SIZE);
1580                 flush_dcache_page(page);
1581                 copied = 0;
1582         }
1583
1584         spin_lock(&inode->v.i_lock);
1585         if (pos + copied > inode->v.i_size)
1586                 i_size_write(&inode->v, pos + copied);
1587         spin_unlock(&inode->v.i_lock);
1588
1589         if (copied) {
1590                 if (!PageUptodate(page))
1591                         SetPageUptodate(page);
1592
1593                 bch2_set_page_dirty(c, inode, page, res, offset, copied);
1594
1595                 inode->ei_last_dirtied = (unsigned long) current;
1596         }
1597
1598         unlock_page(page);
1599         put_page(page);
1600         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1601
1602         bch2_page_reservation_put(c, inode, res);
1603         kfree(res);
1604
1605         return copied;
1606 }
1607
1608 #define WRITE_BATCH_PAGES       32
1609
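     /*
      * Core of the buffered write path, operating on batches of up to
      * WRITE_BATCH_PAGES pages: all pages covering [pos, pos + len) are
      * grabbed and prepared up front - partially overwritten edge pages are
      * read in, and page/quota reservations taken for the whole range -
      * before any user data is copied, so a short or faulting copy can back
      * out without leaving pages half written.
      */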
1610 static int __bch2_buffered_write(struct bch_inode_info *inode,
1611                                  struct address_space *mapping,
1612                                  struct iov_iter *iter,
1613                                  loff_t pos, unsigned len)
1614 {
1615         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1616         struct page *pages[WRITE_BATCH_PAGES];
1617         struct bch2_page_reservation res;
1618         unsigned long index = pos >> PAGE_SHIFT;
1619         unsigned offset = pos & (PAGE_SIZE - 1);
1620         unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1621         unsigned i, reserved = 0, set_dirty = 0;
1622         unsigned copied = 0, nr_pages_copied = 0;
1623         int ret = 0;
1624
1625         BUG_ON(!len);
1626         BUG_ON(nr_pages > ARRAY_SIZE(pages));
1627
1628         bch2_page_reservation_init(c, inode, &res);
1629
1630         for (i = 0; i < nr_pages; i++) {
1631                 pages[i] = grab_cache_page_write_begin(mapping, index + i);
1632                 if (!pages[i]) {
1633                         nr_pages = i;
1634                         if (!i) {
1635                                 ret = -ENOMEM;
1636                                 goto out;
1637                         }
1638                         len = min_t(unsigned, len,
1639                                     nr_pages * PAGE_SIZE - offset);
1640                         break;
1641                 }
1642         }
1643
1644         if (offset && !PageUptodate(pages[0])) {
1645                 ret = bch2_read_single_page(pages[0], mapping);
1646                 if (ret)
1647                         goto out;
1648         }
1649
1650         if ((pos + len) & (PAGE_SIZE - 1) &&
1651             !PageUptodate(pages[nr_pages - 1])) {
1652                 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1653                         zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1654                 } else {
1655                         ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1656                         if (ret)
1657                                 goto out;
1658                 }
1659         }
1660
1661         while (reserved < len) {
1662                 unsigned i = (offset + reserved) >> PAGE_SHIFT;
1663                 struct page *page = pages[i];
1664                 unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
1665                 unsigned pg_len = min_t(unsigned, len - reserved,
1666                                         PAGE_SIZE - pg_offset);
1667
1668                 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1669                         ret = bch2_page_state_set(c, inode_inum(inode),
1670                                                   pages + i, nr_pages - i);
1671                         if (ret)
1672                                 goto out;
1673                 }
1674
1675                 ret = bch2_page_reservation_get(c, inode, page, &res,
1676                                                 pg_offset, pg_len, true);
1677                 if (ret)
1678                         goto out;
1679
1680                 reserved += pg_len;
1681         }
1682
1683         if (mapping_writably_mapped(mapping))
1684                 for (i = 0; i < nr_pages; i++)
1685                         flush_dcache_page(pages[i]);
1686
1687         while (copied < len) {
1688                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1689                 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1690                 unsigned pg_len = min_t(unsigned, len - copied,
1691                                         PAGE_SIZE - pg_offset);
1692                 unsigned pg_copied = copy_page_from_iter_atomic(page,
1693                                                 pg_offset, pg_len, iter);
1694
1695                 if (!pg_copied)
1696                         break;
1697
1698                 if (!PageUptodate(page) &&
1699                     pg_copied != PAGE_SIZE &&
1700                     pos + copied + pg_copied < inode->v.i_size) {
1701                         zero_user(page, 0, PAGE_SIZE);
1702                         break;
1703                 }
1704
1705                 flush_dcache_page(page);
1706                 copied += pg_copied;
1707
1708                 if (pg_copied != pg_len)
1709                         break;
1710         }
1711
1712         if (!copied)
1713                 goto out;
1714
1715         spin_lock(&inode->v.i_lock);
1716         if (pos + copied > inode->v.i_size)
1717                 i_size_write(&inode->v, pos + copied);
1718         spin_unlock(&inode->v.i_lock);
1719
1720         while (set_dirty < copied) {
1721                 struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
1722                 unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
1723                 unsigned pg_len = min_t(unsigned, copied - set_dirty,
1724                                         PAGE_SIZE - pg_offset);
1725
1726                 if (!PageUptodate(page))
1727                         SetPageUptodate(page);
1728
1729                 bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
1730                 unlock_page(page);
1731                 put_page(page);
1732
1733                 set_dirty += pg_len;
1734         }
1735
1736         nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1737         inode->ei_last_dirtied = (unsigned long) current;
1738 out:
1739         for (i = nr_pages_copied; i < nr_pages; i++) {
1740                 unlock_page(pages[i]);
1741                 put_page(pages[i]);
1742         }
1743
1744         bch2_page_reservation_put(c, inode, &res);
1745
1746         return copied ?: ret;
1747 }
1748
1749 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1750 {
1751         struct file *file = iocb->ki_filp;
1752         struct address_space *mapping = file->f_mapping;
1753         struct bch_inode_info *inode = file_bch_inode(file);
1754         loff_t pos = iocb->ki_pos;
1755         ssize_t written = 0;
1756         int ret = 0;
1757
1758         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1759
1760         do {
1761                 unsigned offset = pos & (PAGE_SIZE - 1);
1762                 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1763                               PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1764 again:
1765                 /*
1766                  * Bring in the user page that we will copy from _first_.
1767                  * Otherwise there's a nasty deadlock on copying from the
1768                  * same page as we're writing to, without it being marked
1769                  * up-to-date.
1770                  *
1771                  * Not only is this an optimisation, it is also required to
1772                  * check that the address is actually valid when the atomic
1773                  * usercopies below are used.
1774                  */
1775                 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
1776                         bytes = min_t(unsigned long, iov_iter_count(iter),
1777                                       PAGE_SIZE - offset);
1778
1779                         if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
1780                                 ret = -EFAULT;
1781                                 break;
1782                         }
1783                 }
1784
1785                 if (unlikely(fatal_signal_pending(current))) {
1786                         ret = -EINTR;
1787                         break;
1788                 }
1789
1790                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1791                 if (unlikely(ret < 0))
1792                         break;
1793
1794                 cond_resched();
1795
1796                 if (unlikely(ret == 0)) {
1797                         /*
1798                          * If we were unable to copy any data at all, we must
1799                          * fall back to a single segment length write.
1800                          *
1801                  * If we didn't fall back here, we could livelock
1802                          * because not all segments in the iov can be copied at
1803                          * once without a pagefault.
1804                          */
1805                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
1806                                       iov_iter_single_seg_count(iter));
1807                         goto again;
1808                 }
1809                 pos += ret;
1810                 written += ret;
1811                 ret = 0;
1812
1813                 balance_dirty_pages_ratelimited(mapping);
1814         } while (iov_iter_count(iter));
1815
1816         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1817
1818         return written ? written : ret;
1819 }
1820
1821 /* O_DIRECT reads */
1822
1823 static void bio_check_or_release(struct bio *bio, bool check_dirty)
1824 {
1825         if (check_dirty) {
1826                 bio_check_pages_dirty(bio);
1827         } else {
1828                 bio_release_pages(bio, false);
1829                 bio_put(bio);
1830         }
1831 }
1832
1833 static void bch2_dio_read_complete(struct closure *cl)
1834 {
1835         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1836
1837         dio->req->ki_complete(dio->req, dio->ret);
1838         bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1839 }
1840
1841 static void bch2_direct_IO_read_endio(struct bio *bio)
1842 {
1843         struct dio_read *dio = bio->bi_private;
1844
1845         if (bio->bi_status)
1846                 dio->ret = blk_status_to_errno(bio->bi_status);
1847
1848         closure_put(&dio->cl);
1849 }
1850
1851 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1852 {
1853         struct dio_read *dio = bio->bi_private;
1854         bool should_dirty = dio->should_dirty;
1855
1856         bch2_direct_IO_read_endio(bio);
1857         bio_check_or_release(bio, should_dirty);
1858 }
1859
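     /*
      * A large O_DIRECT read may be split across several bios: the first bio
      * is allocated from dio_read_bioset and embeds the struct dio_read
      * (hence the goto into the middle of the loop below, which skips that
      * first allocation); each additional bio takes a ref on dio->cl, and
      * the read completes - via closure_sync() for sync kiocbs, or the
      * closure destructor for async ones - once every bio has finished.
      */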
1860 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1861 {
1862         struct file *file = req->ki_filp;
1863         struct bch_inode_info *inode = file_bch_inode(file);
1864         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1865         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1866         struct dio_read *dio;
1867         struct bio *bio;
1868         loff_t offset = req->ki_pos;
1869         bool sync = is_sync_kiocb(req);
1870         size_t shorten;
1871         ssize_t ret;
1872
1873         if ((offset|iter->count) & (block_bytes(c) - 1))
1874                 return -EINVAL;
1875
1876         ret = min_t(loff_t, iter->count,
1877                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1878
1879         if (!ret)
1880                 return ret;
1881
1882         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1883         iter->count -= shorten;
1884
1885         bio = bio_alloc_bioset(NULL,
1886                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
1887                                REQ_OP_READ,
1888                                GFP_KERNEL,
1889                                &c->dio_read_bioset);
1890
1891         bio->bi_end_io = bch2_direct_IO_read_endio;
1892
1893         dio = container_of(bio, struct dio_read, rbio.bio);
1894         closure_init(&dio->cl, NULL);
1895
1896         /*
1897          * this is a _really_ horrible hack just to avoid an atomic sub at the
1898          * end:
1899          */
1900         if (!sync) {
1901                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1902                 atomic_set(&dio->cl.remaining,
1903                            CLOSURE_REMAINING_INITIALIZER -
1904                            CLOSURE_RUNNING +
1905                            CLOSURE_DESTRUCTOR);
1906         } else {
1907                 atomic_set(&dio->cl.remaining,
1908                            CLOSURE_REMAINING_INITIALIZER + 1);
1909         }
1910
1911         dio->req        = req;
1912         dio->ret        = ret;
1913         /*
1914          * This is one of the sketchier things I've encountered: we have to skip
1915          * the dirtying of requests that are internal to the kernel (i.e. from
1916          * loopback), because we'll deadlock on page_lock.
1917          */
1918         dio->should_dirty = iter_is_iovec(iter);
1919
1920         goto start;
1921         while (iter->count) {
1922                 bio = bio_alloc_bioset(NULL,
1923                                        bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
1924                                        REQ_OP_READ,
1925                                        GFP_KERNEL,
1926                                        &c->bio_read);
1927                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1928 start:
1929                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1930                 bio->bi_iter.bi_sector  = offset >> 9;
1931                 bio->bi_private         = dio;
1932
1933                 ret = bio_iov_iter_get_pages(bio, iter);
1934                 if (ret < 0) {
1935                         /* XXX: fault inject this path */
1936                         bio->bi_status = BLK_STS_RESOURCE;
1937                         bio_endio(bio);
1938                         break;
1939                 }
1940
1941                 offset += bio->bi_iter.bi_size;
1942
1943                 if (dio->should_dirty)
1944                         bio_set_pages_dirty(bio);
1945
1946                 if (iter->count)
1947                         closure_get(&dio->cl);
1948
1949                 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
1950         }
1951
1952         iter->count += shorten;
1953
1954         if (sync) {
1955                 closure_sync(&dio->cl);
1956                 closure_debug_destroy(&dio->cl);
1957                 ret = dio->ret;
1958                 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1959                 return ret;
1960         } else {
1961                 return -EIOCBQUEUED;
1962         }
1963 }
1964
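     /*
      * Read entry point: O_DIRECT reads first flush and wait on any dirty
      * pagecache over the range, for coherency with buffered writes, then
      * issue the read under a block plug. Buffered reads hold the pagecache
      * add lock across generic_file_read_iter(), which excludes operations
      * that shoot down the page cache (truncate, hole punching, O_DIRECT
      * writes) while pages are being added.
      */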
1965 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1966 {
1967         struct file *file = iocb->ki_filp;
1968         struct bch_inode_info *inode = file_bch_inode(file);
1969         struct address_space *mapping = file->f_mapping;
1970         size_t count = iov_iter_count(iter);
1971         ssize_t ret;
1972
1973         if (!count)
1974                 return 0; /* skip atime */
1975
1976         if (iocb->ki_flags & IOCB_DIRECT) {
1977                 struct blk_plug plug;
1978
1979                 ret = filemap_write_and_wait_range(mapping,
1980                                         iocb->ki_pos,
1981                                         iocb->ki_pos + count - 1);
1982                 if (ret < 0)
1983                         goto out;
1984
1985                 file_accessed(file);
1986
1987                 blk_start_plug(&plug);
1988                 ret = bch2_direct_IO_read(iocb, iter);
1989                 blk_finish_plug(&plug);
1990
1991                 if (ret >= 0)
1992                         iocb->ki_pos += ret;
1993         } else {
1994                 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1995                 ret = generic_file_read_iter(iocb, iter);
1996                 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1997         }
1998 out:
1999         return bch2_err_class(ret);
2000 }
2001
2002 /* O_DIRECT writes */
2003
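     /*
      * Returns true if the entire range [offset, offset + size) is already
      * allocated with at least nr_replicas replicas and - unless the write
      * itself is compressed - contains no compressed extents: in that case
      * an O_DIRECT overwrite may proceed even when a new disk reservation
      * can't be taken, since it won't consume additional space.
      */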
2004 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2005                                        u64 offset, u64 size,
2006                                        unsigned nr_replicas, bool compressed)
2007 {
2008         struct btree_trans trans;
2009         struct btree_iter iter;
2010         struct bkey_s_c k;
2011         u64 end = offset + size;
2012         u32 snapshot;
2013         bool ret = true;
2014         int err;
2015
2016         bch2_trans_init(&trans, c, 0, 0);
2017 retry:
2018         bch2_trans_begin(&trans);
2019
2020         err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2021         if (err)
2022                 goto err;
2023
2024         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2025                            SPOS(inum.inum, offset, snapshot),
2026                            BTREE_ITER_SLOTS, k, err) {
2027                 if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
2028                         break;
2029
2030                 if (k.k->p.snapshot != snapshot ||
2031                     nr_replicas > bch2_bkey_replicas(c, k) ||
2032                     (!compressed && bch2_bkey_sectors_compressed(k))) {
2033                         ret = false;
2034                         break;
2035                 }
2036         }
2037
2038         offset = iter.pos.offset;
2039         bch2_trans_iter_exit(&trans, &iter);
2040 err:
2041         if (bch2_err_matches(err, BCH_ERR_transaction_restart))
2042                 goto retry;
2043         bch2_trans_exit(&trans);
2044
2045         return err ? false : ret;
2046 }
2047
2048 static void bch2_dio_write_loop_async(struct bch_write_op *);
2049
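     /*
      * Heart of the O_DIRECT write path, structured as a resumable loop: on
      * the first call dio->loop is false and we build and submit a write op
      * for as much of the iter as we can pin; sync kiocbs then wait on
      * dio->done and fall through to the loop: label, while async kiocbs
      * return -EIOCBQUEUED and re-enter here from the write op's end_io
      * callback. faults_disabled_mapping is set around
      * bio_iov_iter_get_pages() so that faulting in a user buffer mmapped
      * from this same file can't deadlock on the pagecache lock - see
      * fdm_dropped_locks() below.
      */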
2050 static long bch2_dio_write_loop(struct dio_write *dio)
2051 {
2052         bool kthread = (current->flags & PF_KTHREAD) != 0;
2053         struct kiocb *req = dio->req;
2054         struct address_space *mapping = req->ki_filp->f_mapping;
2055         struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
2056         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2057         struct bio *bio = &dio->op.wbio.bio;
2058         struct bvec_iter_all iter;
2059         struct bio_vec *bv;
2060         unsigned unaligned, iter_count;
2061         bool sync = dio->sync, dropped_locks;
2062         long ret;
2063
2064         if (dio->loop)
2065                 goto loop;
2066
2067         while (1) {
2068                 iter_count = dio->iter.count;
2069
2070                 if (kthread && dio->mm)
2071                         kthread_use_mm(dio->mm);
2072                 BUG_ON(current->faults_disabled_mapping);
2073                 current->faults_disabled_mapping = mapping;
2074
2075                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2076
2077                 dropped_locks = fdm_dropped_locks();
2078
2079                 current->faults_disabled_mapping = NULL;
2080                 if (kthread && dio->mm)
2081                         kthread_unuse_mm(dio->mm);
2082
2083                 /*
2084                  * If the fault handler returned an error but also signalled
2085                  * that it dropped & retook ei_pagecache_lock, we just need to
2086                  * re-shoot down the page cache and retry:
2087                  */
2088                 if (dropped_locks && ret)
2089                         ret = 0;
2090
2091                 if (unlikely(ret < 0))
2092                         goto err;
2093
2094                 if (unlikely(dropped_locks)) {
2095                         ret = write_invalidate_inode_pages_range(mapping,
2096                                         req->ki_pos,
2097                                         req->ki_pos + iter_count - 1);
2098                         if (unlikely(ret))
2099                                 goto err;
2100
2101                         if (!bio->bi_iter.bi_size)
2102                                 continue;
2103                 }
2104
2105                 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2106                 bio->bi_iter.bi_size -= unaligned;
2107                 iov_iter_revert(&dio->iter, unaligned);
2108
2109                 if (!bio->bi_iter.bi_size) {
2110                         /*
2111                          * bio_iov_iter_get_pages was only able to get <
2112                          * blocksize worth of pages:
2113                          */
2114                         ret = -EFAULT;
2115                         goto err;
2116                 }
2117
2118                 bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
2119                 dio->op.end_io          = bch2_dio_write_loop_async;
2120                 dio->op.target          = dio->op.opts.foreground_target;
2121                 dio->op.write_point     = writepoint_hashed((unsigned long) current);
2122                 dio->op.nr_replicas     = dio->op.opts.data_replicas;
2123                 dio->op.subvol          = inode->ei_subvol;
2124                 dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2125
2126                 if ((req->ki_flags & IOCB_DSYNC) &&
2127                     !c->opts.journal_flush_disabled)
2128                         dio->op.flags |= BCH_WRITE_FLUSH;
2129                 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2130
2131                 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2132                                                 dio->op.opts.data_replicas, 0);
2133                 if (unlikely(ret) &&
2134                     !bch2_check_range_allocated(c, inode_inum(inode),
2135                                 dio->op.pos.offset, bio_sectors(bio),
2136                                 dio->op.opts.data_replicas,
2137                                 dio->op.opts.compression != 0))
2138                         goto err;
2139
2140                 task_io_account_write(bio->bi_iter.bi_size);
2141
2142                 if (!dio->sync && !dio->loop && dio->iter.count) {
2143                         struct iovec *iov = dio->inline_vecs;
2144
2145                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2146                                 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
2147                                               GFP_KERNEL);
2148                                 if (unlikely(!iov)) {
2149                                         dio->sync = sync = true;
2150                                         goto do_io;
2151                                 }
2152
2153                                 dio->free_iov = true;
2154                         }
2155
2156                         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2157                         dio->iter.iov = iov;
2158                 }
2159 do_io:
2160                 dio->loop = true;
2161                 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2162
2163                 if (sync)
2164                         wait_for_completion(&dio->done);
2165                 else
2166                         return -EIOCBQUEUED;
2167 loop:
2168                 i_sectors_acct(c, inode, &dio->quota_res,
2169                                dio->op.i_sectors_delta);
2170                 req->ki_pos += (u64) dio->op.written << 9;
2171                 dio->written += dio->op.written;
2172
2173                 spin_lock(&inode->v.i_lock);
2174                 if (req->ki_pos > inode->v.i_size)
2175                         i_size_write(&inode->v, req->ki_pos);
2176                 spin_unlock(&inode->v.i_lock);
2177
2178                 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2179                         bio_for_each_segment_all(bv, bio, iter)
2180                                 put_page(bv->bv_page);
2181                 bio->bi_vcnt = 0;
2182
2183                 if (dio->op.error) {
2184                         set_bit(EI_INODE_ERROR, &inode->ei_flags);
2185                         break;
2186                 }
2187
2188                 if (!dio->iter.count)
2189                         break;
2190
2191                 bio_reset(bio, NULL, REQ_OP_WRITE);
2192                 reinit_completion(&dio->done);
2193         }
2194
2195         ret = dio->op.error ?: ((long) dio->written << 9);
2196 err:
2197         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2198         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2199
2200         if (dio->free_iov)
2201                 kfree(dio->iter.iov);
2202
2203         if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2204                 bio_for_each_segment_all(bv, bio, iter)
2205                         put_page(bv->bv_page);
2206         bio_put(bio);
2207
2208         /* inode->i_dio_count is our ref on inode and thus bch_fs */
2209         inode_dio_end(&inode->v);
2210
2211         if (ret < 0)
2212                 ret = bch2_err_class(ret);
2213
2214         if (!sync) {
2215                 req->ki_complete(req, ret);
2216                 ret = -EIOCBQUEUED;
2217         }
2218         return ret;
2219 }
2220
2221 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2222 {
2223         struct dio_write *dio = container_of(op, struct dio_write, op);
2224
2225         if (dio->sync)
2226                 complete(&dio->done);
2227         else
2228                 bch2_dio_write_loop(dio);
2229 }
2230
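     /*
      * O_DIRECT write entry point: i_rwsem is held for the duration of the
      * write only when extending the file (in which case the write is also
      * forced sync); otherwise it's dropped before the IO is issued, and
      * concurrent pagecache activity is excluded by holding the pagecache
      * block lock and shooting down the page cache over the range instead.
      */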
2231 static noinline
2232 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2233 {
2234         struct file *file = req->ki_filp;
2235         struct address_space *mapping = file->f_mapping;
2236         struct bch_inode_info *inode = file_bch_inode(file);
2237         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2238         struct dio_write *dio;
2239         struct bio *bio;
2240         bool locked = true, extending;
2241         ssize_t ret;
2242
2243         prefetch(&c->opts);
2244         prefetch((void *) &c->opts + 64);
2245         prefetch(&inode->ei_inode);
2246         prefetch((void *) &inode->ei_inode + 64);
2247
2248         inode_lock(&inode->v);
2249
2250         ret = generic_write_checks(req, iter);
2251         if (unlikely(ret <= 0))
2252                 goto err;
2253
2254         ret = file_remove_privs(file);
2255         if (unlikely(ret))
2256                 goto err;
2257
2258         ret = file_update_time(file);
2259         if (unlikely(ret))
2260                 goto err;
2261
2262         if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
2263                 ret = -EINVAL;
                     goto err;
             }
2264
2265         inode_dio_begin(&inode->v);
2266         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2267
2268         extending = req->ki_pos + iter->count > inode->v.i_size;
2269         if (!extending) {
2270                 inode_unlock(&inode->v);
2271                 locked = false;
2272         }
2273
2274         bio = bio_alloc_bioset(NULL,
2275                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2276                                REQ_OP_WRITE,
2277                                GFP_KERNEL,
2278                                &c->dio_write_bioset);
2279         dio = container_of(bio, struct dio_write, op.wbio.bio);
2280         init_completion(&dio->done);
2281         dio->req                = req;
2282         dio->mm                 = current->mm;
2283         dio->loop               = false;
2284         dio->sync               = is_sync_kiocb(req) || extending;
2285         dio->free_iov           = false;
2286         dio->quota_res.sectors  = 0;
2287         dio->written            = 0;
2288         dio->iter               = *iter;
2289
2290         ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2291                                          iter->count >> 9, true);
2292         if (unlikely(ret))
2293                 goto err_put_bio;
2294
2295         ret = write_invalidate_inode_pages_range(mapping,
2296                                         req->ki_pos,
2297                                         req->ki_pos + iter->count - 1);
2298         if (unlikely(ret))
2299                 goto err_put_bio;
2300
2301         ret = bch2_dio_write_loop(dio);
2302 err:
2303         if (locked)
2304                 inode_unlock(&inode->v);
2305         return ret;
2306 err_put_bio:
2307         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2308         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2309         bio_put(bio);
2310         inode_dio_end(&inode->v);
2311         goto err;
2312 }
2313
2314 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2315 {
2316         struct file *file = iocb->ki_filp;
2317         struct bch_inode_info *inode = file_bch_inode(file);
2318         ssize_t ret;
2319
2320         if (iocb->ki_flags & IOCB_DIRECT) {
2321                 ret = bch2_direct_write(iocb, from);
2322                 goto out;
2323         }
2324
2325         /* We can write back this queue in page reclaim */
2326         current->backing_dev_info = inode_to_bdi(&inode->v);
2327         inode_lock(&inode->v);
2328
2329         ret = generic_write_checks(iocb, from);
2330         if (ret <= 0)
2331                 goto unlock;
2332
2333         ret = file_remove_privs(file);
2334         if (ret)
2335                 goto unlock;
2336
2337         ret = file_update_time(file);
2338         if (ret)
2339                 goto unlock;
2340
2341         ret = bch2_buffered_write(iocb, from);
2342         if (likely(ret > 0))
2343                 iocb->ki_pos += ret;
2344 unlock:
2345         inode_unlock(&inode->v);
2346         current->backing_dev_info = NULL;
2347
2348         if (ret > 0)
2349                 ret = generic_write_sync(iocb, ret);
2350 out:
2351         return bch2_err_class(ret);
2352 }
2353
2354 /* fsync: */
2355
2356 /*
2357  * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2358  * insert trigger: look up the btree inode instead
2359  */
2360 static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
2361 {
2362         struct bch_inode_unpacked inode;
2363         int ret;
2364
2365         if (c->opts.journal_flush_disabled)
2366                 return 0;
2367
2368         ret = bch2_inode_find_by_inum(c, inum, &inode);
2369         if (ret)
2370                 return ret;
2371
2372         return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
2373 }
2374
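     /*
      * fsync: flush dirty pages, sync VFS inode metadata, then flush the
      * journal up to this inode's last journal sequence number; all three
      * steps are always attempted, and the first error in that order wins.
      */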
2375 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2376 {
2377         struct bch_inode_info *inode = file_bch_inode(file);
2378         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2379         int ret, ret2, ret3;
2380
2381         ret = file_write_and_wait_range(file, start, end);
2382         ret2 = sync_inode_metadata(&inode->v, 1);
2383         ret3 = bch2_flush_inode(c, inode_inum(inode));
2384
2385         return bch2_err_class(ret ?: ret2 ?: ret3);
2386 }
2387
2388 /* truncate: */
2389
2390 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2391                                  struct bpos start,
2392                                  struct bpos end)
2393 {
2394         struct btree_trans trans;
2395         struct btree_iter iter;
2396         struct bkey_s_c k;
2397         int ret = 0;
2398
2399         bch2_trans_init(&trans, c, 0, 0);
2400 retry:
2401         bch2_trans_begin(&trans);
2402
2403         ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2404         if (ret)
2405                 goto err;
2406
2407         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
2408                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2409                         break;
2410
2411                 if (bkey_extent_is_data(k.k)) {
2412                         ret = 1;
2413                         break;
2414                 }
2415         }
2416         start = iter.pos;
2417         bch2_trans_iter_exit(&trans, &iter);
2418 err:
2419         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2420                 goto retry;
2421
2422         bch2_trans_exit(&trans);
2423         return ret;
2424 }
2425
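     /*
      * Zero out the part of [start, end) that lands within the page at
      * @index, marking the affected sectors unallocated. Returns 1 if the
      * page straddling i_size will still be written out by writeback (which
      * then handles the i_size update), 0 if the caller is responsible for
      * the i_size update itself, or a negative error.
      */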
2426 static int __bch2_truncate_page(struct bch_inode_info *inode,
2427                                 pgoff_t index, loff_t start, loff_t end)
2428 {
2429         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2430         struct address_space *mapping = inode->v.i_mapping;
2431         struct bch_page_state *s;
2432         unsigned start_offset = start & (PAGE_SIZE - 1);
2433         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2434         unsigned i;
2435         struct page *page;
2436         s64 i_sectors_delta = 0;
2437         int ret = 0;
2438
2439         /* Page boundary? Nothing to do */
2440         if (!((index == start >> PAGE_SHIFT && start_offset) ||
2441               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2442                 return 0;
2443
2444         /* Above i_size? */
2445         if (index << PAGE_SHIFT >= inode->v.i_size)
2446                 return 0;
2447
2448         page = find_lock_page(mapping, index);
2449         if (!page) {
2450                 /*
2451                  * XXX: we're doing two index lookups when we end up reading the
2452                  * page
2453                  */
2454                 ret = range_has_data(c, inode->ei_subvol,
2455                                 POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
2456                                 POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
2457                 if (ret <= 0)
2458                         return ret;
2459
2460                 page = find_or_create_page(mapping, index, GFP_KERNEL);
2461                 if (unlikely(!page)) {
2462                         ret = -ENOMEM;
2463                         goto out;
2464                 }
2465         }
2466
2467         s = bch2_page_state_create(page, 0);
2468         if (!s) {
2469                 ret = -ENOMEM;
2470                 goto unlock;
2471         }
2472
2473         if (!PageUptodate(page)) {
2474                 ret = bch2_read_single_page(page, mapping);
2475                 if (ret)
2476                         goto unlock;
2477         }
2478
2479         if (index != start >> PAGE_SHIFT)
2480                 start_offset = 0;
2481         if (index != end >> PAGE_SHIFT)
2482                 end_offset = PAGE_SIZE;
2483
2484         for (i = round_up(start_offset, block_bytes(c)) >> 9;
2485              i < round_down(end_offset, block_bytes(c)) >> 9;
2486              i++) {
2487                 s->s[i].nr_replicas     = 0;
2488                 if (s->s[i].state == SECTOR_DIRTY)
2489                         i_sectors_delta--;
2490                 s->s[i].state           = SECTOR_UNALLOCATED;
2491         }
2492
2493         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2494
2495         /*
2496          * Caller needs to know whether this page will be written out by
2497          * writeback - doing an i_size update if necessary - or whether it will
2498          * be responsible for the i_size update:
2499          */
2500         ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
2501                           PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;
2502
2503         zero_user_segment(page, start_offset, end_offset);
2504
2505         /*
2506          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2507          *
2508          * XXX: because we aren't currently tracking whether the page has actual
2509          * data in it (vs. just 0s, or only partially written) this wrong. ick.
2510          */
2511         BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));
2512
2513         /*
2514          * This removes any writeable userspace mappings; we need to force
2515          * .page_mkwrite to be called again before any mmapped writes, to
2516          * redirty the full page:
2517          */
2518         page_mkclean(page);
2519         __set_page_dirty_nobuffers(page);
2520 unlock:
2521         unlock_page(page);
2522         put_page(page);
2523 out:
2524         return ret;
2525 }
2526
2527 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2528 {
2529         return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2530                                     from, round_up(from, PAGE_SIZE));
2531 }
2532
2533 static int bch2_truncate_pages(struct bch_inode_info *inode,
2534                                loff_t start, loff_t end)
2535 {
2536         int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
2537                                        start, end);
2538
2539         if (ret >= 0 &&
2540             start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2541                 ret = __bch2_truncate_page(inode,
2542                                            end >> PAGE_SHIFT,
2543                                            start, end);
2544         return ret;
2545 }
2546
2547 static int bch2_extend(struct user_namespace *mnt_userns,
2548                        struct bch_inode_info *inode,
2549                        struct bch_inode_unpacked *inode_u,
2550                        struct iattr *iattr)
2551 {
2552         struct address_space *mapping = inode->v.i_mapping;
2553         int ret;
2554
2555         /*
2556          * sync appends:
2557          *
2558          * this has to be done _before_ extending i_size:
2559          */
2560         ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2561         if (ret)
2562                 return ret;
2563
2564         truncate_setsize(&inode->v, iattr->ia_size);
2565
2566         return bch2_setattr_nonsize(mnt_userns, inode, iattr);
2567 }
2568
2569 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2570                                    struct bch_inode_unpacked *bi,
2571                                    void *p)
2572 {
2573         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2574         return 0;
2575 }
2576
2577 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2578                                   struct bch_inode_unpacked *bi, void *p)
2579 {
2580         u64 *new_i_size = p;
2581
2582         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2583         bi->bi_size = *new_i_size;
2584         return 0;
2585 }
2586
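     /*
      * Truncate proper: the new size is first written to the btree inode
      * with BCH_INODE_I_SIZE_DIRTY set, so that if we crash partway through
      * recovery can finish deleting extents past the new i_size; the flag
      * is only cleared by bch2_truncate_finish_fn() once the extents have
      * been punched.
      */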
2587 int bch2_truncate(struct user_namespace *mnt_userns,
2588                   struct bch_inode_info *inode, struct iattr *iattr)
2589 {
2590         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2591         struct address_space *mapping = inode->v.i_mapping;
2592         struct bch_inode_unpacked inode_u;
2593         u64 new_i_size = iattr->ia_size;
2594         s64 i_sectors_delta = 0;
2595         int ret = 0;
2596
2597         /*
2598          * If the truncate call will change the size of the file, the
2599          * cmtimes should be updated. If the size will not change, we
2600          * do not need to update the cmtimes.
2601          */
2602         if (iattr->ia_size != inode->v.i_size) {
2603                 if (!(iattr->ia_valid & ATTR_MTIME))
2604                         ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2605                 if (!(iattr->ia_valid & ATTR_CTIME))
2606                         ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2607                 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2608         }
2609
2610         inode_dio_wait(&inode->v);
2611         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2612
2613         ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2614         if (ret)
2615                 goto err;
2616
2617         /*
2618          * check this before next assertion; on filesystem error our normal
2619          * invariants are a bit broken (truncate has to truncate the page cache
2620          * before the inode).
2621          */
2622         ret = bch2_journal_error(&c->journal);
2623         if (ret)
2624                 goto err;
2625
2626         WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
2627                 inode->v.i_size < inode_u.bi_size);
2628
2629         if (iattr->ia_size > inode->v.i_size) {
2630                 ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
2631                 goto err;
2632         }
2633
2634         iattr->ia_valid &= ~ATTR_SIZE;
2635
2636         ret = bch2_truncate_page(inode, iattr->ia_size);
2637         if (unlikely(ret < 0))
2638                 goto err;
2639
2640         /*
2641          * When extending, we're going to write the new i_size to disk
2642          * immediately so we need to flush anything above the current on disk
2643          * i_size first:
2644          *
2645          * Also, when extending we need to flush the page that i_size currently
2646          * straddles - if it's mapped to userspace, we need to ensure that
2647          * userspace has to redirty it and call .mkwrite -> set_page_dirty
2648          * again to allocate the part of the page that was extended.
2649          */
2650         if (iattr->ia_size > inode_u.bi_size)
2651                 ret = filemap_write_and_wait_range(mapping,
2652                                 inode_u.bi_size,
2653                                 iattr->ia_size - 1);
2654         else if (iattr->ia_size & (PAGE_SIZE - 1))
2655                 ret = filemap_write_and_wait_range(mapping,
2656                                 round_down(iattr->ia_size, PAGE_SIZE),
2657                                 iattr->ia_size - 1);
2658         if (ret)
2659                 goto err;
2660
2661         mutex_lock(&inode->ei_update_lock);
2662         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2663                                &new_i_size, 0);
2664         mutex_unlock(&inode->ei_update_lock);
2665
2666         if (unlikely(ret))
2667                 goto err;
2668
2669         truncate_setsize(&inode->v, iattr->ia_size);
2670
2671         ret = bch2_fpunch(c, inode_inum(inode),
2672                         round_up(iattr->ia_size, block_bytes(c)) >> 9,
2673                         U64_MAX, &i_sectors_delta);
2674         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2675
2676         bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
2677                                 !bch2_journal_error(&c->journal), c,
2678                                 "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
2679                                 inode->v.i_ino, (u64) inode->v.i_blocks,
2680                                 inode->ei_inode.bi_sectors);
2681         if (unlikely(ret))
2682                 goto err;
2683
2684         mutex_lock(&inode->ei_update_lock);
2685         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
2686         mutex_unlock(&inode->ei_update_lock);
2687
2688         ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
2689 err:
2690         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2691         return bch2_err_class(ret);
2692 }
2693
2694 /* fallocate: */
2695
2696 static int inode_update_times_fn(struct bch_inode_info *inode,
2697                                  struct bch_inode_unpacked *bi, void *p)
2698 {
2699         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2700
2701         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
2702         return 0;
2703 }
2704
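     /*
      * FALLOC_FL_PUNCH_HOLE: partial pages at either edge of the range are
      * zeroed in place via bch2_truncate_pages(), the page cache over the
      * range is dropped, and only the fully covered blocks in between
      * ([block_start, block_end)) are actually punched out of the extents
      * btree.
      */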
2705 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2706 {
2707         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2708         u64 end         = offset + len;
2709         u64 block_start = round_up(offset, block_bytes(c));
2710         u64 block_end   = round_down(end, block_bytes(c));
2711         bool truncated_last_page;
2712         int ret = 0;
2713
2714         ret = bch2_truncate_pages(inode, offset, end);
2715         if (unlikely(ret < 0))
2716                 goto err;
2717
2718         truncated_last_page = ret;
2719
2720         truncate_pagecache_range(&inode->v, offset, end - 1);
2721
2722         if (block_start < block_end) {
2723                 s64 i_sectors_delta = 0;
2724
2725                 ret = bch2_fpunch(c, inode_inum(inode),
2726                                   block_start >> 9, block_end >> 9,
2727                                   &i_sectors_delta);
2728                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2729         }
2730
2731         mutex_lock(&inode->ei_update_lock);
2732         if (end >= inode->v.i_size && !truncated_last_page) {
2733                 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
2734                                             ATTR_MTIME|ATTR_CTIME);
2735         } else {
2736                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
2737                                        ATTR_MTIME|ATTR_CTIME);
2738         }
2739         mutex_unlock(&inode->ei_update_lock);
2740 err:
2741         return ret;
2742 }
2743
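     /*
      * Collapse range / insert range: shift every extent past the affected
      * area down (collapse) or up (insert) by len bytes. Insert walks
      * extents backwards from the end of the file so keys can move to
      * higher offsets without colliding; collapse first punches out
      * [offset, offset + len) and walks forwards. Each step copies a key to
      * its shifted position and deletes the original in a single transaction
      * commit, taking a disk reservation when a compressed extent gets
      * split.
      */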
2744 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
2745                                    loff_t offset, loff_t len,
2746                                    bool insert)
2747 {
2748         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2749         struct address_space *mapping = inode->v.i_mapping;
2750         struct bkey_buf copy;
2751         struct btree_trans trans;
2752         struct btree_iter src, dst, del;
2753         loff_t shift, new_size;
2754         u64 src_start;
2755         int ret = 0;
2756
2757         if ((offset | len) & (block_bytes(c) - 1))
2758                 return -EINVAL;
2759
2760         if (insert) {
2761                 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
2762                         return -EFBIG;
2763
2764                 if (offset >= inode->v.i_size)
2765                         return -EINVAL;
2766
2767                 src_start       = U64_MAX;
2768                 shift           = len;
2769         } else {
2770                 if (offset + len >= inode->v.i_size)
2771                         return -EINVAL;
2772
2773                 src_start       = offset + len;
2774                 shift           = -len;
2775         }
2776
2777         new_size = inode->v.i_size + shift;
2778
2779         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2780         if (ret)
2781                 return ret;
2782
2783         if (insert) {
2784                 i_size_write(&inode->v, new_size);
2785                 mutex_lock(&inode->ei_update_lock);
2786                 ret = bch2_write_inode_size(c, inode, new_size,
2787                                             ATTR_MTIME|ATTR_CTIME);
2788                 mutex_unlock(&inode->ei_update_lock);
2789         } else {
2790                 s64 i_sectors_delta = 0;
2791
2792                 ret = bch2_fpunch(c, inode_inum(inode),
2793                                   offset >> 9, (offset + len) >> 9,
2794                                   &i_sectors_delta);
2795                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2796
2797                 if (ret)
2798                         return ret;
2799         }
2800
2801         bch2_bkey_buf_init(&copy);
2802         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
2803         bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
2804                         POS(inode->v.i_ino, src_start >> 9),
2805                         BTREE_ITER_INTENT);
2806         bch2_trans_copy_iter(&dst, &src);
2807         bch2_trans_copy_iter(&del, &src);
2808
2809         while (ret == 0 ||
2810                bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
2811                 struct disk_reservation disk_res =
2812                         bch2_disk_reservation_init(c, 0);
2813                 struct bkey_i delete;
2814                 struct bkey_s_c k;
2815                 struct bpos next_pos;
2816                 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
2817                 struct bpos atomic_end;
2818                 unsigned trigger_flags = 0;
2819                 u32 snapshot;
2820
2821                 bch2_trans_begin(&trans);
2822
2823                 ret = bch2_subvolume_get_snapshot(&trans,
2824                                         inode->ei_subvol, &snapshot);
2825                 if (ret)
2826                         continue;
2827
2828                 bch2_btree_iter_set_snapshot(&src, snapshot);
2829                 bch2_btree_iter_set_snapshot(&dst, snapshot);
2830                 bch2_btree_iter_set_snapshot(&del, snapshot);
2831
2832                 bch2_trans_begin(&trans);
2833
2834                 k = insert
2835                         ? bch2_btree_iter_peek_prev(&src)
2836                         : bch2_btree_iter_peek(&src);
2837                 if ((ret = bkey_err(k)))
2838                         continue;
2839
2840                 if (!k.k || k.k->p.inode != inode->v.i_ino)
2841                         break;
2842
2843                 if (insert &&
2844                     bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
2845                         break;
2846 reassemble:
2847                 bch2_bkey_buf_reassemble(&copy, c, k);
2848
2849                 if (insert &&
2850                     bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
2851                         bch2_cut_front(move_pos, copy.k);
2852
2853                 copy.k->k.p.offset += shift >> 9;
2854                 bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
2855
2856                 ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
2857                 if (ret)
2858                         continue;
2859
2860                 if (bkey_cmp(atomic_end, copy.k->k.p)) {
2861                         if (insert) {
2862                                 move_pos = atomic_end;
2863                                 move_pos.offset -= shift >> 9;
2864                                 goto reassemble;
2865                         } else {
2866                                 bch2_cut_back(atomic_end, copy.k);
2867                         }
2868                 }
2869
2870                 bkey_init(&delete.k);
2871                 delete.k.p = copy.k->k.p;
2872                 delete.k.size = copy.k->k.size;
2873                 delete.k.p.offset -= shift >> 9;
2874                 bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
2875
2876                 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
2877
2878                 if (copy.k->k.size != k.k->size) {
2879                         /* We might end up splitting compressed extents: */
2880                         unsigned nr_ptrs =
2881                                 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
2882
2883                         ret = bch2_disk_reservation_get(c, &disk_res,
2884                                         copy.k->k.size, nr_ptrs,
2885                                         BCH_DISK_RESERVATION_NOFAIL);
2886                         BUG_ON(ret);
2887                 }
2888
2889                 ret =   bch2_btree_iter_traverse(&del) ?:
2890                         bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
2891                         bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
2892                         bch2_trans_commit(&trans, &disk_res, NULL,
2893                                           BTREE_INSERT_NOFAIL);
2894                 bch2_disk_reservation_put(c, &disk_res);
2895
2896                 if (!ret)
2897                         bch2_btree_iter_set_pos(&src, next_pos);
2898         }
2899         bch2_trans_iter_exit(&trans, &del);
2900         bch2_trans_iter_exit(&trans, &dst);
2901         bch2_trans_iter_exit(&trans, &src);
2902         bch2_trans_exit(&trans);
2903         bch2_bkey_buf_exit(&copy, c);
2904
2905         if (ret)
2906                 return ret;
2907
2908         mutex_lock(&inode->ei_update_lock);
2909         if (!insert) {
2910                 i_size_write(&inode->v, new_size);
2911                 ret = bch2_write_inode_size(c, inode, new_size,
2912                                             ATTR_MTIME|ATTR_CTIME);
2913         } else {
2914                 /* We need an inode update to update bi_journal_seq for fsync: */
2915                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
2916                                        ATTR_MTIME|ATTR_CTIME);
2917         }
2918         mutex_unlock(&inode->ei_update_lock);
2919         return ret;
2920 }
2921
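     /*
      * Core of fallocate: walk the range a slot at a time, skipping extents
      * that are already sufficiently replicated reservations - or real
      * data, unless FALLOC_FL_ZERO_RANGE - and replacing everything else
      * with KEY_TYPE_reservation keys backed by quota and disk
      * reservations, so that future writes to the range can't fail with
      * -ENOSPC.
      */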
2922 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
2923                              u64 start_sector, u64 end_sector)
2924 {
2925         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2926         struct btree_trans trans;
2927         struct btree_iter iter;
2928         struct bpos end_pos = POS(inode->v.i_ino, end_sector);
2929         unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2930         int ret = 0;
2931
2932         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
2933
2934         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
2935                         POS(inode->v.i_ino, start_sector),
2936                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2937
2938         while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
2939                 s64 i_sectors_delta = 0;
2940                 struct disk_reservation disk_res = { 0 };
2941                 struct quota_res quota_res = { 0 };
2942                 struct bkey_i_reservation reservation;
2943                 struct bkey_s_c k;
2944                 unsigned sectors;
2945                 u32 snapshot;
2946
2947                 bch2_trans_begin(&trans);
2948
2949                 ret = bch2_subvolume_get_snapshot(&trans,
2950                                         inode->ei_subvol, &snapshot);
2951                 if (ret)
2952                         goto bkey_err;
2953
2954                 bch2_btree_iter_set_snapshot(&iter, snapshot);
2955
2956                 k = bch2_btree_iter_peek_slot(&iter);
2957                 if ((ret = bkey_err(k)))
2958                         goto bkey_err;
2959
2960                 /* already reserved */
2961                 if (k.k->type == KEY_TYPE_reservation &&
2962                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2963                         bch2_btree_iter_advance(&iter);
2964                         continue;
2965                 }
2966
2967                 if (bkey_extent_is_data(k.k) &&
2968                     !(mode & FALLOC_FL_ZERO_RANGE)) {
2969                         bch2_btree_iter_advance(&iter);
2970                         continue;
2971                 }
2972
2973                 bkey_reservation_init(&reservation.k_i);
2974                 reservation.k.type      = KEY_TYPE_reservation;
2975                 reservation.k.p         = k.k->p;
2976                 reservation.k.size      = k.k->size;
2977
2978                 bch2_cut_front(iter.pos,        &reservation.k_i);
2979                 bch2_cut_back(end_pos,          &reservation.k_i);
2980
2981                 sectors = reservation.k.size;
2982                 reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
2983
2984                 if (!bkey_extent_is_allocation(k.k)) {
2985                         ret = bch2_quota_reservation_add(c, inode,
2986                                         &quota_res,
2987                                         sectors, true);
2988                         if (unlikely(ret))
2989                                 goto bkey_err;
2990                 }
2991
2992                 if (reservation.v.nr_replicas < replicas ||
2993                     bch2_bkey_sectors_compressed(k)) {
2994                         ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2995                                                         replicas, 0);
2996                         if (unlikely(ret))
2997                                 goto bkey_err;
2998
2999                         reservation.v.nr_replicas = disk_res.nr_replicas;
3000                 }
3001
3002                 ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
3003                                          &reservation.k_i,
3004                                          &disk_res, NULL,
3005                                          0, &i_sectors_delta, true);
3006                 if (ret)
3007                         goto bkey_err;
3008                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3009 bkey_err:
3010                 bch2_quota_reservation_put(c, inode, &quota_res);
3011                 bch2_disk_reservation_put(c, &disk_res);
3012                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3013                         ret = 0;
3014         }
3015
3016         bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
3017         mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
3018
3019         if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
3020                 struct quota_res quota_res = { 0 };
3021                 s64 i_sectors_delta = 0;
3022
3023                 bch2_fpunch_at(&trans, &iter, inode_inum(inode),
3024                                end_sector, &i_sectors_delta);
3025                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3026                 bch2_quota_reservation_put(c, inode, &quota_res);
3027         }
3028
3029         bch2_trans_iter_exit(&trans, &iter);
3030         bch2_trans_exit(&trans);
3031         return ret;
3032 }
3033
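/*
 * fallocate proper, for plain preallocation and FALLOC_FL_ZERO_RANGE: handle
 * the page cache and i_size here, leaving the btree work to
 * __bchfs_fallocate().  For ZERO_RANGE, partial head and tail pages are
 * zeroed with bch2_truncate_pages() and the rest of the range is dropped
 * from the page cache before being reserved on disk.
 */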
3034 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
3035                             loff_t offset, loff_t len)
3036 {
3037         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3038         u64 end         = offset + len;
3039         u64 block_start = round_down(offset,    block_bytes(c));
3040         u64 block_end   = round_up(end,         block_bytes(c));
3041         bool truncated_last_page = false;
3042         int ret, ret2 = 0;
3043
3044         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
3045                 ret = inode_newsize_ok(&inode->v, end);
3046                 if (ret)
3047                         return ret;
3048         }
3049
3050         if (mode & FALLOC_FL_ZERO_RANGE) {
3051                 ret = bch2_truncate_pages(inode, offset, end);
3052                 if (unlikely(ret < 0))
3053                         return ret;
3054
3055                 truncated_last_page = ret;
3056
3057                 truncate_pagecache_range(&inode->v, offset, end - 1);
3058
3059                 block_start     = round_up(offset,      block_bytes(c));
3060                 block_end       = round_down(end,       block_bytes(c));
3061         }
3062
3063         ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
3064
3065         /*
3066          * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
3067          * so that the VFS cache i_size is consistent with the btree i_size:
3068          */
3069         if (ret &&
3070             !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
3071                 return ret;
3072
3073         if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
3074                 end = inode->v.i_size;
3075
3076         if (end >= inode->v.i_size &&
3077             (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
3078              !(mode & FALLOC_FL_KEEP_SIZE))) {
3079                 spin_lock(&inode->v.i_lock);
3080                 i_size_write(&inode->v, end);
3081                 spin_unlock(&inode->v.i_lock);
3082
3083                 mutex_lock(&inode->ei_update_lock);
3084                 ret2 = bch2_write_inode_size(c, inode, end, 0);
3085                 mutex_unlock(&inode->ei_update_lock);
3086         }
3087
3088         return ret ?: ret2;
3089 }
3090
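/*
 * fallocate() entry point: take a ref on c->writes (failing with -EROFS if
 * the filesystem is going read-only), then the inode lock and the pagecache
 * block in that order, wait for outstanding direct IO, and dispatch on
 * @mode.  From userspace this is reached via e.g.
 *
 *	fallocate(fd, 0, offset, len);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, offset, len);
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
 */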
3091 long bch2_fallocate_dispatch(struct file *file, int mode,
3092                              loff_t offset, loff_t len)
3093 {
3094         struct bch_inode_info *inode = file_bch_inode(file);
3095         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3096         long ret;
3097
3098         if (!percpu_ref_tryget_live(&c->writes))
3099                 return -EROFS;
3100
3101         inode_lock(&inode->v);
3102         inode_dio_wait(&inode->v);
3103         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
3104
3105         ret = file_modified(file);
3106         if (ret)
3107                 goto err;
3108
3109         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
3110                 ret = bchfs_fallocate(inode, mode, offset, len);
3111         else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
3112                 ret = bchfs_fpunch(inode, offset, len);
3113         else if (mode == FALLOC_FL_INSERT_RANGE)
3114                 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
3115         else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3116                 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
3117         else
3118                 ret = -EOPNOTSUPP;
3119 err:
3120         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
3121         inode_unlock(&inode->v);
3122         percpu_ref_put(&c->writes);
3123
3124         return bch2_err_class(ret);
3125 }
3126
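/*
 * Conservatively reserve quota for a range we're about to write or remap
 * into: scan the extents btree over [start, end) and reserve only the
 * sectors that aren't already allocated, so that overwriting existing data
 * isn't double counted.
 */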
3127 static int quota_reserve_range(struct bch_inode_info *inode,
3128                                struct quota_res *res,
3129                                u64 start, u64 end)
3130 {
3131         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3132         struct btree_trans trans;
3133         struct btree_iter iter;
3134         struct bkey_s_c k;
3135         u32 snapshot;
3136         u64 sectors = end - start;
3137         u64 pos = start;
3138         int ret;
3139
3140         bch2_trans_init(&trans, c, 0, 0);
3141 retry:
3142         bch2_trans_begin(&trans);
3143
3144         ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
3145         if (ret)
3146                 goto err;
3147
3148         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3149                              SPOS(inode->v.i_ino, pos, snapshot), 0);
3150
3151         while (!(ret = btree_trans_too_many_iters(&trans)) &&
3152                (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
3153                !(ret = bkey_err(k))) {
3154                 if (bkey_extent_is_allocation(k.k)) {
3155                         u64 s = min(end, k.k->p.offset) -
3156                                 max(start, bkey_start_offset(k.k));
3157                         BUG_ON(s > sectors);
3158                         sectors -= s;
3159                 }
3160                 bch2_btree_iter_advance(&iter);
3161         }
3162         pos = iter.pos.offset;
3163         bch2_trans_iter_exit(&trans, &iter);
3164 err:
3165         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3166                 goto retry;
3167
3168         bch2_trans_exit(&trans);
3169
3170         if (ret)
3171                 return ret;
3172
3173         return bch2_quota_reservation_add(c, inode, res, sectors, true);
3174 }
3175
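/*
 * remap_file_range: the reflink path (FICLONE/FICLONERANGE and the
 * copy_file_range() fast path).  Source and destination offsets must be
 * block aligned; we flush and invalidate the destination's page cache,
 * reserve quota, then share extents via bch2_remap_range().
 * REMAP_FILE_DEDUP is not currently supported.
 */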
3176 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
3177                              struct file *file_dst, loff_t pos_dst,
3178                              loff_t len, unsigned remap_flags)
3179 {
3180         struct bch_inode_info *src = file_bch_inode(file_src);
3181         struct bch_inode_info *dst = file_bch_inode(file_dst);
3182         struct bch_fs *c = src->v.i_sb->s_fs_info;
3183         struct quota_res quota_res = { 0 };
3184         s64 i_sectors_delta = 0;
3185         u64 aligned_len;
3186         loff_t ret = 0;
3187
3188         if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
3189                 return -EINVAL;
3190
3191         if (remap_flags & REMAP_FILE_DEDUP)
3192                 return -EOPNOTSUPP;
3193
3194         if ((pos_src & (block_bytes(c) - 1)) ||
3195             (pos_dst & (block_bytes(c) - 1)))
3196                 return -EINVAL;
3197
3198         if (src == dst &&
3199             abs(pos_src - pos_dst) < len)
3200                 return -EINVAL;
3201
3202         bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3203
3204         inode_dio_wait(&src->v);
3205         inode_dio_wait(&dst->v);
3206
3207         ret = generic_remap_file_range_prep(file_src, pos_src,
3208                                             file_dst, pos_dst,
3209                                             &len, remap_flags);
3210         if (ret < 0 || len == 0)
3211                 goto err;
3212
3213         aligned_len = round_up((u64) len, block_bytes(c));
3214
3215         ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
3216                                 pos_dst, pos_dst + len - 1);
3217         if (ret)
3218                 goto err;
3219
3220         ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
3221                                   (pos_dst + aligned_len) >> 9);
3222         if (ret)
3223                 goto err;
3224
3225         file_update_time(file_dst);
3226
3227         mark_pagecache_unallocated(src, pos_src >> 9,
3228                                    (pos_src + aligned_len) >> 9);
3229
3230         ret = bch2_remap_range(c,
3231                                inode_inum(dst), pos_dst >> 9,
3232                                inode_inum(src), pos_src >> 9,
3233                                aligned_len >> 9,
3234                                pos_dst + len, &i_sectors_delta);
3235         if (ret < 0)
3236                 goto err;
3237
3238         /*
3239          * due to alignment, we might have remapped slightly more than requested
3240          */
3241         ret = min((u64) ret << 9, (u64) len);
3242
3243         i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
3244
3245         spin_lock(&dst->v.i_lock);
3246         if (pos_dst + ret > dst->v.i_size)
3247                 i_size_write(&dst->v, pos_dst + ret);
3248         spin_unlock(&dst->v.i_lock);
3249
3250         if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
3251             IS_SYNC(file_inode(file_dst)))
3252                 ret = bch2_flush_inode(c, inode_inum(dst));
3253 err:
3254         bch2_quota_reservation_put(c, dst, &quota_res);
3255         bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3256
3257         return bch2_err_class(ret);
3258 }
3259
3260 /* fseek: */
3261
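/*
 * Find data at or after @offset within a page, using the per-sector state
 * bcachefs tracks in bch_page_state: anything at SECTOR_DIRTY or above
 * counts as data.  Returns a byte offset within the page, or -1 if there is
 * none.
 */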
3262 static int page_data_offset(struct page *page, unsigned offset)
3263 {
3264         struct bch_page_state *s = bch2_page_state(page);
3265         unsigned i;
3266
3267         if (s)
3268                 for (i = offset >> 9; i < PAGE_SECTORS; i++)
3269                         if (s->s[i].state >= SECTOR_DIRTY)
3270                                 return i << 9;
3271
3272         return -1;
3273 }
3274
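/*
 * SEEK_DATA has to see dirty data that so far exists only in the page
 * cache: walk the cache in folio batches and return the first offset in
 * [start_offset, end_offset) backed by data, or end_offset if there is
 * none.
 */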
3275 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
3276                                        loff_t start_offset,
3277                                        loff_t end_offset)
3278 {
3279         struct folio_batch fbatch;
3280         pgoff_t start_index     = start_offset >> PAGE_SHIFT;
3281         pgoff_t end_index       = end_offset >> PAGE_SHIFT;
3282         pgoff_t index           = start_index;
3283         unsigned i;
3284         loff_t ret;
3285         int offset;
3286
3287         folio_batch_init(&fbatch);
3288
3289         while (filemap_get_folios(vinode->i_mapping,
3290                                   &index, end_index, &fbatch)) {
3291                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
3292                         struct folio *folio = fbatch.folios[i];
3293
3294                         folio_lock(folio);
3295
3296                         offset = page_data_offset(&folio->page,
3297                                         folio->index == start_index
3298                                         ? start_offset & (PAGE_SIZE - 1)
3299                                         : 0);
3300                         if (offset >= 0) {
3301                                 ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
3302                                             offset,
3303                                             start_offset, end_offset);
3304                                 folio_unlock(folio);
3305                                 folio_batch_release(&fbatch);
3306                                 return ret;
3307                         }
3308
3309                         folio_unlock(folio);
3310                 }
3311                 folio_batch_release(&fbatch);
3312                 cond_resched();
3313         }
3314
3315         return end_offset;
3316 }
3317
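/*
 * SEEK_DATA: find the next data extent at or after @offset in the extents
 * btree, then let the page cache override it if dirty data exists earlier.
 * Returns -ENXIO past i_size, per lseek(2).
 */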
3318 static loff_t bch2_seek_data(struct file *file, u64 offset)
3319 {
3320         struct bch_inode_info *inode = file_bch_inode(file);
3321         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3322         struct btree_trans trans;
3323         struct btree_iter iter;
3324         struct bkey_s_c k;
3325         subvol_inum inum = inode_inum(inode);
3326         u64 isize, next_data = MAX_LFS_FILESIZE;
3327         u32 snapshot;
3328         int ret;
3329
3330         isize = i_size_read(&inode->v);
3331         if (offset >= isize)
3332                 return -ENXIO;
3333
3334         bch2_trans_init(&trans, c, 0, 0);
3335 retry:
3336         bch2_trans_begin(&trans);
3337
3338         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3339         if (ret)
3340                 goto err;
3341
3342         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3343                            SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
3344                 if (k.k->p.inode != inode->v.i_ino) {
3345                         break;
3346                 } else if (bkey_extent_is_data(k.k)) {
3347                         next_data = max(offset, bkey_start_offset(k.k) << 9);
3348                         break;
3349                 } else if (k.k->p.offset >> 9 > isize)
3350                         break;
3351         }
3352         bch2_trans_iter_exit(&trans, &iter);
3353 err:
3354         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3355                 goto retry;
3356
3357         bch2_trans_exit(&trans);
3358         if (ret)
3359                 return ret;
3360
3361         if (next_data > offset)
3362                 next_data = bch2_seek_pagecache_data(&inode->v,
3363                                                      offset, next_data);
3364
3365         if (next_data >= isize)
3366                 return -ENXIO;
3367
3368         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
3369 }
3370
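/*
 * Hole-seeking counterpart of page_data_offset(): a page with no
 * bch_page_state attached is treated as entirely a hole; otherwise the
 * first sector below SECTOR_DIRTY is one.  Returns a byte offset within the
 * page, or -1 if the page is fully data.
 */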
3371 static int __page_hole_offset(struct page *page, unsigned offset)
3372 {
3373         struct bch_page_state *s = bch2_page_state(page);
3374         unsigned i;
3375
3376         if (!s)
3377                 return 0;
3378
3379         for (i = offset >> 9; i < PAGE_SECTORS; i++)
3380                 if (s->s[i].state < SECTOR_DIRTY)
3381                         return i << 9;
3382
3383         return -1;
3384 }
3385
3386 static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
3387 {
3388         pgoff_t index = offset >> PAGE_SHIFT;
3389         struct page *page;
3390         int pg_offset;
3391         loff_t ret = -1;
3392
3393         page = find_lock_page(mapping, index);
3394         if (!page)
3395                 return offset;
3396
3397         pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
3398         if (pg_offset >= 0)
3399                 ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
3400
3401         unlock_page(page);
3402
3403         return ret;
3404 }
3405
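/*
 * Scan the page cache a page at a time for a hole in
 * [start_offset, end_offset); pages absent from the cache count as holes.
 */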
3406 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3407                                        loff_t start_offset,
3408                                        loff_t end_offset)
3409 {
3410         struct address_space *mapping = vinode->i_mapping;
3411         loff_t offset = start_offset, hole;
3412
3413         while (offset < end_offset) {
3414                 hole = page_hole_offset(mapping, offset);
3415                 if (hole >= 0 && hole <= end_offset)
3416                         return max(start_offset, hole);
3417
3418                 offset += PAGE_SIZE;
3419                 offset &= PAGE_MASK;
3420         }
3421
3422         return end_offset;
3423 }
3424
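/*
 * SEEK_HOLE: a hole in the extents btree may still be covered by dirty
 * pages, so every candidate is double checked against the page cache with
 * bch2_seek_pagecache_hole() before being reported.
 */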
3425 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3426 {
3427         struct bch_inode_info *inode = file_bch_inode(file);
3428         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3429         struct btree_trans trans;
3430         struct btree_iter iter;
3431         struct bkey_s_c k;
3432         subvol_inum inum = inode_inum(inode);
3433         u64 isize, next_hole = MAX_LFS_FILESIZE;
3434         u32 snapshot;
3435         int ret;
3436
3437         isize = i_size_read(&inode->v);
3438         if (offset >= isize)
3439                 return -ENXIO;
3440
3441         bch2_trans_init(&trans, c, 0, 0);
3442 retry:
3443         bch2_trans_begin(&trans);
3444
3445         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3446         if (ret)
3447                 goto err;
3448
3449         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3450                            SPOS(inode->v.i_ino, offset >> 9, snapshot),
3451                            BTREE_ITER_SLOTS, k, ret) {
3452                 if (k.k->p.inode != inode->v.i_ino) {
3453                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3454                                         offset, MAX_LFS_FILESIZE);
3455                         break;
3456                 } else if (!bkey_extent_is_data(k.k)) {
3457                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3458                                         max(offset, bkey_start_offset(k.k) << 9),
3459                                         k.k->p.offset << 9);
3460
3461                         if (next_hole < k.k->p.offset << 9)
3462                                 break;
3463                 } else {
3464                         offset = max(offset, bkey_start_offset(k.k) << 9);
3465                 }
3466         }
3467         bch2_trans_iter_exit(&trans, &iter);
3468 err:
3469         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3470                 goto retry;
3471
3472         bch2_trans_exit(&trans);
3473         if (ret)
3474                 return ret;
3475
3476         if (next_hole > isize)
3477                 next_hole = isize;
3478
3479         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3480 }
3481
3482 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3483 {
3484         loff_t ret;
3485
3486         switch (whence) {
3487         case SEEK_SET:
3488         case SEEK_CUR:
3489         case SEEK_END:
3490                 ret = generic_file_llseek(file, offset, whence);
3491                 break;
3492         case SEEK_DATA:
3493                 ret = bch2_seek_data(file, offset);
3494                 break;
3495         case SEEK_HOLE:
3496                 ret = bch2_seek_hole(file, offset);
3497                 break;
3498         default:
3499                 ret = -EINVAL;
3500                 break;
3501         }
3502
3503         return bch2_err_class(ret);
3504 }
3505
3506 void bch2_fs_fsio_exit(struct bch_fs *c)
3507 {
3508         bioset_exit(&c->dio_write_bioset);
3509         bioset_exit(&c->dio_read_bioset);
3510         bioset_exit(&c->writepage_bioset);
3511 }
3512
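/*
 * The biosets are sized so that a bio can live at the tail of the
 * writepage/dio containers above (note their "must be last" members):
 * passing offsetof() as the front_pad argument makes each allocated bio
 * come with enough memory in front of it for the containing struct.
 */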
3513 int bch2_fs_fsio_init(struct bch_fs *c)
3514 {
3515         int ret = 0;
3516
3517         pr_verbose_init(c->opts, "");
3518
3519         if (bioset_init(&c->writepage_bioset,
3520                         4, offsetof(struct bch_writepage_io, op.wbio.bio),
3521                         BIOSET_NEED_BVECS) ||
3522             bioset_init(&c->dio_read_bioset,
3523                         4, offsetof(struct dio_read, rbio.bio),
3524                         BIOSET_NEED_BVECS) ||
3525             bioset_init(&c->dio_write_bioset,
3526                         4, offsetof(struct dio_write, op.wbio.bio),
3527                         BIOSET_NEED_BVECS))
3528                 ret = -ENOMEM;
3529
3530         pr_verbose_init(c->opts, "ret %i", ret);
3531         return ret;
3532 }
3533
3534 #endif /* NO_BCACHEFS_FS */