1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_FS
3
4 #include "bcachefs.h"
5 #include "alloc_foreground.h"
6 #include "bkey_buf.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "clock.h"
10 #include "error.h"
11 #include "extents.h"
12 #include "extent_update.h"
13 #include "fs.h"
14 #include "fs-io.h"
15 #include "fsck.h"
16 #include "inode.h"
17 #include "journal.h"
18 #include "io.h"
19 #include "keylist.h"
20 #include "quota.h"
21 #include "reflink.h"
22
23 #include <linux/aio.h>
24 #include <linux/backing-dev.h>
25 #include <linux/falloc.h>
26 #include <linux/migrate.h>
27 #include <linux/mmu_context.h>
28 #include <linux/pagevec.h>
29 #include <linux/rmap.h>
30 #include <linux/sched/signal.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/uio.h>
33 #include <linux/writeback.h>
34
35 #include <trace/events/bcachefs.h>
36 #include <trace/events/writeback.h>
37
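/*
 * The low bit of current->faults_disabled_mapping is used as a flag: the
 * helpers below mask it off to recover the mapping pointer, set it to record
 * that locks were dropped during a page fault, and test it afterwards.
 */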
38 static inline struct address_space *faults_disabled_mapping(void)
39 {
40         return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
41 }
42
43 static inline void set_fdm_dropped_locks(void)
44 {
45         current->faults_disabled_mapping =
46                 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
47 }
48
49 static inline bool fdm_dropped_locks(void)
50 {
51         return ((unsigned long) current->faults_disabled_mapping) & 1;
52 }
53
54 struct quota_res {
55         u64                             sectors;
56 };
57
58 struct bch_writepage_io {
59         struct closure                  cl;
60         struct bch_inode_info           *inode;
61
62         /* must be last: */
63         struct bch_write_op             op;
64 };
65
66 struct dio_write {
67         struct completion               done;
68         struct kiocb                    *req;
69         struct mm_struct                *mm;
70         unsigned                        loop:1,
71                                         sync:1,
72                                         free_iov:1;
73         struct quota_res                quota_res;
74         u64                             written;
75
76         struct iov_iter                 iter;
77         struct iovec                    inline_vecs[2];
78
79         /* must be last: */
80         struct bch_write_op             op;
81 };
82
83 struct dio_read {
84         struct closure                  cl;
85         struct kiocb                    *req;
86         long                            ret;
87         bool                            should_dirty;
88         struct bch_read_bio             rbio;
89 };
90
91 /* pagecache_block must be held */
92 static int write_invalidate_inode_pages_range(struct address_space *mapping,
93                                               loff_t start, loff_t end)
94 {
95         int ret;
96
97         /*
98          * XXX: the way this is currently implemented, we can spin if a process
99          * is continually redirtying a specific page
100          */
101         do {
102                 if (!mapping->nrpages)
103                         return 0;
104
105                 ret = filemap_write_and_wait_range(mapping, start, end);
106                 if (ret)
107                         break;
108
109                 if (!mapping->nrpages)
110                         return 0;
111
112                 ret = invalidate_inode_pages2_range(mapping,
113                                 start >> PAGE_SHIFT,
114                                 end >> PAGE_SHIFT);
115         } while (ret == -EBUSY);
116
117         return ret;
118 }
119
120 /* quotas */
121
122 #ifdef CONFIG_BCACHEFS_QUOTA
123
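/*
 * A quota_res tracks sectors charged to the inode's quota as
 * KEY_TYPE_QUOTA_PREALLOC; they are either consumed by i_sectors_acct() or
 * given back by bch2_quota_reservation_put(). With quota support compiled out
 * these helpers are no-ops.
 */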
124 static void bch2_quota_reservation_put(struct bch_fs *c,
125                                        struct bch_inode_info *inode,
126                                        struct quota_res *res)
127 {
128         if (!res->sectors)
129                 return;
130
131         mutex_lock(&inode->ei_quota_lock);
132         BUG_ON(res->sectors > inode->ei_quota_reserved);
133
134         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
135                         -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
136         inode->ei_quota_reserved -= res->sectors;
137         mutex_unlock(&inode->ei_quota_lock);
138
139         res->sectors = 0;
140 }
141
142 static int bch2_quota_reservation_add(struct bch_fs *c,
143                                       struct bch_inode_info *inode,
144                                       struct quota_res *res,
145                                       unsigned sectors,
146                                       bool check_enospc)
147 {
148         int ret;
149
150         mutex_lock(&inode->ei_quota_lock);
151         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
152                               check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
153         if (likely(!ret)) {
154                 inode->ei_quota_reserved += sectors;
155                 res->sectors += sectors;
156         }
157         mutex_unlock(&inode->ei_quota_lock);
158
159         return ret;
160 }
161
162 #else
163
164 static void bch2_quota_reservation_put(struct bch_fs *c,
165                                        struct bch_inode_info *inode,
166                                        struct quota_res *res)
167 {
168 }
169
170 static int bch2_quota_reservation_add(struct bch_fs *c,
171                                       struct bch_inode_info *inode,
172                                       struct quota_res *res,
173                                       unsigned sectors,
174                                       bool check_enospc)
175 {
176         return 0;
177 }
178
179 #endif
180
181 /* i_size updates: */
182
183 struct inode_new_size {
184         loff_t          new_size;
185         u64             now;
186         unsigned        fields;
187 };
188
189 static int inode_set_size(struct bch_inode_info *inode,
190                           struct bch_inode_unpacked *bi,
191                           void *p)
192 {
193         struct inode_new_size *s = p;
194
195         bi->bi_size = s->new_size;
196         if (s->fields & ATTR_ATIME)
197                 bi->bi_atime = s->now;
198         if (s->fields & ATTR_MTIME)
199                 bi->bi_mtime = s->now;
200         if (s->fields & ATTR_CTIME)
201                 bi->bi_ctime = s->now;
202
203         return 0;
204 }
205
206 int __must_check bch2_write_inode_size(struct bch_fs *c,
207                                        struct bch_inode_info *inode,
208                                        loff_t new_size, unsigned fields)
209 {
210         struct inode_new_size s = {
211                 .new_size       = new_size,
212                 .now            = bch2_current_time(c),
213                 .fields         = fields,
214         };
215
216         return bch2_write_inode(c, inode, inode_set_size, &s, fields);
217 }
218
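/*
 * Adjust the in-memory i_blocks count by @sectors; with quota enabled, a
 * positive change backed by a quota_res is consumed from that reservation,
 * otherwise it is accounted directly against the inode's quota with
 * KEY_TYPE_QUOTA_WARN.
 */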
219 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
220                            struct quota_res *quota_res, s64 sectors)
221 {
222         if (!sectors)
223                 return;
224
225         mutex_lock(&inode->ei_quota_lock);
226         BUG_ON((s64) inode->v.i_blocks + sectors < 0);
227         inode->v.i_blocks += sectors;
228
229 #ifdef CONFIG_BCACHEFS_QUOTA
230         if (quota_res && sectors > 0) {
231                 BUG_ON(sectors > quota_res->sectors);
232                 BUG_ON(sectors > inode->ei_quota_reserved);
233
234                 quota_res->sectors -= sectors;
235                 inode->ei_quota_reserved -= sectors;
236         } else {
237                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
238         }
239 #endif
240         mutex_unlock(&inode->ei_quota_lock);
241 }
242
243 /* page state: */
244
245 /* stored in page->private: */
246
247 struct bch_page_sector {
248         /* Uncompressed, fully allocated replicas (or on disk reservation): */
249         unsigned                nr_replicas:4;
250
251         /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
252         unsigned                replicas_reserved:4;
253
254         /* i_sectors: */
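        /*
         * Allocation state of this sector, roughly: UNALLOCATED - no extent
         * or reservation backing it; RESERVED - backed by an on disk
         * reservation (e.g. from fallocate); DIRTY - dirty in the page cache,
         * space will be allocated at writeback time; DIRTY_RESERVED - dirty
         * and also backed by a reservation; ALLOCATED - an extent exists on
         * disk.
         */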
255         enum {
256                 SECTOR_UNALLOCATED,
257                 SECTOR_RESERVED,
258                 SECTOR_DIRTY,
259                 SECTOR_DIRTY_RESERVED,
260                 SECTOR_ALLOCATED,
261         }                       state:8;
262 };
263
264 struct bch_page_state {
265         spinlock_t              lock;
266         atomic_t                write_count;
267         bool                    uptodate;
268         struct bch_page_sector  s[PAGE_SECTORS];
269 };
270
271 static inline struct bch_page_state *__bch2_page_state(struct page *page)
272 {
273         return page_has_private(page)
274                 ? (struct bch_page_state *) page_private(page)
275                 : NULL;
276 }
277
278 static inline struct bch_page_state *bch2_page_state(struct page *page)
279 {
280         EBUG_ON(!PageLocked(page));
281
282         return __bch2_page_state(page);
283 }
284
285 /* for newly allocated pages: */
286 static void __bch2_page_state_release(struct page *page)
287 {
288         kfree(detach_page_private(page));
289 }
290
291 static void bch2_page_state_release(struct page *page)
292 {
293         EBUG_ON(!PageLocked(page));
294         __bch2_page_state_release(page);
295 }
296
297 /* for newly allocated pages: */
298 static struct bch_page_state *__bch2_page_state_create(struct page *page,
299                                                        gfp_t gfp)
300 {
301         struct bch_page_state *s;
302
303         s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
304         if (!s)
305                 return NULL;
306
307         spin_lock_init(&s->lock);
308         attach_page_private(page, s);
309         return s;
310 }
311
312 static struct bch_page_state *bch2_page_state_create(struct page *page,
313                                                      gfp_t gfp)
314 {
315         return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
316 }
317
318 static unsigned bkey_to_sector_state(const struct bkey *k)
319 {
320         if (k->type == KEY_TYPE_reservation)
321                 return SECTOR_RESERVED;
322         if (bkey_extent_is_allocation(k))
323                 return SECTOR_ALLOCATED;
324         return SECTOR_UNALLOCATED;
325 }
326
327 static void __bch2_page_state_set(struct page *page,
328                                   unsigned pg_offset, unsigned pg_len,
329                                   unsigned nr_ptrs, unsigned state)
330 {
331         struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
332         unsigned i;
333
334         BUG_ON(pg_offset >= PAGE_SECTORS);
335         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
336
337         spin_lock(&s->lock);
338
339         for (i = pg_offset; i < pg_offset + pg_len; i++) {
340                 s->s[i].nr_replicas = nr_ptrs;
341                 s->s[i].state = state;
342         }
343
344         if (i == PAGE_SECTORS)
345                 s->uptodate = true;
346
347         spin_unlock(&s->lock);
348 }
349
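/*
 * Initialize bch_page_state for a run of pages by walking the extents that
 * cover them: each sector's nr_replicas and allocation state are taken from
 * the covering extent, restarting the transaction on -EINTR.
 */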
350 static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
351                                struct page **pages, unsigned nr_pages)
352 {
353         struct btree_trans trans;
354         struct btree_iter iter;
355         struct bkey_s_c k;
356         u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
357         unsigned pg_idx = 0;
358         u32 snapshot;
359         int ret;
360
361         bch2_trans_init(&trans, c, 0, 0);
362 retry:
363         bch2_trans_begin(&trans);
364
365         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
366         if (ret)
367                 goto err;
368
369         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
370                            SPOS(inum.inum, offset, snapshot),
371                            BTREE_ITER_SLOTS, k, ret) {
372                 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
373                 unsigned state = bkey_to_sector_state(k.k);
374
375                 while (pg_idx < nr_pages) {
376                         struct page *page = pages[pg_idx];
377                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
378                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
379                         unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
380                         unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;
381
382                         BUG_ON(k.k->p.offset < pg_start);
383                         BUG_ON(bkey_start_offset(k.k) > pg_end);
384
385                         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
386                                 __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);
387
388                         if (k.k->p.offset < pg_end)
389                                 break;
390                         pg_idx++;
391                 }
392
393                 if (pg_idx == nr_pages)
394                         break;
395         }
396
397         offset = iter.pos.offset;
398         bch2_trans_iter_exit(&trans, &iter);
399 err:
400         if (ret == -EINTR)
401                 goto retry;
402         bch2_trans_exit(&trans);
403
404         return ret;
405 }
406
407 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
408 {
409         struct bvec_iter iter;
410         struct bio_vec bv;
411         unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
412                 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
413         unsigned state = bkey_to_sector_state(k.k);
414
415         bio_for_each_segment(bv, bio, iter)
416                 __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
417                                       bv.bv_len >> 9, nr_ptrs, state);
418 }
419
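/*
 * Walk the page cache over sectors [start, end) and clear nr_replicas for
 * every cached sector, so that later writes reserve space for them again
 * (see sectors_to_reserve()).
 */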
420 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
421                                        u64 start, u64 end)
422 {
423         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
424         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
425         struct pagevec pvec;
426
427         if (end <= start)
428                 return;
429
430         pagevec_init(&pvec);
431
432         do {
433                 unsigned nr_pages, i, j;
434
435                 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
436                                                 &index, end_index);
437                 for (i = 0; i < nr_pages; i++) {
438                         struct page *page = pvec.pages[i];
439                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
440                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
441                         unsigned pg_offset = max(start, pg_start) - pg_start;
442                         unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
443                         struct bch_page_state *s;
444
445                         BUG_ON(end <= pg_start);
446                         BUG_ON(pg_offset >= PAGE_SECTORS);
447                         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
448
449                         lock_page(page);
450                         s = bch2_page_state(page);
451
452                         if (s) {
453                                 spin_lock(&s->lock);
454                                 for (j = pg_offset; j < pg_offset + pg_len; j++)
455                                         s->s[j].nr_replicas = 0;
456                                 spin_unlock(&s->lock);
457                         }
458
459                         unlock_page(page);
460                 }
461                 pagevec_release(&pvec);
462         } while (index <= end_index);
463 }
464
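/*
 * Walk the page cache over sectors [start, end) and mark cached sectors as
 * backed by an on disk reservation: UNALLOCATED becomes RESERVED and DIRTY
 * becomes DIRTY_RESERVED, dropping the dirty sectors' i_sectors contribution
 * (presumably because the reservation already accounts for them).
 */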
465 static void mark_pagecache_reserved(struct bch_inode_info *inode,
466                                     u64 start, u64 end)
467 {
468         struct bch_fs *c = inode->v.i_sb->s_fs_info;
469         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
470         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
471         struct pagevec pvec;
472         s64 i_sectors_delta = 0;
473
474         if (end <= start)
475                 return;
476
477         pagevec_init(&pvec);
478
479         do {
480                 unsigned nr_pages, i, j;
481
482                 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
483                                                 &index, end_index);
484                 for (i = 0; i < nr_pages; i++) {
485                         struct page *page = pvec.pages[i];
486                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
487                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
488                         unsigned pg_offset = max(start, pg_start) - pg_start;
489                         unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
490                         struct bch_page_state *s;
491
492                         BUG_ON(end <= pg_start);
493                         BUG_ON(pg_offset >= PAGE_SECTORS);
494                         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
495
496                         lock_page(page);
497                         s = bch2_page_state(page);
498
499                         if (s) {
500                                 spin_lock(&s->lock);
501                                 for (j = pg_offset; j < pg_offset + pg_len; j++)
502                                         switch (s->s[j].state) {
503                                         case SECTOR_UNALLOCATED:
504                                                 s->s[j].state = SECTOR_RESERVED;
505                                                 break;
506                                         case SECTOR_DIRTY:
507                                                 s->s[j].state = SECTOR_DIRTY_RESERVED;
508                                                 i_sectors_delta--;
509                                                 break;
510                                         default:
511                                                 break;
512                                         }
513                                 spin_unlock(&s->lock);
514                         }
515
516                         unlock_page(page);
517                 }
518                 pagevec_release(&pvec);
519         } while (index <= end_index);
520
521         i_sectors_acct(c, inode, NULL, i_sectors_delta);
522 }
523
524 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
525 {
526         /* XXX: this should not be open coded */
527         return inode->ei_inode.bi_data_replicas
528                 ? inode->ei_inode.bi_data_replicas - 1
529                 : c->opts.data_replicas;
530 }
531
532 static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
533                                                   unsigned nr_replicas)
534 {
535         return max(0, (int) nr_replicas -
536                    s->nr_replicas -
537                    s->replicas_reserved);
538 }
539
540 static int bch2_get_page_disk_reservation(struct bch_fs *c,
541                                 struct bch_inode_info *inode,
542                                 struct page *page, bool check_enospc)
543 {
544         struct bch_page_state *s = bch2_page_state_create(page, 0);
545         unsigned nr_replicas = inode_nr_replicas(c, inode);
546         struct disk_reservation disk_res = { 0 };
547         unsigned i, disk_res_sectors = 0;
548         int ret;
549
550         if (!s)
551                 return -ENOMEM;
552
553         for (i = 0; i < ARRAY_SIZE(s->s); i++)
554                 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
555
556         if (!disk_res_sectors)
557                 return 0;
558
559         ret = bch2_disk_reservation_get(c, &disk_res,
560                                         disk_res_sectors, 1,
561                                         !check_enospc
562                                         ? BCH_DISK_RESERVATION_NOFAIL
563                                         : 0);
564         if (unlikely(ret))
565                 return ret;
566
567         for (i = 0; i < ARRAY_SIZE(s->s); i++)
568                 s->s[i].replicas_reserved +=
569                         sectors_to_reserve(&s->s[i], nr_replicas);
570
571         return 0;
572 }
573
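/*
 * A bch2_page_reservation bundles the disk and quota reservations needed for
 * a buffered write to a page: it is filled by bch2_page_reservation_get() and
 * transferred to the page state by bch2_set_page_dirty().
 */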
574 struct bch2_page_reservation {
575         struct disk_reservation disk;
576         struct quota_res        quota;
577 };
578
579 static void bch2_page_reservation_init(struct bch_fs *c,
580                         struct bch_inode_info *inode,
581                         struct bch2_page_reservation *res)
582 {
583         memset(res, 0, sizeof(*res));
584
585         res->disk.nr_replicas = inode_nr_replicas(c, inode);
586 }
587
588 static void bch2_page_reservation_put(struct bch_fs *c,
589                         struct bch_inode_info *inode,
590                         struct bch2_page_reservation *res)
591 {
592         bch2_disk_reservation_put(c, &res->disk);
593         bch2_quota_reservation_put(c, inode, &res->quota);
594 }
595
596 static int bch2_page_reservation_get(struct bch_fs *c,
597                         struct bch_inode_info *inode, struct page *page,
598                         struct bch2_page_reservation *res,
599                         unsigned offset, unsigned len, bool check_enospc)
600 {
601         struct bch_page_state *s = bch2_page_state_create(page, 0);
602         unsigned i, disk_sectors = 0, quota_sectors = 0;
603         int ret;
604
605         if (!s)
606                 return -ENOMEM;
607
608         BUG_ON(!s->uptodate);
609
610         for (i = round_down(offset, block_bytes(c)) >> 9;
611              i < round_up(offset + len, block_bytes(c)) >> 9;
612              i++) {
613                 disk_sectors += sectors_to_reserve(&s->s[i],
614                                                 res->disk.nr_replicas);
615                 quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
616         }
617
618         if (disk_sectors) {
619                 ret = bch2_disk_reservation_add(c, &res->disk,
620                                                 disk_sectors,
621                                                 !check_enospc
622                                                 ? BCH_DISK_RESERVATION_NOFAIL
623                                                 : 0);
624                 if (unlikely(ret))
625                         return ret;
626         }
627
628         if (quota_sectors) {
629                 ret = bch2_quota_reservation_add(c, inode, &res->quota,
630                                                  quota_sectors,
631                                                  check_enospc);
632                 if (unlikely(ret)) {
633                         struct disk_reservation tmp = {
634                                 .sectors = disk_sectors
635                         };
636
637                         bch2_disk_reservation_put(c, &tmp);
638                         res->disk.sectors -= disk_sectors;
639                         return ret;
640                 }
641         }
642
643         return 0;
644 }
645
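/*
 * Undo the accounting for a page leaving the page cache: give back per-sector
 * disk reservations, drop the i_sectors contribution of dirty sectors, and
 * free the bch_page_state. Called from invalidatepage/releasepage below.
 */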
646 static void bch2_clear_page_bits(struct page *page)
647 {
648         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
649         struct bch_fs *c = inode->v.i_sb->s_fs_info;
650         struct bch_page_state *s = bch2_page_state(page);
651         struct disk_reservation disk_res = { 0 };
652         int i, dirty_sectors = 0;
653
654         if (!s)
655                 return;
656
657         EBUG_ON(!PageLocked(page));
658         EBUG_ON(PageWriteback(page));
659
660         for (i = 0; i < ARRAY_SIZE(s->s); i++) {
661                 disk_res.sectors += s->s[i].replicas_reserved;
662                 s->s[i].replicas_reserved = 0;
663
664                 switch (s->s[i].state) {
665                 case SECTOR_DIRTY:
666                         s->s[i].state = SECTOR_UNALLOCATED;
667                         --dirty_sectors;
668                         break;
669                 case SECTOR_DIRTY_RESERVED:
670                         s->s[i].state = SECTOR_RESERVED;
671                         break;
672                 default:
673                         break;
674                 }
675         }
676
677         bch2_disk_reservation_put(c, &disk_res);
678
679         i_sectors_acct(c, inode, NULL, dirty_sectors);
680
681         bch2_page_state_release(page);
682 }
683
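/*
 * Mark a byte range of the page dirty: move disk reservation sectors from
 * @res into the page state, flip clean sectors to their dirty states, account
 * newly dirty sectors via i_sectors_acct(), then set the VM dirty bit.
 */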
684 static void bch2_set_page_dirty(struct bch_fs *c,
685                         struct bch_inode_info *inode, struct page *page,
686                         struct bch2_page_reservation *res,
687                         unsigned offset, unsigned len)
688 {
689         struct bch_page_state *s = bch2_page_state(page);
690         unsigned i, dirty_sectors = 0;
691
692         WARN_ON((u64) page_offset(page) + offset + len >
693                 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
694
695         spin_lock(&s->lock);
696
697         for (i = round_down(offset, block_bytes(c)) >> 9;
698              i < round_up(offset + len, block_bytes(c)) >> 9;
699              i++) {
700                 unsigned sectors = sectors_to_reserve(&s->s[i],
701                                                 res->disk.nr_replicas);
702
703                 /*
704                  * This can happen if we race with the error path in
705                  * bch2_writepage_io_done():
706                  */
707                 sectors = min_t(unsigned, sectors, res->disk.sectors);
708
709                 s->s[i].replicas_reserved += sectors;
710                 res->disk.sectors -= sectors;
711
712                 switch (s->s[i].state) {
713                 case SECTOR_UNALLOCATED:
714                         s->s[i].state = SECTOR_DIRTY;
715                         dirty_sectors++;
716                         break;
717                 case SECTOR_RESERVED:
718                         s->s[i].state = SECTOR_DIRTY_RESERVED;
719                         break;
720                 default:
721                         break;
722                 }
723         }
724
725         spin_unlock(&s->lock);
726
727         i_sectors_acct(c, inode, &res->quota, dirty_sectors);
728
729         if (!PageDirty(page))
730                 __set_page_dirty_nobuffers(page);
731 }
732
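/*
 * faults_disabled_mapping() is presumably set elsewhere (likely the direct IO
 * write path, not visible in this section) to the mapping whose pagecache
 * block lock the faulting task already holds: a fault on that mapping itself
 * returns SIGBUS, and the fdm > mapping case below handles lock ordering by
 * dropping fdm's lock, recording that via set_fdm_dropped_locks(), and
 * returning SIGBUS so the caller can retry.
 */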
733 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
734 {
735         struct file *file = vmf->vma->vm_file;
736         struct address_space *mapping = file->f_mapping;
737         struct address_space *fdm = faults_disabled_mapping();
738         struct bch_inode_info *inode = file_bch_inode(file);
739         int ret;
740
741         if (fdm == mapping)
742                 return VM_FAULT_SIGBUS;
743
744         /* Lock ordering: */
745         if (fdm > mapping) {
746                 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
747
748                 if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
749                         goto got_lock;
750
751                 bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);
752
753                 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
754                 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
755
756                 bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);
757
758                 /* Signal that lock has been dropped: */
759                 set_fdm_dropped_locks();
760                 return VM_FAULT_SIGBUS;
761         }
762
763         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
764 got_lock:
765         ret = filemap_fault(vmf);
766         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
767
768         return ret;
769 }
770
771 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
772 {
773         struct page *page = vmf->page;
774         struct file *file = vmf->vma->vm_file;
775         struct bch_inode_info *inode = file_bch_inode(file);
776         struct address_space *mapping = file->f_mapping;
777         struct bch_fs *c = inode->v.i_sb->s_fs_info;
778         struct bch2_page_reservation res;
779         unsigned len;
780         loff_t isize;
781         int ret;
782
783         bch2_page_reservation_init(c, inode, &res);
784
785         sb_start_pagefault(inode->v.i_sb);
786         file_update_time(file);
787
788         /*
789          * Not strictly necessary, but helps avoid dio writes livelocking in
790          * write_invalidate_inode_pages_range() - can drop this if/when we get
791          * a write_invalidate_inode_pages_range() that works without dropping
792          * page lock before invalidating page
793          */
794         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
795
796         lock_page(page);
797         isize = i_size_read(&inode->v);
798
799         if (page->mapping != mapping || page_offset(page) >= isize) {
800                 unlock_page(page);
801                 ret = VM_FAULT_NOPAGE;
802                 goto out;
803         }
804
805         len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
806
807         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
808                 if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
809                         unlock_page(page);
810                         ret = VM_FAULT_SIGBUS;
811                         goto out;
812                 }
813         }
814
815         if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
816                 unlock_page(page);
817                 ret = VM_FAULT_SIGBUS;
818                 goto out;
819         }
820
821         bch2_set_page_dirty(c, inode, page, &res, 0, len);
822         bch2_page_reservation_put(c, inode, &res);
823
824         wait_for_stable_page(page);
825         ret = VM_FAULT_LOCKED;
826 out:
827         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
828         sb_end_pagefault(inode->v.i_sb);
829
830         return ret;
831 }
832
833 void bch2_invalidatepage(struct page *page, unsigned int offset,
834                          unsigned int length)
835 {
836         if (offset || length < PAGE_SIZE)
837                 return;
838
839         bch2_clear_page_bits(page);
840 }
841
842 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
843 {
844         if (PageDirty(page))
845                 return 0;
846
847         bch2_clear_page_bits(page);
848         return 1;
849 }
850
851 #ifdef CONFIG_MIGRATION
852 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
853                       struct page *page, enum migrate_mode mode)
854 {
855         int ret;
856
857         EBUG_ON(!PageLocked(page));
858         EBUG_ON(!PageLocked(newpage));
859
860         ret = migrate_page_move_mapping(mapping, newpage, page, 0);
861         if (ret != MIGRATEPAGE_SUCCESS)
862                 return ret;
863
864         if (PagePrivate(page))
865                 attach_page_private(newpage, detach_page_private(page));
866
867         if (mode != MIGRATE_SYNC_NO_COPY)
868                 migrate_page_copy(newpage, page);
869         else
870                 migrate_page_states(newpage, page);
871         return MIGRATEPAGE_SUCCESS;
872 }
873 #endif
874
875 /* readpage(s): */
876
877 static void bch2_readpages_end_io(struct bio *bio)
878 {
879         struct bvec_iter_all iter;
880         struct bio_vec *bv;
881
882         bio_for_each_segment_all(bv, bio, iter) {
883                 struct page *page = bv->bv_page;
884
885                 if (!bio->bi_status) {
886                         SetPageUptodate(page);
887                 } else {
888                         ClearPageUptodate(page);
889                         SetPageError(page);
890                 }
891                 unlock_page(page);
892         }
893
894         bio_put(bio);
895 }
896
897 struct readpages_iter {
898         struct address_space    *mapping;
899         struct page             **pages;
900         unsigned                nr_pages;
901         unsigned                idx;
902         pgoff_t                 offset;
903 };
904
905 static int readpages_iter_init(struct readpages_iter *iter,
906                                struct readahead_control *ractl)
907 {
908         unsigned i, nr_pages = readahead_count(ractl);
909
910         memset(iter, 0, sizeof(*iter));
911
912         iter->mapping   = ractl->mapping;
913         iter->offset    = readahead_index(ractl);
914         iter->nr_pages  = nr_pages;
915
916         iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
917         if (!iter->pages)
918                 return -ENOMEM;
919
920         nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
921         for (i = 0; i < nr_pages; i++) {
922                 __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
923                 put_page(iter->pages[i]);
924         }
925
926         return 0;
927 }
928
929 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
930 {
931         if (iter->idx >= iter->nr_pages)
932                 return NULL;
933
934         EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
935
936         return iter->pages[iter->idx];
937 }
938
939 static bool extent_partial_reads_expensive(struct bkey_s_c k)
940 {
941         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
942         struct bch_extent_crc_unpacked crc;
943         const union bch_extent_entry *i;
944
945         bkey_for_each_crc(k.k, ptrs, crc, i)
946                 if (crc.csum_type || crc.compression_type)
947                         return true;
948         return false;
949 }
950
951 static void readpage_bio_extend(struct readpages_iter *iter,
952                                 struct bio *bio,
953                                 unsigned sectors_this_extent,
954                                 bool get_more)
955 {
956         while (bio_sectors(bio) < sectors_this_extent &&
957                bio->bi_vcnt < bio->bi_max_vecs) {
958                 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
959                 struct page *page = readpage_iter_next(iter);
960                 int ret;
961
962                 if (page) {
963                         if (iter->offset + iter->idx != page_offset)
964                                 break;
965
966                         iter->idx++;
967                 } else {
968                         if (!get_more)
969                                 break;
970
971                         page = xa_load(&iter->mapping->i_pages, page_offset);
972                         if (page && !xa_is_value(page))
973                                 break;
974
975                         page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
976                         if (!page)
977                                 break;
978
979                         if (!__bch2_page_state_create(page, 0)) {
980                                 put_page(page);
981                                 break;
982                         }
983
984                         ret = add_to_page_cache_lru(page, iter->mapping,
985                                                     page_offset, GFP_NOFS);
986                         if (ret) {
987                                 __bch2_page_state_release(page);
988                                 put_page(page);
989                                 break;
990                         }
991
992                         put_page(page);
993                 }
994
995                 BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
996         }
997 }
998
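/*
 * Main buffered read loop: walk the extents covering the bio, resolve
 * indirect (reflink) extents, optionally extend the bio over adjacent pages
 * via readpage_bio_extend(), set up page state for the sectors being read,
 * and issue bch2_read_extent() per fragment, restarting on -EINTR.
 */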
999 static void bchfs_read(struct btree_trans *trans,
1000                        struct bch_read_bio *rbio,
1001                        subvol_inum inum,
1002                        struct readpages_iter *readpages_iter)
1003 {
1004         struct bch_fs *c = trans->c;
1005         struct btree_iter iter;
1006         struct bkey_buf sk;
1007         int flags = BCH_READ_RETRY_IF_STALE|
1008                 BCH_READ_MAY_PROMOTE;
1009         u32 snapshot;
1010         int ret = 0;
1011
1012         rbio->c = c;
1013         rbio->start_time = local_clock();
1014         rbio->subvol = inum.subvol;
1015
1016         bch2_bkey_buf_init(&sk);
1017 retry:
1018         bch2_trans_begin(trans);
1019         iter = (struct btree_iter) { NULL };
1020
1021         ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1022         if (ret)
1023                 goto err;
1024
1025         bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1026                              SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1027                              BTREE_ITER_SLOTS|BTREE_ITER_FILTER_SNAPSHOTS);
1028         while (1) {
1029                 struct bkey_s_c k;
1030                 unsigned bytes, sectors, offset_into_extent;
1031                 enum btree_id data_btree = BTREE_ID_extents;
1032
1033                 /*
1034                  * read_extent -> io_time_reset may cause a transaction restart
1035                  * without returning an error, we need to check for that here:
1036                  */
1037                 if (!bch2_trans_relock(trans)) {
1038                         ret = -EINTR;
1039                         break;
1040                 }
1041
1042                 bch2_btree_iter_set_pos(&iter,
1043                                 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1044
1045                 k = bch2_btree_iter_peek_slot(&iter);
1046                 ret = bkey_err(k);
1047                 if (ret)
1048                         break;
1049
1050                 offset_into_extent = iter.pos.offset -
1051                         bkey_start_offset(k.k);
1052                 sectors = k.k->size - offset_into_extent;
1053
1054                 bch2_bkey_buf_reassemble(&sk, c, k);
1055
1056                 ret = bch2_read_indirect_extent(trans, &data_btree,
1057                                         &offset_into_extent, &sk);
1058                 if (ret)
1059                         break;
1060
1061                 k = bkey_i_to_s_c(sk.k);
1062
1063                 sectors = min(sectors, k.k->size - offset_into_extent);
1064
1065                 bch2_trans_unlock(trans);
1066
1067                 if (readpages_iter)
1068                         readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
1069                                             extent_partial_reads_expensive(k));
1070
1071                 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1072                 swap(rbio->bio.bi_iter.bi_size, bytes);
1073
1074                 if (rbio->bio.bi_iter.bi_size == bytes)
1075                         flags |= BCH_READ_LAST_FRAGMENT;
1076
1077                 bch2_bio_page_state_set(&rbio->bio, k);
1078
1079                 bch2_read_extent(trans, rbio, iter.pos,
1080                                  data_btree, k, offset_into_extent, flags);
1081
1082                 if (flags & BCH_READ_LAST_FRAGMENT)
1083                         break;
1084
1085                 swap(rbio->bio.bi_iter.bi_size, bytes);
1086                 bio_advance(&rbio->bio, bytes);
1087
1088                 ret = btree_trans_too_many_iters(trans);
1089                 if (ret)
1090                         break;
1091         }
1092 err:
1093         bch2_trans_iter_exit(trans, &iter);
1094
1095         if (ret == -EINTR)
1096                 goto retry;
1097
1098         if (ret) {
1099                 bch_err_inum_ratelimited(c, inum.inum,
1100                                 "read error %i from btree lookup", ret);
1101                 rbio->bio.bi_status = BLK_STS_IOERR;
1102                 bio_endio(&rbio->bio);
1103         }
1104
1105         bch2_bkey_buf_exit(&sk, c);
1106 }
1107
1108 void bch2_readahead(struct readahead_control *ractl)
1109 {
1110         struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1111         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1112         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1113         struct btree_trans trans;
1114         struct page *page;
1115         struct readpages_iter readpages_iter;
1116         int ret;
1117
1118         ret = readpages_iter_init(&readpages_iter, ractl);
1119         BUG_ON(ret);
1120
1121         bch2_trans_init(&trans, c, 0, 0);
1122
1123         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1124
1125         while ((page = readpage_iter_next(&readpages_iter))) {
1126                 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
1127                 unsigned n = min_t(unsigned,
1128                                    readpages_iter.nr_pages -
1129                                    readpages_iter.idx,
1130                                    BIO_MAX_VECS);
1131                 struct bch_read_bio *rbio =
1132                         rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1133                                   opts);
1134
1135                 readpages_iter.idx++;
1136
1137                 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
1138                 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
1139                 rbio->bio.bi_end_io = bch2_readpages_end_io;
1140                 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1141
1142                 bchfs_read(&trans, rbio, inode_inum(inode),
1143                            &readpages_iter);
1144         }
1145
1146         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1147
1148         bch2_trans_exit(&trans);
1149         kfree(readpages_iter.pages);
1150 }
1151
1152 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1153                              subvol_inum inum, struct page *page)
1154 {
1155         struct btree_trans trans;
1156
1157         bch2_page_state_create(page, __GFP_NOFAIL);
1158
1159         bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1160         rbio->bio.bi_iter.bi_sector =
1161                 (sector_t) page->index << PAGE_SECTORS_SHIFT;
1162         BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1163
1164         bch2_trans_init(&trans, c, 0, 0);
1165         bchfs_read(&trans, rbio, inum, NULL);
1166         bch2_trans_exit(&trans);
1167 }
1168
1169 int bch2_readpage(struct file *file, struct page *page)
1170 {
1171         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1172         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1173         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1174         struct bch_read_bio *rbio;
1175
1176         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1177         rbio->bio.bi_end_io = bch2_readpages_end_io;
1178
1179         __bchfs_readpage(c, rbio, inode_inum(inode), page);
1180         return 0;
1181 }
1182
1183 static void bch2_read_single_page_end_io(struct bio *bio)
1184 {
1185         complete(bio->bi_private);
1186 }
1187
1188 static int bch2_read_single_page(struct page *page,
1189                                  struct address_space *mapping)
1190 {
1191         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1192         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1193         struct bch_read_bio *rbio;
1194         int ret;
1195         DECLARE_COMPLETION_ONSTACK(done);
1196
1197         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1198                          io_opts(c, &inode->ei_inode));
1199         rbio->bio.bi_private = &done;
1200         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1201
1202         __bchfs_readpage(c, rbio, inode_inum(inode), page);
1203         wait_for_completion(&done);
1204
1205         ret = blk_status_to_errno(rbio->bio.bi_status);
1206         bio_put(&rbio->bio);
1207
1208         if (ret < 0)
1209                 return ret;
1210
1211         SetPageUptodate(page);
1212         return 0;
1213 }
1214
1215 /* writepages: */
1216
1217 struct bch_writepage_state {
1218         struct bch_writepage_io *io;
1219         struct bch_io_opts      opts;
1220 };
1221
1222 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1223                                                                   struct bch_inode_info *inode)
1224 {
1225         return (struct bch_writepage_state) {
1226                 .opts = io_opts(c, &inode->ei_inode)
1227         };
1228 }
1229
1230 static void bch2_writepage_io_free(struct closure *cl)
1231 {
1232         struct bch_writepage_io *io = container_of(cl,
1233                                         struct bch_writepage_io, cl);
1234
1235         bio_put(&io->op.wbio.bio);
1236 }
1237
1238 static void bch2_writepage_io_done(struct closure *cl)
1239 {
1240         struct bch_writepage_io *io = container_of(cl,
1241                                         struct bch_writepage_io, cl);
1242         struct bch_fs *c = io->op.c;
1243         struct bio *bio = &io->op.wbio.bio;
1244         struct bvec_iter_all iter;
1245         struct bio_vec *bvec;
1246         unsigned i;
1247
1248         up(&io->op.c->io_in_flight);
1249
1250         if (io->op.error) {
1251                 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1252
1253                 bio_for_each_segment_all(bvec, bio, iter) {
1254                         struct bch_page_state *s;
1255
1256                         SetPageError(bvec->bv_page);
1257                         mapping_set_error(bvec->bv_page->mapping, -EIO);
1258
1259                         s = __bch2_page_state(bvec->bv_page);
1260                         spin_lock(&s->lock);
1261                         for (i = 0; i < PAGE_SECTORS; i++)
1262                                 s->s[i].nr_replicas = 0;
1263                         spin_unlock(&s->lock);
1264                 }
1265         }
1266
1267         if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1268                 bio_for_each_segment_all(bvec, bio, iter) {
1269                         struct bch_page_state *s;
1270
1271                         s = __bch2_page_state(bvec->bv_page);
1272                         spin_lock(&s->lock);
1273                         for (i = 0; i < PAGE_SECTORS; i++)
1274                                 s->s[i].nr_replicas = 0;
1275                         spin_unlock(&s->lock);
1276                 }
1277         }
1278
1279         /*
1280          * racing with fallocate can cause us to add fewer sectors than
1281          * expected - but we shouldn't add more sectors than expected:
1282          */
1283         WARN_ON(io->op.i_sectors_delta > 0);
1284
1285         /*
1286         /*
1287          * An error (e.g. due to going read only) partway through a page can
1288          * throw that off slightly, which is why the following assertion
1289          * stays disabled:
1290            BUG_ON(io->op.i_sectors_delta >= PAGE_SECTORS);
1291          */
1292         /*
1293          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1294          * before calling end_page_writeback:
1295          */
1296         i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1297
1298         bio_for_each_segment_all(bvec, bio, iter) {
1299                 struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
1300
1301                 if (atomic_dec_and_test(&s->write_count))
1302                         end_page_writeback(bvec->bv_page);
1303         }
1304
1305         closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1306 }
1307
1308 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1309 {
1310         struct bch_writepage_io *io = w->io;
1311
1312         down(&io->op.c->io_in_flight);
1313
1314         w->io = NULL;
1315         closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
1316         continue_at(&io->cl, bch2_writepage_io_done, NULL);
1317 }
1318
1319 /*
1320  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1321  * possible, else allocating a new one:
1322  */
1323 static void bch2_writepage_io_alloc(struct bch_fs *c,
1324                                     struct writeback_control *wbc,
1325                                     struct bch_writepage_state *w,
1326                                     struct bch_inode_info *inode,
1327                                     u64 sector,
1328                                     unsigned nr_replicas)
1329 {
1330         struct bch_write_op *op;
1331
1332         w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
1333                                               &c->writepage_bioset),
1334                              struct bch_writepage_io, op.wbio.bio);
1335
1336         closure_init(&w->io->cl, NULL);
1337         w->io->inode            = inode;
1338
1339         op                      = &w->io->op;
1340         bch2_write_op_init(op, c, w->opts);
1341         op->target              = w->opts.foreground_target;
1342         op->nr_replicas         = nr_replicas;
1343         op->res.nr_replicas     = nr_replicas;
1344         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1345         op->subvol              = inode->ei_subvol;
1346         op->pos                 = POS(inode->v.i_ino, sector);
1347         op->wbio.bio.bi_iter.bi_sector = sector;
1348         op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
1349 }
1350
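/*
 * Write out one page: for each run of contiguous dirty sectors, append to the
 * current bch_writepage_io - flushing it first if the replica count changed,
 * it is full, or the range isn't contiguous - and transfer the page's disk
 * reservation and i_sectors delta to the write op.
 */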
1351 static int __bch2_writepage(struct page *page,
1352                             struct writeback_control *wbc,
1353                             void *data)
1354 {
1355         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1356         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1357         struct bch_writepage_state *w = data;
1358         struct bch_page_state *s, orig;
1359         unsigned i, offset, nr_replicas_this_write = U32_MAX;
1360         loff_t i_size = i_size_read(&inode->v);
1361         pgoff_t end_index = i_size >> PAGE_SHIFT;
1362         int ret;
1363
1364         EBUG_ON(!PageUptodate(page));
1365
1366         /* Is the page fully inside i_size? */
1367         if (page->index < end_index)
1368                 goto do_io;
1369
1370         /* Is the page fully outside i_size? (truncate in progress) */
1371         offset = i_size & (PAGE_SIZE - 1);
1372         if (page->index > end_index || !offset) {
1373                 unlock_page(page);
1374                 return 0;
1375         }
1376
1377         /*
1378          * The page straddles i_size.  It must be zeroed out on each and every
1379          * writepage invocation because it may be mmapped.  "A file is mapped
1380          * in multiples of the page size.  For a file that is not a multiple of
1381          * the  page size, the remaining memory is zeroed when mapped, and
1382          * writes to that region are not written out to the file."
1383          */
1384         zero_user_segment(page, offset, PAGE_SIZE);
1385 do_io:
1386         s = bch2_page_state_create(page, __GFP_NOFAIL);
1387
1388         /*
1389          * Things get really hairy with errors during writeback:
1390          */
1391         ret = bch2_get_page_disk_reservation(c, inode, page, false);
1392         BUG_ON(ret);
1393
1394         /* Before unlocking the page, get copy of reservations: */
1395         spin_lock(&s->lock);
1396         orig = *s;
1397         spin_unlock(&s->lock);
1398
1399         for (i = 0; i < PAGE_SECTORS; i++) {
1400                 if (s->s[i].state < SECTOR_DIRTY)
1401                         continue;
1402
1403                 nr_replicas_this_write =
1404                         min_t(unsigned, nr_replicas_this_write,
1405                               s->s[i].nr_replicas +
1406                               s->s[i].replicas_reserved);
1407         }
1408
1409         for (i = 0; i < PAGE_SECTORS; i++) {
1410                 if (s->s[i].state < SECTOR_DIRTY)
1411                         continue;
1412
1413                 s->s[i].nr_replicas = w->opts.compression
1414                         ? 0 : nr_replicas_this_write;
1415
1416                 s->s[i].replicas_reserved = 0;
1417                 s->s[i].state = SECTOR_ALLOCATED;
1418         }
1419
1420         BUG_ON(atomic_read(&s->write_count));
1421         atomic_set(&s->write_count, 1);
1422
1423         BUG_ON(PageWriteback(page));
1424         set_page_writeback(page);
1425
1426         unlock_page(page);
1427
1428         offset = 0;
1429         while (1) {
1430                 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1431                 u64 sector;
1432
1433                 while (offset < PAGE_SECTORS &&
1434                        orig.s[offset].state < SECTOR_DIRTY)
1435                         offset++;
1436
1437                 if (offset == PAGE_SECTORS)
1438                         break;
1439
1440                 while (offset + sectors < PAGE_SECTORS &&
1441                        orig.s[offset + sectors].state >= SECTOR_DIRTY) {
1442                         reserved_sectors += orig.s[offset + sectors].replicas_reserved;
1443                         dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
1444                         sectors++;
1445                 }
1446                 BUG_ON(!sectors);
1447
1448                 sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;
1449
1450                 if (w->io &&
1451                     (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1452                      bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
1453                      w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1454                      (BIO_MAX_VECS * PAGE_SIZE) ||
1455                      bio_end_sector(&w->io->op.wbio.bio) != sector))
1456                         bch2_writepage_do_io(w);
1457
1458                 if (!w->io)
1459                         bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1460                                                 nr_replicas_this_write);
1461
1462                 atomic_inc(&s->write_count);
1463
1464                 BUG_ON(inode != w->io->inode);
1465                 BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
1466                                      sectors << 9, offset << 9));
1467
1468                 /* Check for writing past i_size: */
1469                 WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1470                         round_up(i_size, block_bytes(c)));
1471
1472                 w->io->op.res.sectors += reserved_sectors;
1473                 w->io->op.i_sectors_delta -= dirty_sectors;
1474                 w->io->op.new_i_size = i_size;
1475
1476                 offset += sectors;
1477         }
1478
1479         if (atomic_dec_and_test(&s->write_count))
1480                 end_page_writeback(page);
1481
1482         return 0;
1483 }
1484
1485 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1486 {
1487         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1488         struct bch_writepage_state w =
1489                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1490         struct blk_plug plug;
1491         int ret;
1492
1493         blk_start_plug(&plug);
1494         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1495         if (w.io)
1496                 bch2_writepage_do_io(&w);
1497         blk_finish_plug(&plug);
1498         return ret;
1499 }
1500
1501 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1502 {
1503         struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1504         struct bch_writepage_state w =
1505                 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1506         int ret;
1507
1508         ret = __bch2_writepage(page, wbc, &w);
1509         if (w.io)
1510                 bch2_writepage_do_io(&w);
1511
1512         return ret;
1513 }
1514
1515 /* buffered writes: */
1516
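/*
 * ->write_begin: take the pagecache add lock, grab the page, read it in when
 * a partial write needs the existing contents, make sure its page state is
 * initialized, and get disk and quota reservations for the range being
 * written; the reservation is handed to ->write_end via *fsdata.
 */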
1517 int bch2_write_begin(struct file *file, struct address_space *mapping,
1518                      loff_t pos, unsigned len, unsigned flags,
1519                      struct page **pagep, void **fsdata)
1520 {
1521         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1522         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1523         struct bch2_page_reservation *res;
1524         pgoff_t index = pos >> PAGE_SHIFT;
1525         unsigned offset = pos & (PAGE_SIZE - 1);
1526         struct page *page;
1527         int ret = -ENOMEM;
1528
1529         res = kmalloc(sizeof(*res), GFP_KERNEL);
1530         if (!res)
1531                 return -ENOMEM;
1532
1533         bch2_page_reservation_init(c, inode, res);
1534         *fsdata = res;
1535
1536         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1537
1538         page = grab_cache_page_write_begin(mapping, index, flags);
1539         if (!page)
1540                 goto err_unlock;
1541
1542         if (PageUptodate(page))
1543                 goto out;
1544
1545         /* If we're writing the entire page, we don't need to read it in first: */
1546         if (len == PAGE_SIZE)
1547                 goto out;
1548
1549         if (!offset && pos + len >= inode->v.i_size) {
1550                 zero_user_segment(page, len, PAGE_SIZE);
1551                 flush_dcache_page(page);
1552                 goto out;
1553         }
1554
1555         if (index > inode->v.i_size >> PAGE_SHIFT) {
1556                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1557                 flush_dcache_page(page);
1558                 goto out;
1559         }
1560 readpage:
1561         ret = bch2_read_single_page(page, mapping);
1562         if (ret)
1563                 goto err;
1564 out:
1565         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1566                 ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
1567                 if (ret)
1568                         goto err;
1569         }
1570
1571         ret = bch2_page_reservation_get(c, inode, page, res,
1572                                         offset, len, true);
1573         if (ret) {
1574                 if (!PageUptodate(page)) {
1575                         /*
1576                          * If the page hasn't been read in, we won't know if we
1577                          * actually need a reservation - we don't actually need
1578                          * to read here, we just need to check if the page is
1579                          * fully backed by uncompressed data:
1580                          */
1581                         goto readpage;
1582                 }
1583
1584                 goto err;
1585         }
1586
1587         *pagep = page;
1588         return 0;
1589 err:
1590         unlock_page(page);
1591         put_page(page);
1592         *pagep = NULL;
1593 err_unlock:
1594         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1595         kfree(res);
1596         *fsdata = NULL;
1597         return ret;
1598 }
1599
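/*
 * .write_end: update i_size if the write extended the file, mark the page
 * uptodate and dirty, then drop the page reference and the reservation taken
 * in bch2_write_begin().
 */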
1600 int bch2_write_end(struct file *file, struct address_space *mapping,
1601                    loff_t pos, unsigned len, unsigned copied,
1602                    struct page *page, void *fsdata)
1603 {
1604         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1605         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1606         struct bch2_page_reservation *res = fsdata;
1607         unsigned offset = pos & (PAGE_SIZE - 1);
1608
1609         lockdep_assert_held(&inode->v.i_rwsem);
1610
1611         if (unlikely(copied < len && !PageUptodate(page))) {
1612                 /*
1613                  * The page needs to be read in, but that would destroy
1614                  * our partial write - simplest thing is to just force
1615                  * userspace to redo the write:
1616                  */
1617                 zero_user(page, 0, PAGE_SIZE);
1618                 flush_dcache_page(page);
1619                 copied = 0;
1620         }
1621
1622         spin_lock(&inode->v.i_lock);
1623         if (pos + copied > inode->v.i_size)
1624                 i_size_write(&inode->v, pos + copied);
1625         spin_unlock(&inode->v.i_lock);
1626
1627         if (copied) {
1628                 if (!PageUptodate(page))
1629                         SetPageUptodate(page);
1630
1631                 bch2_set_page_dirty(c, inode, page, res, offset, copied);
1632
1633                 inode->ei_last_dirtied = (unsigned long) current;
1634         }
1635
1636         unlock_page(page);
1637         put_page(page);
1638         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1639
1640         bch2_page_reservation_put(c, inode, res);
1641         kfree(res);
1642
1643         return copied;
1644 }
1645
1646 #define WRITE_BATCH_PAGES       32
1647
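/*
 * Core of the buffered write path: lock up to WRITE_BATCH_PAGES pages, read
 * in any partially overwritten edge pages, take page/disk/quota reservations
 * for the range, copy from the iov_iter, then dirty what was copied.
 * Returns bytes copied, or an error if nothing could be copied.
 */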
1648 static int __bch2_buffered_write(struct bch_inode_info *inode,
1649                                  struct address_space *mapping,
1650                                  struct iov_iter *iter,
1651                                  loff_t pos, unsigned len)
1652 {
1653         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1654         struct page *pages[WRITE_BATCH_PAGES];
1655         struct bch2_page_reservation res;
1656         unsigned long index = pos >> PAGE_SHIFT;
1657         unsigned offset = pos & (PAGE_SIZE - 1);
1658         unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1659         unsigned i, reserved = 0, set_dirty = 0;
1660         unsigned copied = 0, nr_pages_copied = 0;
1661         int ret = 0;
1662
1663         BUG_ON(!len);
1664         BUG_ON(nr_pages > ARRAY_SIZE(pages));
1665
1666         bch2_page_reservation_init(c, inode, &res);
1667
1668         for (i = 0; i < nr_pages; i++) {
1669                 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1670                 if (!pages[i]) {
1671                         nr_pages = i;
1672                         if (!i) {
1673                                 ret = -ENOMEM;
1674                                 goto out;
1675                         }
1676                         len = min_t(unsigned, len,
1677                                     nr_pages * PAGE_SIZE - offset);
1678                         break;
1679                 }
1680         }
1681
1682         if (offset && !PageUptodate(pages[0])) {
1683                 ret = bch2_read_single_page(pages[0], mapping);
1684                 if (ret)
1685                         goto out;
1686         }
1687
1688         if ((pos + len) & (PAGE_SIZE - 1) &&
1689             !PageUptodate(pages[nr_pages - 1])) {
1690                 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1691                         zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1692                 } else {
1693                         ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1694                         if (ret)
1695                                 goto out;
1696                 }
1697         }
1698
1699         while (reserved < len) {
1700                 unsigned i = (offset + reserved) >> PAGE_SHIFT;
1701                 struct page *page = pages[i];
1702                 unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
1703                 unsigned pg_len = min_t(unsigned, len - reserved,
1704                                         PAGE_SIZE - pg_offset);
1705
1706                 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1707                         ret = bch2_page_state_set(c, inode_inum(inode),
1708                                                   pages + i, nr_pages - i);
1709                         if (ret)
1710                                 goto out;
1711                 }
1712
1713                 ret = bch2_page_reservation_get(c, inode, page, &res,
1714                                                 pg_offset, pg_len, true);
1715                 if (ret)
1716                         goto out;
1717
1718                 reserved += pg_len;
1719         }
1720
1721         if (mapping_writably_mapped(mapping))
1722                 for (i = 0; i < nr_pages; i++)
1723                         flush_dcache_page(pages[i]);
1724
1725         while (copied < len) {
1726                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1727                 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1728                 unsigned pg_len = min_t(unsigned, len - copied,
1729                                         PAGE_SIZE - pg_offset);
1730                 unsigned pg_copied = copy_page_from_iter_atomic(page,
1731                                                 pg_offset, pg_len, iter);
1732
1733                 if (!pg_copied)
1734                         break;
1735
1736                 if (!PageUptodate(page) &&
1737                     pg_copied != PAGE_SIZE &&
1738                     pos + copied + pg_copied < inode->v.i_size) {
1739                         zero_user(page, 0, PAGE_SIZE);
1740                         break;
1741                 }
1742
1743                 flush_dcache_page(page);
1744                 copied += pg_copied;
1745
1746                 if (pg_copied != pg_len)
1747                         break;
1748         }
1749
1750         if (!copied)
1751                 goto out;
1752
1753         spin_lock(&inode->v.i_lock);
1754         if (pos + copied > inode->v.i_size)
1755                 i_size_write(&inode->v, pos + copied);
1756         spin_unlock(&inode->v.i_lock);
1757
1758         while (set_dirty < copied) {
1759                 struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
1760                 unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
1761                 unsigned pg_len = min_t(unsigned, copied - set_dirty,
1762                                         PAGE_SIZE - pg_offset);
1763
1764                 if (!PageUptodate(page))
1765                         SetPageUptodate(page);
1766
1767                 bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
1768                 unlock_page(page);
1769                 put_page(page);
1770
1771                 set_dirty += pg_len;
1772         }
1773
1774         nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1775         inode->ei_last_dirtied = (unsigned long) current;
1776 out:
1777         for (i = nr_pages_copied; i < nr_pages; i++) {
1778                 unlock_page(pages[i]);
1779                 put_page(pages[i]);
1780         }
1781
1782         bch2_page_reservation_put(c, inode, &res);
1783
1784         return copied ?: ret;
1785 }
1786
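/*
 * Loop over the iov_iter in WRITE_BATCH_PAGES sized chunks, faulting in the
 * source pages before each copy so we can't deadlock against our own page
 * locks, and falling back to single segment copies if no progress is made.
 */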
1787 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1788 {
1789         struct file *file = iocb->ki_filp;
1790         struct address_space *mapping = file->f_mapping;
1791         struct bch_inode_info *inode = file_bch_inode(file);
1792         loff_t pos = iocb->ki_pos;
1793         ssize_t written = 0;
1794         int ret = 0;
1795
1796         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1797
1798         do {
1799                 unsigned offset = pos & (PAGE_SIZE - 1);
1800                 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1801                               PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1802 again:
1803                 /*
1804                  * Bring in the user page that we will copy from _first_.
1805                  * Otherwise there's a nasty deadlock on copying from the
1806                  * same page as we're writing to, without it being marked
1807                  * up-to-date.
1808                  *
1809                  * Not only is this an optimisation, but it is also required
1810                  * to check that the address is actually valid, when atomic
1811                  * usercopies are used, below.
1812                  */
1813                 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1814                         bytes = min_t(unsigned long, iov_iter_count(iter),
1815                                       PAGE_SIZE - offset);
1816
1817                         if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1818                                 ret = -EFAULT;
1819                                 break;
1820                         }
1821                 }
1822
1823                 if (unlikely(fatal_signal_pending(current))) {
1824                         ret = -EINTR;
1825                         break;
1826                 }
1827
1828                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1829                 if (unlikely(ret < 0))
1830                         break;
1831
1832                 cond_resched();
1833
1834                 if (unlikely(ret == 0)) {
1835                         /*
1836                          * If we were unable to copy any data at all, we must
1837                          * fall back to a single segment length write.
1838                          *
1839                          * If we didn't fall back here, we could livelock
1840                          * because not all segments in the iov can be copied at
1841                          * once without a pagefault.
1842                          */
1843                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
1844                                       iov_iter_single_seg_count(iter));
1845                         goto again;
1846                 }
1847                 pos += ret;
1848                 written += ret;
1849                 ret = 0;
1850
1851                 balance_dirty_pages_ratelimited(mapping);
1852         } while (iov_iter_count(iter));
1853
1854         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1855
1856         return written ? written : ret;
1857 }
1858
1859 /* O_DIRECT reads */
1860
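/*
 * Completion helpers for O_DIRECT reads: pages that came from userspace must
 * be dirtied when the read completes; otherwise we just drop our references.
 */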
1861 static void bio_check_or_release(struct bio *bio, bool check_dirty)
1862 {
1863         if (check_dirty) {
1864                 bio_check_pages_dirty(bio);
1865         } else {
1866                 bio_release_pages(bio, false);
1867                 bio_put(bio);
1868         }
1869 }
1870
1871 static void bch2_dio_read_complete(struct closure *cl)
1872 {
1873         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1874
1875         dio->req->ki_complete(dio->req, dio->ret, 0);
1876         bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1877 }
1878
1879 static void bch2_direct_IO_read_endio(struct bio *bio)
1880 {
1881         struct dio_read *dio = bio->bi_private;
1882
1883         if (bio->bi_status)
1884                 dio->ret = blk_status_to_errno(bio->bi_status);
1885
1886         closure_put(&dio->cl);
1887 }
1888
1889 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1890 {
1891         struct dio_read *dio = bio->bi_private;
1892         bool should_dirty = dio->should_dirty;
1893
1894         bch2_direct_IO_read_endio(bio);
1895         bio_check_or_release(bio, should_dirty);
1896 }
1897
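/*
 * O_DIRECT read: must be block aligned, and is shortened to i_size. The
 * request may be split across multiple bios; the closure tracks when they
 * have all completed, either synchronously or via bch2_dio_read_complete().
 */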
1898 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1899 {
1900         struct file *file = req->ki_filp;
1901         struct bch_inode_info *inode = file_bch_inode(file);
1902         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1903         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1904         struct dio_read *dio;
1905         struct bio *bio;
1906         loff_t offset = req->ki_pos;
1907         bool sync = is_sync_kiocb(req);
1908         size_t shorten;
1909         ssize_t ret;
1910
1911         if ((offset|iter->count) & (block_bytes(c) - 1))
1912                 return -EINVAL;
1913
1914         ret = min_t(loff_t, iter->count,
1915                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1916
1917         if (!ret)
1918                 return ret;
1919
1920         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1921         iter->count -= shorten;
1922
1923         bio = bio_alloc_bioset(GFP_KERNEL,
1924                                iov_iter_npages(iter, BIO_MAX_VECS),
1925                                &c->dio_read_bioset);
1926
1927         bio->bi_end_io = bch2_direct_IO_read_endio;
1928
1929         dio = container_of(bio, struct dio_read, rbio.bio);
1930         closure_init(&dio->cl, NULL);
1931
1932         /*
1933          * this is a _really_ horrible hack just to avoid an atomic sub at the
1934          * end:
1935          */
1936         if (!sync) {
1937                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1938                 atomic_set(&dio->cl.remaining,
1939                            CLOSURE_REMAINING_INITIALIZER -
1940                            CLOSURE_RUNNING +
1941                            CLOSURE_DESTRUCTOR);
1942         } else {
1943                 atomic_set(&dio->cl.remaining,
1944                            CLOSURE_REMAINING_INITIALIZER + 1);
1945         }
1946
1947         dio->req        = req;
1948         dio->ret        = ret;
1949         /*
1950          * This is one of the sketchier things I've encountered: we have to skip
1951          * the dirtying of requests that are internal from the kernel (i.e. from
1952          * loopback), because we'll deadlock on page_lock.
1953          */
1954         dio->should_dirty = iter_is_iovec(iter);
1955
1956         goto start;
1957         while (iter->count) {
1958                 bio = bio_alloc_bioset(GFP_KERNEL,
1959                                        iov_iter_npages(iter, BIO_MAX_VECS),
1960                                        &c->bio_read);
1961                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1962 start:
1963                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1964                 bio->bi_iter.bi_sector  = offset >> 9;
1965                 bio->bi_private         = dio;
1966
1967                 ret = bio_iov_iter_get_pages(bio, iter);
1968                 if (ret < 0) {
1969                         /* XXX: fault inject this path */
1970                         bio->bi_status = BLK_STS_RESOURCE;
1971                         bio_endio(bio);
1972                         break;
1973                 }
1974
1975                 offset += bio->bi_iter.bi_size;
1976
1977                 if (dio->should_dirty)
1978                         bio_set_pages_dirty(bio);
1979
1980                 if (iter->count)
1981                         closure_get(&dio->cl);
1982
1983                 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
1984         }
1985
1986         iter->count += shorten;
1987
1988         if (sync) {
1989                 closure_sync(&dio->cl);
1990                 closure_debug_destroy(&dio->cl);
1991                 ret = dio->ret;
1992                 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1993                 return ret;
1994         } else {
1995                 return -EIOCBQUEUED;
1996         }
1997 }
1998
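/*
 * .read_iter: O_DIRECT reads flush the page cache for the range and go
 * through bch2_direct_IO_read(); buffered reads use generic_file_read_iter()
 * under the pagecache add lock.
 */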
1999 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2000 {
2001         struct file *file = iocb->ki_filp;
2002         struct bch_inode_info *inode = file_bch_inode(file);
2003         struct address_space *mapping = file->f_mapping;
2004         size_t count = iov_iter_count(iter);
2005         ssize_t ret;
2006
2007         if (!count)
2008                 return 0; /* skip atime */
2009
2010         if (iocb->ki_flags & IOCB_DIRECT) {
2011                 struct blk_plug plug;
2012
2013                 ret = filemap_write_and_wait_range(mapping,
2014                                         iocb->ki_pos,
2015                                         iocb->ki_pos + count - 1);
2016                 if (ret < 0)
2017                         return ret;
2018
2019                 file_accessed(file);
2020
2021                 blk_start_plug(&plug);
2022                 ret = bch2_direct_IO_read(iocb, iter);
2023                 blk_finish_plug(&plug);
2024
2025                 if (ret >= 0)
2026                         iocb->ki_pos += ret;
2027         } else {
2028                 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
2029                 ret = generic_file_read_iter(iocb, iter);
2030                 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
2031         }
2032
2033         return ret;
2034 }
2035
2036 /* O_DIRECT writes */
2037
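/*
 * Returns true if every extent in the range already has at least nr_replicas
 * replicas and (unless the write itself is compressed) isn't compressed -
 * used to decide whether an O_DIRECT overwrite can proceed without taking a
 * new disk reservation.
 */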
2038 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2039                                        u64 offset, u64 size,
2040                                        unsigned nr_replicas, bool compressed)
2041 {
2042         struct btree_trans trans;
2043         struct btree_iter iter;
2044         struct bkey_s_c k;
2045         u64 end = offset + size;
2046         u32 snapshot;
2047         bool ret = true;
2048         int err;
2049
2050         bch2_trans_init(&trans, c, 0, 0);
2051 retry:
2052         bch2_trans_begin(&trans);
2053
2054         err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2055         if (err)
2056                 goto err;
2057
2058         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2059                            SPOS(inum.inum, offset, snapshot),
2060                            BTREE_ITER_SLOTS, k, err) {
2061                 if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
2062                         break;
2063
2064                 if (k.k->p.snapshot != snapshot ||
2065                     nr_replicas > bch2_bkey_replicas(c, k) ||
2066                     (!compressed && bch2_bkey_sectors_compressed(k))) {
2067                         ret = false;
2068                         break;
2069                 }
2070         }
2071
2072         offset = iter.pos.offset;
2073         bch2_trans_iter_exit(&trans, &iter);
2074 err:
2075         if (err == -EINTR)
2076                 goto retry;
2077         bch2_trans_exit(&trans);
2078
2079         return err ? false : ret;
2080 }
2081
2082 static void bch2_dio_write_loop_async(struct bch_write_op *);
2083
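/*
 * Main O_DIRECT write loop: pin the user pages for the next chunk, set up and
 * submit a bch_write_op, then account what was written and advance. For
 * async requests the loop is re-entered from bch2_dio_write_loop_async() when
 * the write op completes.
 */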
2084 static long bch2_dio_write_loop(struct dio_write *dio)
2085 {
2086         bool kthread = (current->flags & PF_KTHREAD) != 0;
2087         struct kiocb *req = dio->req;
2088         struct address_space *mapping = req->ki_filp->f_mapping;
2089         struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
2090         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2091         struct bio *bio = &dio->op.wbio.bio;
2092         struct bvec_iter_all iter;
2093         struct bio_vec *bv;
2094         unsigned unaligned, iter_count;
2095         bool sync = dio->sync, dropped_locks;
2096         long ret;
2097
2098         if (dio->loop)
2099                 goto loop;
2100
2101         down(&c->io_in_flight);
2102
2103         while (1) {
2104                 iter_count = dio->iter.count;
2105
2106                 if (kthread)
2107                         kthread_use_mm(dio->mm);
2108                 BUG_ON(current->faults_disabled_mapping);
2109                 current->faults_disabled_mapping = mapping;
2110
2111                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2112
2113                 dropped_locks = fdm_dropped_locks();
2114
2115                 current->faults_disabled_mapping = NULL;
2116                 if (kthread)
2117                         kthread_unuse_mm(dio->mm);
2118
2119                 /*
2120                  * If the fault handler returned an error but also signalled
2121                  * that it dropped & retook ei_pagecache_lock, we just need to
2122                  * re-shoot down the page cache and retry:
2123                  */
2124                 if (dropped_locks && ret)
2125                         ret = 0;
2126
2127                 if (unlikely(ret < 0))
2128                         goto err;
2129
2130                 if (unlikely(dropped_locks)) {
2131                         ret = write_invalidate_inode_pages_range(mapping,
2132                                         req->ki_pos,
2133                                         req->ki_pos + iter_count - 1);
2134                         if (unlikely(ret))
2135                                 goto err;
2136
2137                         if (!bio->bi_iter.bi_size)
2138                                 continue;
2139                 }
2140
2141                 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2142                 bio->bi_iter.bi_size -= unaligned;
2143                 iov_iter_revert(&dio->iter, unaligned);
2144
2145                 if (!bio->bi_iter.bi_size) {
2146                         /*
2147                          * bio_iov_iter_get_pages was only able to get <
2148                          * blocksize worth of pages:
2149                          */
2150                         ret = -EFAULT;
2151                         goto err;
2152                 }
2153
2154                 bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
2155                 dio->op.end_io          = bch2_dio_write_loop_async;
2156                 dio->op.target          = dio->op.opts.foreground_target;
2157                 dio->op.write_point     = writepoint_hashed((unsigned long) current);
2158                 dio->op.nr_replicas     = dio->op.opts.data_replicas;
2159                 dio->op.subvol          = inode->ei_subvol;
2160                 dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2161
2162                 if ((req->ki_flags & IOCB_DSYNC) &&
2163                     !c->opts.journal_flush_disabled)
2164                         dio->op.flags |= BCH_WRITE_FLUSH;
2165                 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2166
2167                 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2168                                                 dio->op.opts.data_replicas, 0);
2169                 if (unlikely(ret) &&
2170                     !bch2_check_range_allocated(c, inode_inum(inode),
2171                                 dio->op.pos.offset, bio_sectors(bio),
2172                                 dio->op.opts.data_replicas,
2173                                 dio->op.opts.compression != 0))
2174                         goto err;
2175
2176                 task_io_account_write(bio->bi_iter.bi_size);
2177
2178                 if (!dio->sync && !dio->loop && dio->iter.count) {
2179                         struct iovec *iov = dio->inline_vecs;
2180
2181                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2182                                 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
2183                                               GFP_KERNEL);
2184                                 if (unlikely(!iov)) {
2185                                         dio->sync = sync = true;
2186                                         goto do_io;
2187                                 }
2188
2189                                 dio->free_iov = true;
2190                         }
2191
2192                         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2193                         dio->iter.iov = iov;
2194                 }
2195 do_io:
2196                 dio->loop = true;
2197                 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2198
2199                 if (sync)
2200                         wait_for_completion(&dio->done);
2201                 else
2202                         return -EIOCBQUEUED;
2203 loop:
2204                 i_sectors_acct(c, inode, &dio->quota_res,
2205                                dio->op.i_sectors_delta);
2206                 req->ki_pos += (u64) dio->op.written << 9;
2207                 dio->written += dio->op.written;
2208
2209                 spin_lock(&inode->v.i_lock);
2210                 if (req->ki_pos > inode->v.i_size)
2211                         i_size_write(&inode->v, req->ki_pos);
2212                 spin_unlock(&inode->v.i_lock);
2213
2214                 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2215                         bio_for_each_segment_all(bv, bio, iter)
2216                                 put_page(bv->bv_page);
2217                 bio->bi_vcnt = 0;
2218
2219                 if (dio->op.error) {
2220                         set_bit(EI_INODE_ERROR, &inode->ei_flags);
2221                         break;
2222                 }
2223
2224                 if (!dio->iter.count)
2225                         break;
2226
2227                 bio_reset(bio);
2228                 reinit_completion(&dio->done);
2229         }
2230
2231         ret = dio->op.error ?: ((long) dio->written << 9);
2232 err:
2233         up(&c->io_in_flight);
2234         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2235         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2236
2237         if (dio->free_iov)
2238                 kfree(dio->iter.iov);
2239
2240         if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2241                 bio_for_each_segment_all(bv, bio, iter)
2242                         put_page(bv->bv_page);
2243         bio_put(bio);
2244
2245         /* inode->i_dio_count is our ref on inode and thus bch_fs */
2246         inode_dio_end(&inode->v);
2247
2248         if (!sync) {
2249                 req->ki_complete(req, ret, 0);
2250                 ret = -EIOCBQUEUED;
2251         }
2252         return ret;
2253 }
2254
2255 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2256 {
2257         struct dio_write *dio = container_of(op, struct dio_write, op);
2258
2259         if (dio->sync)
2260                 complete(&dio->done);
2261         else
2262                 bch2_dio_write_loop(dio);
2263 }
2264
2265 static noinline
2266 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2267 {
2268         struct file *file = req->ki_filp;
2269         struct address_space *mapping = file->f_mapping;
2270         struct bch_inode_info *inode = file_bch_inode(file);
2271         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2272         struct dio_write *dio;
2273         struct bio *bio;
2274         bool locked = true, extending;
2275         ssize_t ret;
2276
2277         prefetch(&c->opts);
2278         prefetch((void *) &c->opts + 64);
2279         prefetch(&inode->ei_inode);
2280         prefetch((void *) &inode->ei_inode + 64);
2281
2282         inode_lock(&inode->v);
2283
2284         ret = generic_write_checks(req, iter);
2285         if (unlikely(ret <= 0))
2286                 goto err;
2287
2288         ret = file_remove_privs(file);
2289         if (unlikely(ret))
2290                 goto err;
2291
2292         ret = file_update_time(file);
2293         if (unlikely(ret))
2294                 goto err;
2295
2296         if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
                     ret = -EINVAL;
2297                 goto err;
             }
2298
2299         inode_dio_begin(&inode->v);
2300         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2301
2302         extending = req->ki_pos + iter->count > inode->v.i_size;
2303         if (!extending) {
2304                 inode_unlock(&inode->v);
2305                 locked = false;
2306         }
2307
2308         bio = bio_alloc_bioset(GFP_KERNEL,
2309                                iov_iter_is_bvec(iter)
2310                                ? 0
2311                                : iov_iter_npages(iter, BIO_MAX_VECS),
2312                                &c->dio_write_bioset);
2313         dio = container_of(bio, struct dio_write, op.wbio.bio);
2314         init_completion(&dio->done);
2315         dio->req                = req;
2316         dio->mm                 = current->mm;
2317         dio->loop               = false;
2318         dio->sync               = is_sync_kiocb(req) || extending;
2319         dio->free_iov           = false;
2320         dio->quota_res.sectors  = 0;
2321         dio->written            = 0;
2322         dio->iter               = *iter;
2323
2324         ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2325                                          iter->count >> 9, true);
2326         if (unlikely(ret))
2327                 goto err_put_bio;
2328
2329         ret = write_invalidate_inode_pages_range(mapping,
2330                                         req->ki_pos,
2331                                         req->ki_pos + iter->count - 1);
2332         if (unlikely(ret))
2333                 goto err_put_bio;
2334
2335         ret = bch2_dio_write_loop(dio);
2336 err:
2337         if (locked)
2338                 inode_unlock(&inode->v);
2339         return ret;
2340 err_put_bio:
2341         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2342         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2343         bio_put(bio);
2344         inode_dio_end(&inode->v);
2345         goto err;
2346 }
2347
2348 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2349 {
2350         struct file *file = iocb->ki_filp;
2351         struct bch_inode_info *inode = file_bch_inode(file);
2352         ssize_t ret;
2353
2354         if (iocb->ki_flags & IOCB_DIRECT)
2355                 return bch2_direct_write(iocb, from);
2356
2357         /* We can write back this queue in page reclaim */
2358         current->backing_dev_info = inode_to_bdi(&inode->v);
2359         inode_lock(&inode->v);
2360
2361         ret = generic_write_checks(iocb, from);
2362         if (ret <= 0)
2363                 goto unlock;
2364
2365         ret = file_remove_privs(file);
2366         if (ret)
2367                 goto unlock;
2368
2369         ret = file_update_time(file);
2370         if (ret)
2371                 goto unlock;
2372
2373         ret = bch2_buffered_write(iocb, from);
2374         if (likely(ret > 0))
2375                 iocb->ki_pos += ret;
2376 unlock:
2377         inode_unlock(&inode->v);
2378         current->backing_dev_info = NULL;
2379
2380         if (ret > 0)
2381                 ret = generic_write_sync(iocb, ret);
2382
2383         return ret;
2384 }
2385
2386 /* fsync: */
2387
2388 /*
2389  * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2390  * insert trigger: look up the btree inode instead
2391  */
2392 static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
2393 {
2394         struct bch_inode_unpacked inode;
2395         int ret;
2396
2397         if (c->opts.journal_flush_disabled)
2398                 return 0;
2399
2400         ret = bch2_inode_find_by_inum(c, inum, &inode);
2401         if (ret)
2402                 return ret;
2403
2404         return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
2405 }
2406
2407 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2408 {
2409         struct bch_inode_info *inode = file_bch_inode(file);
2410         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2411         int ret, ret2, ret3;
2412
2413         ret = file_write_and_wait_range(file, start, end);
2414         ret2 = sync_inode_metadata(&inode->v, 1);
2415         ret3 = bch2_flush_inode(c, inode_inum(inode));
2416
2417         return ret ?: ret2 ?: ret3;
2418 }
2419
2420 /* truncate: */
2421
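/*
 * Returns 1 if any extent in [start, end) contains actual data (as opposed to
 * a hole or a reservation), 0 if not, or a negative error code.
 */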
2422 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2423                                  struct bpos start,
2424                                  struct bpos end)
2425 {
2426         struct btree_trans trans;
2427         struct btree_iter iter;
2428         struct bkey_s_c k;
2429         int ret = 0;
2430
2431         bch2_trans_init(&trans, c, 0, 0);
2432 retry:
2433         bch2_trans_begin(&trans);
2434
2435         ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2436         if (ret)
2437                 goto err;
2438
2439         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
2440                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2441                         break;
2442
2443                 if (bkey_extent_is_data(k.k)) {
2444                         ret = 1;
2445                         break;
2446                 }
2447         }
2448         start = iter.pos;
2449         bch2_trans_iter_exit(&trans, &iter);
2450 err:
2451         if (ret == -EINTR)
2452                 goto retry;
2453
2454         bch2_trans_exit(&trans);
2455         return ret;
2456 }
2457
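/*
 * Zero out the part of a page that falls within [start, end), clearing the
 * affected sectors' dirty state. The return value tells the caller whether
 * writeback will still write this page out (and thus handle the i_size
 * update), per the comment further down.
 */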
2458 static int __bch2_truncate_page(struct bch_inode_info *inode,
2459                                 pgoff_t index, loff_t start, loff_t end)
2460 {
2461         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2462         struct address_space *mapping = inode->v.i_mapping;
2463         struct bch_page_state *s;
2464         unsigned start_offset = start & (PAGE_SIZE - 1);
2465         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2466         unsigned i;
2467         struct page *page;
2468         s64 i_sectors_delta = 0;
2469         int ret = 0;
2470
2471         /* Page boundary? Nothing to do */
2472         if (!((index == start >> PAGE_SHIFT && start_offset) ||
2473               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2474                 return 0;
2475
2476         /* Above i_size? */
2477         if (index << PAGE_SHIFT >= inode->v.i_size)
2478                 return 0;
2479
2480         page = find_lock_page(mapping, index);
2481         if (!page) {
2482                 /*
2483                  * XXX: we're doing two index lookups when we end up reading the
2484                  * page
2485                  */
2486                 ret = range_has_data(c, inode->ei_subvol,
2487                                 POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
2488                                 POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
2489                 if (ret <= 0)
2490                         return ret;
2491
2492                 page = find_or_create_page(mapping, index, GFP_KERNEL);
2493                 if (unlikely(!page)) {
2494                         ret = -ENOMEM;
2495                         goto out;
2496                 }
2497         }
2498
2499         s = bch2_page_state_create(page, 0);
2500         if (!s) {
2501                 ret = -ENOMEM;
2502                 goto unlock;
2503         }
2504
2505         if (!PageUptodate(page)) {
2506                 ret = bch2_read_single_page(page, mapping);
2507                 if (ret)
2508                         goto unlock;
2509         }
2510
2511         if (index != start >> PAGE_SHIFT)
2512                 start_offset = 0;
2513         if (index != end >> PAGE_SHIFT)
2514                 end_offset = PAGE_SIZE;
2515
2516         for (i = round_up(start_offset, block_bytes(c)) >> 9;
2517              i < round_down(end_offset, block_bytes(c)) >> 9;
2518              i++) {
2519                 s->s[i].nr_replicas     = 0;
2520                 if (s->s[i].state == SECTOR_DIRTY)
2521                         i_sectors_delta--;
2522                 s->s[i].state           = SECTOR_UNALLOCATED;
2523         }
2524
2525         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2526
2527         /*
2528          * Caller needs to know whether this page will be written out by
2529          * writeback - doing an i_size update if necessary - or whether it will
2530          * be responsible for the i_size update:
2531          */
2532         ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
2533                           PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;
2534
2535         zero_user_segment(page, start_offset, end_offset);
2536
2537         /*
2538          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2539          *
2540          * XXX: because we aren't currently tracking whether the page has actual
2541          * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2542          */
2543         BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));
2544
2545         /*
2546          * This removes any writeable userspace mappings; we need to force
2547          * .page_mkwrite to be called again before any mmapped writes, to
2548          * redirty the full page:
2549          */
2550         page_mkclean(page);
2551         __set_page_dirty_nobuffers(page);
2552 unlock:
2553         unlock_page(page);
2554         put_page(page);
2555 out:
2556         return ret;
2557 }
2558
2559 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2560 {
2561         return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2562                                     from, round_up(from, PAGE_SIZE));
2563 }
2564
2565 static int bch2_truncate_pages(struct bch_inode_info *inode,
2566                                loff_t start, loff_t end)
2567 {
2568         int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
2569                                        start, end);
2570
2571         if (ret >= 0 &&
2572             start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2573                 ret = __bch2_truncate_page(inode,
2574                                            end >> PAGE_SHIFT,
2575                                            start, end);
2576         return ret;
2577 }
2578
2579 static int bch2_extend(struct user_namespace *mnt_userns,
2580                        struct bch_inode_info *inode,
2581                        struct bch_inode_unpacked *inode_u,
2582                        struct iattr *iattr)
2583 {
2584         struct address_space *mapping = inode->v.i_mapping;
2585         int ret;
2586
2587         /*
2588          * sync appends:
2589          *
2590          * this has to be done _before_ extending i_size:
2591          */
2592         ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2593         if (ret)
2594                 return ret;
2595
2596         truncate_setsize(&inode->v, iattr->ia_size);
2597
2598         return bch2_setattr_nonsize(mnt_userns, inode, iattr);
2599 }
2600
2601 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2602                                    struct bch_inode_unpacked *bi,
2603                                    void *p)
2604 {
2605         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2606         return 0;
2607 }
2608
2609 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2610                                   struct bch_inode_unpacked *bi, void *p)
2611 {
2612         u64 *new_i_size = p;
2613
2614         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2615         bi->bi_size = *new_i_size;
2616         return 0;
2617 }
2618
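/*
 * Truncate: extending is handled by bch2_extend(); shrinking zeroes the new
 * partial EOF block, writes the reduced size with BCH_INODE_I_SIZE_DIRTY set,
 * deletes extents past the new EOF with bch2_fpunch(), then clears the flag.
 */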
2619 int bch2_truncate(struct user_namespace *mnt_userns,
2620                   struct bch_inode_info *inode, struct iattr *iattr)
2621 {
2622         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2623         struct address_space *mapping = inode->v.i_mapping;
2624         struct bch_inode_unpacked inode_u;
2625         u64 new_i_size = iattr->ia_size;
2626         s64 i_sectors_delta = 0;
2627         int ret = 0;
2628
2629         /*
2630          * If the truncate call will change the size of the file, the
2631          * cmtimes should be updated. If the size will not change, we
2632          * do not need to update the cmtimes.
2633          */
2634         if (iattr->ia_size != inode->v.i_size) {
2635                 if (!(iattr->ia_valid & ATTR_MTIME))
2636                         ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2637                 if (!(iattr->ia_valid & ATTR_CTIME))
2638                         ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2639                 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2640         }
2641
2642         inode_dio_wait(&inode->v);
2643         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2644
2645         ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2646         if (ret)
2647                 goto err;
2648
2649         /*
2650          * check this before next assertion; on filesystem error our normal
2651          * invariants are a bit broken (truncate has to truncate the page cache
2652          * before the inode).
2653          */
2654         ret = bch2_journal_error(&c->journal);
2655         if (ret)
2656                 goto err;
2657
2658         WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
2659                 inode->v.i_size < inode_u.bi_size);
2660
2661         if (iattr->ia_size > inode->v.i_size) {
2662                 ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
2663                 goto err;
2664         }
2665
2666         iattr->ia_valid &= ~ATTR_SIZE;
2667
2668         ret = bch2_truncate_page(inode, iattr->ia_size);
2669         if (unlikely(ret < 0))
2670                 goto err;
2671
2672         /*
2673          * When extending, we're going to write the new i_size to disk
2674          * immediately so we need to flush anything above the current on disk
2675          * i_size first:
2676          *
2677          * Also, when extending we need to flush the page that i_size currently
2678          * straddles - if it's mapped to userspace, we need to ensure that
2679          * userspace has to redirty it and call .mkwrite -> set_page_dirty
2680          * again to allocate the part of the page that was extended.
2681          */
2682         if (iattr->ia_size > inode_u.bi_size)
2683                 ret = filemap_write_and_wait_range(mapping,
2684                                 inode_u.bi_size,
2685                                 iattr->ia_size - 1);
2686         else if (iattr->ia_size & (PAGE_SIZE - 1))
2687                 ret = filemap_write_and_wait_range(mapping,
2688                                 round_down(iattr->ia_size, PAGE_SIZE),
2689                                 iattr->ia_size - 1);
2690         if (ret)
2691                 goto err;
2692
2693         mutex_lock(&inode->ei_update_lock);
2694         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2695                                &new_i_size, 0);
2696         mutex_unlock(&inode->ei_update_lock);
2697
2698         if (unlikely(ret))
2699                 goto err;
2700
2701         truncate_setsize(&inode->v, iattr->ia_size);
2702
2703         ret = bch2_fpunch(c, inode_inum(inode),
2704                         round_up(iattr->ia_size, block_bytes(c)) >> 9,
2705                         U64_MAX, &i_sectors_delta);
2706         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2707
2708         WARN_ON(!inode->v.i_size && inode->v.i_blocks &&
2709                 !bch2_journal_error(&c->journal));
2710
2711         if (unlikely(ret))
2712                 goto err;
2713
2714         mutex_lock(&inode->ei_update_lock);
2715         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
2716         mutex_unlock(&inode->ei_update_lock);
2717
2718         ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
2719 err:
2720         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2721         return ret;
2722 }
2723
2724 /* fallocate: */
2725
2726 static int inode_update_times_fn(struct bch_inode_info *inode,
2727                                  struct bch_inode_unpacked *bi, void *p)
2728 {
2729         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2730
2731         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
2732         return 0;
2733 }
2734
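/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at either end of the range,
 * drop the page cache for it, then delete the fully covered extents with
 * bch2_fpunch().
 */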
2735 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2736 {
2737         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2738         u64 end         = offset + len;
2739         u64 block_start = round_up(offset, block_bytes(c));
2740         u64 block_end   = round_down(end, block_bytes(c));
2741         bool truncated_last_page;
2742         int ret = 0;
2743
2744         ret = bch2_truncate_pages(inode, offset, end);
2745         if (unlikely(ret < 0))
2746                 goto err;
2747
2748         truncated_last_page = ret;
2749
2750         truncate_pagecache_range(&inode->v, offset, end - 1);
2751
2752         if (block_start < block_end) {
2753                 s64 i_sectors_delta = 0;
2754
2755                 ret = bch2_fpunch(c, inode_inum(inode),
2756                                   block_start >> 9, block_end >> 9,
2757                                   &i_sectors_delta);
2758                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2759         }
2760
2761         mutex_lock(&inode->ei_update_lock);
2762         if (end >= inode->v.i_size && !truncated_last_page) {
2763                 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
2764                                             ATTR_MTIME|ATTR_CTIME);
2765         } else {
2766                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
2767                                        ATTR_MTIME|ATTR_CTIME);
2768         }
2769         mutex_unlock(&inode->ei_update_lock);
2770 err:
2771         return ret;
2772 }
2773
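/*
 * FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE: shift the extents above
 * the target range by len - down after punching the range for collapse, up to
 * open a hole for insert - one btree transaction per extent moved, updating
 * i_size to match.
 */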
2774 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
2775                                    loff_t offset, loff_t len,
2776                                    bool insert)
2777 {
2778         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2779         struct address_space *mapping = inode->v.i_mapping;
2780         struct bkey_buf copy;
2781         struct btree_trans trans;
2782         struct btree_iter src, dst, del;
2783         loff_t shift, new_size;
2784         u64 src_start;
2785         int ret = 0;
2786
2787         if ((offset | len) & (block_bytes(c) - 1))
2788                 return -EINVAL;
2789
2790         if (insert) {
2791                 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
2792                         return -EFBIG;
2793
2794                 if (offset >= inode->v.i_size)
2795                         return -EINVAL;
2796
2797                 src_start       = U64_MAX;
2798                 shift           = len;
2799         } else {
2800                 if (offset + len >= inode->v.i_size)
2801                         return -EINVAL;
2802
2803                 src_start       = offset + len;
2804                 shift           = -len;
2805         }
2806
2807         new_size = inode->v.i_size + shift;
2808
2809         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2810         if (ret)
2811                 return ret;
2812
2813         if (insert) {
2814                 i_size_write(&inode->v, new_size);
2815                 mutex_lock(&inode->ei_update_lock);
2816                 ret = bch2_write_inode_size(c, inode, new_size,
2817                                             ATTR_MTIME|ATTR_CTIME);
2818                 mutex_unlock(&inode->ei_update_lock);
2819         } else {
2820                 s64 i_sectors_delta = 0;
2821
2822                 ret = bch2_fpunch(c, inode_inum(inode),
2823                                   offset >> 9, (offset + len) >> 9,
2824                                   &i_sectors_delta);
2825                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2826
2827                 if (ret)
2828                         return ret;
2829         }
2830
2831         bch2_bkey_buf_init(&copy);
2832         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
2833         bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
2834                         POS(inode->v.i_ino, src_start >> 9),
2835                         BTREE_ITER_INTENT);
2836         bch2_trans_copy_iter(&dst, &src);
2837         bch2_trans_copy_iter(&del, &src);
2838
2839         while (ret == 0 || ret == -EINTR) {
2840                 struct disk_reservation disk_res =
2841                         bch2_disk_reservation_init(c, 0);
2842                 struct bkey_i delete;
2843                 struct bkey_s_c k;
2844                 struct bpos next_pos;
2845                 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
2846                 struct bpos atomic_end;
2847                 unsigned trigger_flags = 0;
2848                 u32 snapshot;
2849
2850                 bch2_trans_begin(&trans);
2851
2852                 ret = bch2_subvolume_get_snapshot(&trans,
2853                                         inode->ei_subvol, &snapshot);
2854                 if (ret)
2855                         continue;
2856
2857                 bch2_btree_iter_set_snapshot(&src, snapshot);
2858                 bch2_btree_iter_set_snapshot(&dst, snapshot);
2859                 bch2_btree_iter_set_snapshot(&del, snapshot);
2860
2861                 bch2_trans_begin(&trans);
2862
2863                 k = insert
2864                         ? bch2_btree_iter_peek_prev(&src)
2865                         : bch2_btree_iter_peek(&src);
2866                 if ((ret = bkey_err(k)))
2867                         continue;
2868
2869                 if (!k.k || k.k->p.inode != inode->v.i_ino)
2870                         break;
2871
2872                 if (insert &&
2873                     bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
2874                         break;
2875 reassemble:
2876                 bch2_bkey_buf_reassemble(&copy, c, k);
2877
2878                 if (insert &&
2879                     bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
2880                         bch2_cut_front(move_pos, copy.k);
2881
2882                 copy.k->k.p.offset += shift >> 9;
2883                 bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
2884
2885                 ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
2886                 if (ret)
2887                         continue;
2888
2889                 if (bkey_cmp(atomic_end, copy.k->k.p)) {
2890                         if (insert) {
2891                                 move_pos = atomic_end;
2892                                 move_pos.offset -= shift >> 9;
2893                                 goto reassemble;
2894                         } else {
2895                                 bch2_cut_back(atomic_end, copy.k);
2896                         }
2897                 }
2898
2899                 bkey_init(&delete.k);
2900                 delete.k.p = copy.k->k.p;
2901                 delete.k.size = copy.k->k.size;
2902                 delete.k.p.offset -= shift >> 9;
2903                 bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
2904
2905                 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
2906
2907                 if (copy.k->k.size == k.k->size) {
2908                         /*
2909                          * If we're moving the entire extent, we can skip
2910                          * running triggers:
2911                          */
2912                         trigger_flags |= BTREE_TRIGGER_NORUN;
2913                 } else {
2914                         /* We might end up splitting compressed extents: */
2915                         unsigned nr_ptrs =
2916                                 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
2917
2918                         ret = bch2_disk_reservation_get(c, &disk_res,
2919                                         copy.k->k.size, nr_ptrs,
2920                                         BCH_DISK_RESERVATION_NOFAIL);
2921                         BUG_ON(ret);
2922                 }
2923
2924                 ret =   bch2_btree_iter_traverse(&del) ?:
2925                         bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
2926                         bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
2927                         bch2_trans_commit(&trans, &disk_res, NULL,
2928                                           BTREE_INSERT_NOFAIL);
2929                 bch2_disk_reservation_put(c, &disk_res);
2930
2931                 if (!ret)
2932                         bch2_btree_iter_set_pos(&src, next_pos);
2933         }
2934         bch2_trans_iter_exit(&trans, &del);
2935         bch2_trans_iter_exit(&trans, &dst);
2936         bch2_trans_iter_exit(&trans, &src);
2937         bch2_trans_exit(&trans);
2938         bch2_bkey_buf_exit(&copy, c);
2939
2940         if (ret)
2941                 return ret;
2942
2943         mutex_lock(&inode->ei_update_lock);
2944         if (!insert) {
2945                 i_size_write(&inode->v, new_size);
2946                 ret = bch2_write_inode_size(c, inode, new_size,
2947                                             ATTR_MTIME|ATTR_CTIME);
2948         } else {
2949                 /* We need an inode update to update bi_journal_seq for fsync: */
2950                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
2951                                        ATTR_MTIME|ATTR_CTIME);
2952         }
2953         mutex_unlock(&inode->ei_update_lock);
2954         return ret;
2955 }
2956
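/*
 * Walk the range one extent at a time, replacing holes (and, for
 * FALLOC_FL_ZERO_RANGE, existing data) with reservation keys so that future
 * writes to the range can't fail with -ENOSPC.
 */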
2957 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
2958                              u64 start_sector, u64 end_sector)
2959 {
2960         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2961         struct btree_trans trans;
2962         struct btree_iter iter;
2963         struct bpos end_pos = POS(inode->v.i_ino, end_sector);
2964         unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2965         int ret = 0;
2966
2967         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
2968
2969         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
2970                         POS(inode->v.i_ino, start_sector),
2971                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2972
2973         while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
2974                 s64 i_sectors_delta = 0;
2975                 struct disk_reservation disk_res = { 0 };
2976                 struct quota_res quota_res = { 0 };
2977                 struct bkey_i_reservation reservation;
2978                 struct bkey_s_c k;
2979                 unsigned sectors;
2980                 u32 snapshot;
2981
2982                 bch2_trans_begin(&trans);
2983
2984                 ret = bch2_subvolume_get_snapshot(&trans,
2985                                         inode->ei_subvol, &snapshot);
2986                 if (ret)
2987                         goto bkey_err;
2988
2989                 bch2_btree_iter_set_snapshot(&iter, snapshot);
2990
2991                 k = bch2_btree_iter_peek_slot(&iter);
2992                 if ((ret = bkey_err(k)))
2993                         goto bkey_err;
2994
2995                 /* already reserved */
2996                 if (k.k->type == KEY_TYPE_reservation &&
2997                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2998                         bch2_btree_iter_advance(&iter);
2999                         continue;
3000                 }
3001
3002                 if (bkey_extent_is_data(k.k) &&
3003                     !(mode & FALLOC_FL_ZERO_RANGE)) {
3004                         bch2_btree_iter_advance(&iter);
3005                         continue;
3006                 }
3007
3008                 bkey_reservation_init(&reservation.k_i);
3009                 reservation.k.type      = KEY_TYPE_reservation;
3010                 reservation.k.p         = k.k->p;
3011                 reservation.k.size      = k.k->size;
3012
3013                 bch2_cut_front(iter.pos,        &reservation.k_i);
3014                 bch2_cut_back(end_pos,          &reservation.k_i);
3015
3016                 sectors = reservation.k.size;
3017                 reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
3018
3019                 if (!bkey_extent_is_allocation(k.k)) {
3020                         ret = bch2_quota_reservation_add(c, inode,
3021                                         &quota_res,
3022                                         sectors, true);
3023                         if (unlikely(ret))
3024                                 goto bkey_err;
3025                 }
3026
3027                 if (reservation.v.nr_replicas < replicas ||
3028                     bch2_bkey_sectors_compressed(k)) {
3029                         ret = bch2_disk_reservation_get(c, &disk_res, sectors,
3030                                                         replicas, 0);
3031                         if (unlikely(ret))
3032                                 goto bkey_err;
3033
3034                         reservation.v.nr_replicas = disk_res.nr_replicas;
3035                 }
3036
3037                 ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
3038                                          &reservation.k_i,
3040                                          &disk_res, NULL,
3041                                          0, &i_sectors_delta, true);
3041                 if (ret)
3042                         goto bkey_err;
3043                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3044 bkey_err:
3045                 bch2_quota_reservation_put(c, inode, &quota_res);
3046                 bch2_disk_reservation_put(c, &disk_res);
3047                 if (ret == -EINTR)
3048                         ret = 0;
3049         }
3050
3051         bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
3052         mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
3053
3054         if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
3055                 struct quota_res quota_res = { 0 };
3056                 s64 i_sectors_delta = 0;
3057
3058                 bch2_fpunch_at(&trans, &iter, inode_inum(inode),
3059                                end_sector, &i_sectors_delta);
3060                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3061                 bch2_quota_reservation_put(c, inode, &quota_res);
3062         }
3063
3064         bch2_trans_iter_exit(&trans, &iter);
3065         bch2_trans_exit(&trans);
3066         return ret;
3067 }
3068
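/*
 * fallocate()/zero range proper: validate the new size, truncate partial
 * pages for FALLOC_FL_ZERO_RANGE, reserve the block-aligned range via
 * __bchfs_fallocate(), then bring the VFS and btree i_size back in sync.
 */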
3069 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
3070                             loff_t offset, loff_t len)
3071 {
3072         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3073         u64 end         = offset + len;
3074         u64 block_start = round_down(offset,    block_bytes(c));
3075         u64 block_end   = round_up(end,         block_bytes(c));
3076         bool truncated_last_page = false;
3077         int ret, ret2 = 0;
3078
3079         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
3080                 ret = inode_newsize_ok(&inode->v, end);
3081                 if (ret)
3082                         return ret;
3083         }
3084
3085         if (mode & FALLOC_FL_ZERO_RANGE) {
3086                 ret = bch2_truncate_pages(inode, offset, end);
3087                 if (unlikely(ret < 0))
3088                         return ret;
3089
3090                 truncated_last_page = ret;
3091
3092                 truncate_pagecache_range(&inode->v, offset, end - 1);
3093
3094                 block_start     = round_up(offset,      block_bytes(c));
3095                 block_end       = round_down(end,       block_bytes(c));
3096         }
3097
3098         ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
3099
3100         /*
3101          * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
3102          * so that the VFS cache i_size is consistent with the btree i_size:
3103          */
3104         if (ret &&
3105             !(ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)))
3106                 return ret;
3107
3108         if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
3109                 end = inode->v.i_size;
3110
3111         if (end >= inode->v.i_size &&
3112             (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
3113              !(mode & FALLOC_FL_KEEP_SIZE))) {
3114                 spin_lock(&inode->v.i_lock);
3115                 i_size_write(&inode->v, end);
3116                 spin_unlock(&inode->v.i_lock);
3117
3118                 mutex_lock(&inode->ei_update_lock);
3119                 ret2 = bch2_write_inode_size(c, inode, end, 0);
3120                 mutex_unlock(&inode->ei_update_lock);
3121         }
3122
3123         return ret ?: ret2;
3124 }
3125
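/*
 * fallocate() entry point: takes a write ref on the filesystem, the inode
 * lock and the pagecache block lock, then dispatches on mode; plain
 * fallocate, FALLOC_FL_ZERO_RANGE, FALLOC_FL_PUNCH_HOLE,
 * FALLOC_FL_INSERT_RANGE and FALLOC_FL_COLLAPSE_RANGE are supported,
 * anything else returns -EOPNOTSUPP.
 *
 * E.g. zeroing a range from userspace without changing i_size,
 *
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE|FALLOC_FL_KEEP_SIZE, 4096, 8192);
 *
 * ends up in bchfs_fallocate() below.
 */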
3126 long bch2_fallocate_dispatch(struct file *file, int mode,
3127                              loff_t offset, loff_t len)
3128 {
3129         struct bch_inode_info *inode = file_bch_inode(file);
3130         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3131         long ret;
3132
3133         if (!percpu_ref_tryget(&c->writes))
3134                 return -EROFS;
3135
3136         inode_lock(&inode->v);
3137         inode_dio_wait(&inode->v);
3138         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
3139
3140         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
3141                 ret = bchfs_fallocate(inode, mode, offset, len);
3142         else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
3143                 ret = bchfs_fpunch(inode, offset, len);
3144         else if (mode == FALLOC_FL_INSERT_RANGE)
3145                 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
3146         else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3147                 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
3148         else
3149                 ret = -EOPNOTSUPP;
3150
3151
3152         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
3153         inode_unlock(&inode->v);
3154         percpu_ref_put(&c->writes);
3155
3156         return ret;
3157 }
3158
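/*
 * remap_file_range()/reflink: after validating alignment and flushing and
 * invalidating the destination's pagecache, remap whole blocks from src to
 * dst with bch2_remap_range() and extend the destination i_size if needed;
 * REMAP_FILE_DEDUP is not supported.
 */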
3159 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
3160                              struct file *file_dst, loff_t pos_dst,
3161                              loff_t len, unsigned remap_flags)
3162 {
3163         struct bch_inode_info *src = file_bch_inode(file_src);
3164         struct bch_inode_info *dst = file_bch_inode(file_dst);
3165         struct bch_fs *c = src->v.i_sb->s_fs_info;
3166         s64 i_sectors_delta = 0;
3167         u64 aligned_len;
3168         loff_t ret = 0;
3169
3170         if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
3171                 return -EINVAL;
3172
3173         if (remap_flags & REMAP_FILE_DEDUP)
3174                 return -EOPNOTSUPP;
3175
3176         if ((pos_src & (block_bytes(c) - 1)) ||
3177             (pos_dst & (block_bytes(c) - 1)))
3178                 return -EINVAL;
3179
3180         if (src == dst &&
3181             abs(pos_src - pos_dst) < len)
3182                 return -EINVAL;
3183
3184         bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3185
3186         file_update_time(file_dst);
3187
3188         inode_dio_wait(&src->v);
3189         inode_dio_wait(&dst->v);
3190
3191         ret = generic_remap_file_range_prep(file_src, pos_src,
3192                                             file_dst, pos_dst,
3193                                             &len, remap_flags);
3194         if (ret < 0 || len == 0)
3195                 goto err;
3196
3197         aligned_len = round_up((u64) len, block_bytes(c));
3198
3199         ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
3200                                 pos_dst, pos_dst + len - 1);
3201         if (ret)
3202                 goto err;
3203
3204         mark_pagecache_unallocated(src, pos_src >> 9,
3205                                    (pos_src + aligned_len) >> 9);
3206
3207         ret = bch2_remap_range(c,
3208                                inode_inum(dst), pos_dst >> 9,
3209                                inode_inum(src), pos_src >> 9,
3210                                aligned_len >> 9,
3211                                pos_dst + len, &i_sectors_delta);
3212         if (ret < 0)
3213                 goto err;
3214
3215         /*
3216                          * due to alignment, we might have remapped slightly more than requested
3217          */
3218         ret = min((u64) ret << 9, (u64) len);
3219
3220         /* XXX get a quota reservation */
3221         i_sectors_acct(c, dst, NULL, i_sectors_delta);
3222
3223         spin_lock(&dst->v.i_lock);
3224         if (pos_dst + ret > dst->v.i_size)
3225                 i_size_write(&dst->v, pos_dst + ret);
3226         spin_unlock(&dst->v.i_lock);
3227
3228         if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
3229             IS_SYNC(file_inode(file_dst)))
3230                 ret = bch2_flush_inode(c, inode_inum(dst));
3231 err:
3232         bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3233
3234         return ret;
3235 }
3236
3237 /* fseek: */
3238
3239 static int page_data_offset(struct page *page, unsigned offset)
3240 {
3241         struct bch_page_state *s = bch2_page_state(page);
3242         unsigned i;
3243
3244         if (s)
3245                 for (i = offset >> 9; i < PAGE_SECTORS; i++)
3246                         if (s->s[i].state >= SECTOR_DIRTY)
3247                                 return i << 9;
3248
3249         return -1;
3250 }
3251
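/*
 * Scan the pagecache in [start_offset, end_offset) for the first sector whose
 * state is SECTOR_DIRTY or above - i.e. data that may not be visible in the
 * extent btree yet. Returns its byte offset, or end_offset if none is found
 * before the first page missing from the cache.
 */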
3252 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
3253                                        loff_t start_offset,
3254                                        loff_t end_offset)
3255 {
3256         struct address_space *mapping = vinode->i_mapping;
3257         struct page *page;
3258         pgoff_t start_index     = start_offset >> PAGE_SHIFT;
3259         pgoff_t end_index       = end_offset >> PAGE_SHIFT;
3260         pgoff_t index           = start_index;
3261         loff_t ret;
3262         int offset;
3263
3264         while (index <= end_index) {
3265                 if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
3266                         lock_page(page);
3267
3268                         offset = page_data_offset(page,
3269                                         page->index == start_index
3270                                         ? start_offset & (PAGE_SIZE - 1)
3271                                         : 0);
3272                         if (offset >= 0) {
3273                                 ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
3274                                             offset,
3275                                             start_offset, end_offset);
3276                                 unlock_page(page);
3277                                 put_page(page);
3278                                 return ret;
3279                         }
3280
3281                         unlock_page(page);
3282                         put_page(page);
3283                 } else {
3284                         break;
3285                 }
3286         }
3287
3288         return end_offset;
3289 }
3290
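/*
 * SEEK_DATA: find the first data extent at or after @offset in the extent
 * btree, then check the pagecache for dirty data in between that the btree
 * doesn't know about yet.
 */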
3291 static loff_t bch2_seek_data(struct file *file, u64 offset)
3292 {
3293         struct bch_inode_info *inode = file_bch_inode(file);
3294         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3295         struct btree_trans trans;
3296         struct btree_iter iter;
3297         struct bkey_s_c k;
3298         subvol_inum inum = inode_inum(inode);
3299         u64 isize, next_data = MAX_LFS_FILESIZE;
3300         u32 snapshot;
3301         int ret;
3302
3303         isize = i_size_read(&inode->v);
3304         if (offset >= isize)
3305                 return -ENXIO;
3306
3307         bch2_trans_init(&trans, c, 0, 0);
3308 retry:
3309         bch2_trans_begin(&trans);
3310
3311         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3312         if (ret)
3313                 goto err;
3314
3315         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3316                            SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
3317                 if (k.k->p.inode != inode->v.i_ino) {
3318                         break;
3319                 } else if (bkey_extent_is_data(k.k)) {
3320                         next_data = max(offset, bkey_start_offset(k.k) << 9);
3321                         break;
3322                 } else if (k.k->p.offset << 9 > isize)
3323                         break;
3324         }
3325         bch2_trans_iter_exit(&trans, &iter);
3326 err:
3327         if (ret == -EINTR)
3328                 goto retry;
3329
3330         bch2_trans_exit(&trans);
3331         if (ret)
3332                 return ret;
3333
3334         if (next_data > offset)
3335                 next_data = bch2_seek_pagecache_data(&inode->v,
3336                                                      offset, next_data);
3337
3338         if (next_data >= isize)
3339                 return -ENXIO;
3340
3341         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
3342 }
3343
3344 static int __page_hole_offset(struct page *page, unsigned offset)
3345 {
3346         struct bch_page_state *s = bch2_page_state(page);
3347         unsigned i;
3348
3349         if (!s)
3350                 return 0;
3351
3352         for (i = offset >> 9; i < PAGE_SECTORS; i++)
3353                 if (s->s[i].state < SECTOR_DIRTY)
3354                         return i << 9;
3355
3356         return -1;
3357 }
3358
3359 static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
3360 {
3361         pgoff_t index = offset >> PAGE_SHIFT;
3362         struct page *page;
3363         int pg_offset;
3364         loff_t ret = -1;
3365
3366         page = find_lock_page(mapping, index);
3367         if (!page)
3368                 return offset;
3369
3370         pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
3371         if (pg_offset >= 0)
3372                 ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
3373
3374         unlock_page(page);
3375
3376         return ret;
3377 }
3378
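/*
 * Scan the pagecache in [start_offset, end_offset) for a hole: a page not
 * present in the cache, or a sector whose state is below SECTOR_DIRTY, counts
 * as a hole. Returns end_offset if the whole range is backed by data.
 */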
3379 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3380                                        loff_t start_offset,
3381                                        loff_t end_offset)
3382 {
3383         struct address_space *mapping = vinode->i_mapping;
3384         loff_t offset = start_offset, hole;
3385
3386         while (offset < end_offset) {
3387                 hole = page_hole_offset(mapping, offset);
3388                 if (hole >= 0 && hole <= end_offset)
3389                         return max(start_offset, hole);
3390
3391                 offset += PAGE_SIZE;
3392                 offset &= PAGE_MASK;
3393         }
3394
3395         return end_offset;
3396 }
3397
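/*
 * SEEK_HOLE: walk the extent btree from @offset; for every gap or non-data
 * key, check whether the pagecache has data covering it before reporting it
 * as a hole.
 */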
3398 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3399 {
3400         struct bch_inode_info *inode = file_bch_inode(file);
3401         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3402         struct btree_trans trans;
3403         struct btree_iter iter;
3404         struct bkey_s_c k;
3405         subvol_inum inum = inode_inum(inode);
3406         u64 isize, next_hole = MAX_LFS_FILESIZE;
3407         u32 snapshot;
3408         int ret;
3409
3410         isize = i_size_read(&inode->v);
3411         if (offset >= isize)
3412                 return -ENXIO;
3413
3414         bch2_trans_init(&trans, c, 0, 0);
3415 retry:
3416         bch2_trans_begin(&trans);
3417
3418         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3419         if (ret)
3420                 goto err;
3421
3422         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3423                            SPOS(inode->v.i_ino, offset >> 9, snapshot),
3424                            BTREE_ITER_SLOTS, k, ret) {
3425                 if (k.k->p.inode != inode->v.i_ino) {
3426                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3427                                         offset, MAX_LFS_FILESIZE);
3428                         break;
3429                 } else if (!bkey_extent_is_data(k.k)) {
3430                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3431                                         max(offset, bkey_start_offset(k.k) << 9),
3432                                         k.k->p.offset << 9);
3433
3434                         if (next_hole < k.k->p.offset << 9)
3435                                 break;
3436                 } else {
3437                         offset = max(offset, bkey_start_offset(k.k) << 9);
3438                 }
3439         }
3440         bch2_trans_iter_exit(&trans, &iter);
3441 err:
3442         if (ret == -EINTR)
3443                 goto retry;
3444
3445         bch2_trans_exit(&trans);
3446         if (ret)
3447                 return ret;
3448
3449         if (next_hole > isize)
3450                 next_hole = isize;
3451
3452         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3453 }
3454
3455 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3456 {
3457         switch (whence) {
3458         case SEEK_SET:
3459         case SEEK_CUR:
3460         case SEEK_END:
3461                 return generic_file_llseek(file, offset, whence);
3462         case SEEK_DATA:
3463                 return bch2_seek_data(file, offset);
3464         case SEEK_HOLE:
3465                 return bch2_seek_hole(file, offset);
3466         }
3467
3468         return -EINVAL;
3469 }
3470
3471 void bch2_fs_fsio_exit(struct bch_fs *c)
3472 {
3473         bioset_exit(&c->dio_write_bioset);
3474         bioset_exit(&c->dio_read_bioset);
3475         bioset_exit(&c->writepage_bioset);
3476 }
3477
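/*
 * Allocate the biosets used by buffered writeback (writepage_bioset) and by
 * direct I/O reads and writes (dio_read_bioset, dio_write_bioset).
 */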
3478 int bch2_fs_fsio_init(struct bch_fs *c)
3479 {
3480         int ret = 0;
3481
3482         pr_verbose_init(c->opts, "");
3483
3484         if (bioset_init(&c->writepage_bioset,
3485                         4, offsetof(struct bch_writepage_io, op.wbio.bio),
3486                         BIOSET_NEED_BVECS) ||
3487             bioset_init(&c->dio_read_bioset,
3488                         4, offsetof(struct dio_read, rbio.bio),
3489                         BIOSET_NEED_BVECS) ||
3490             bioset_init(&c->dio_write_bioset,
3491                         4, offsetof(struct dio_write, op.wbio.bio),
3492                         BIOSET_NEED_BVECS))
3493                 ret = -ENOMEM;
3494
3495         pr_verbose_init(c->opts, "ret %i", ret);
3496         return ret;
3497 }
3498
3499 #endif /* NO_BCACHEFS_FS */