1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_FS
3
4 #include "bcachefs.h"
5 #include "alloc_foreground.h"
6 #include "bkey_buf.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "clock.h"
10 #include "error.h"
11 #include "extents.h"
12 #include "extent_update.h"
13 #include "fs.h"
14 #include "fs-io.h"
15 #include "fsck.h"
16 #include "inode.h"
17 #include "journal.h"
18 #include "io.h"
19 #include "keylist.h"
20 #include "quota.h"
21 #include "reflink.h"
22
23 #include <linux/aio.h>
24 #include <linux/backing-dev.h>
25 #include <linux/falloc.h>
26 #include <linux/migrate.h>
27 #include <linux/mmu_context.h>
28 #include <linux/pagevec.h>
29 #include <linux/rmap.h>
30 #include <linux/sched/signal.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/uio.h>
33 #include <linux/writeback.h>
34
35 #include <trace/events/bcachefs.h>
36 #include <trace/events/writeback.h>
37
38 static inline bool bio_full(struct bio *bio, unsigned len)
39 {
40         if (bio->bi_vcnt >= bio->bi_max_vecs)
41                 return true;
42         if (bio->bi_iter.bi_size > UINT_MAX - len)
43                 return true;
44         return false;
45 }
46
47 static inline struct address_space *faults_disabled_mapping(void)
48 {
49         return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
50 }
51
52 static inline void set_fdm_dropped_locks(void)
53 {
54         current->faults_disabled_mapping =
55                 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
56 }
57
58 static inline bool fdm_dropped_locks(void)
59 {
60         return ((unsigned long) current->faults_disabled_mapping) & 1;
61 }
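/*
 * A rough sketch of how the three helpers above are used (see
 * bch2_page_fault() below): current->faults_disabled_mapping carries the
 * address_space a DIO write currently holds pagecache locks against, and its
 * low bit is borrowed as a flag.  The fault path sets that bit with
 * set_fdm_dropped_locks() to signal that it had to drop and retake those
 * locks; the DIO write checks it afterwards with fdm_dropped_locks(), and
 * faults_disabled_mapping() masks the bit back off to recover the pointer:
 *
 *	struct address_space *fdm = faults_disabled_mapping();
 *
 *	if (fdm_dropped_locks()) {
 *		// a recursive fault dropped our locks; revalidate before continuing
 *	}
 */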
62
63 struct quota_res {
64         u64                             sectors;
65 };
66
67 struct bch_writepage_io {
68         struct closure                  cl;
69         struct bch_inode_info           *inode;
70
71         /* must be last: */
72         struct bch_write_op             op;
73 };
74
75 struct dio_write {
76         struct completion               done;
77         struct kiocb                    *req;
78         struct mm_struct                *mm;
79         unsigned                        loop:1,
80                                         sync:1,
81                                         free_iov:1;
82         struct quota_res                quota_res;
83         u64                             written;
84
85         struct iov_iter                 iter;
86         struct iovec                    inline_vecs[2];
87
88         /* must be last: */
89         struct bch_write_op             op;
90 };
91
92 struct dio_read {
93         struct closure                  cl;
94         struct kiocb                    *req;
95         long                            ret;
96         bool                            should_dirty;
97         struct bch_read_bio             rbio;
98 };
99
100 /* pagecache_block must be held */
101 static int write_invalidate_inode_pages_range(struct address_space *mapping,
102                                               loff_t start, loff_t end)
103 {
104         int ret;
105
106         /*
107          * XXX: the way this is currently implemented, we can spin if a process
108          * is continually redirtying a specific page
109          */
110         do {
111                 if (!mapping->nrpages)
112                         return 0;
113
114                 ret = filemap_write_and_wait_range(mapping, start, end);
115                 if (ret)
116                         break;
117
118                 if (!mapping->nrpages)
119                         return 0;
120
121                 ret = invalidate_inode_pages2_range(mapping,
122                                 start >> PAGE_SHIFT,
123                                 end >> PAGE_SHIFT);
124         } while (ret == -EBUSY);
125
126         return ret;
127 }
128
129 /* quotas */
130
131 #ifdef CONFIG_BCACHEFS_QUOTA
132
133 static void bch2_quota_reservation_put(struct bch_fs *c,
134                                        struct bch_inode_info *inode,
135                                        struct quota_res *res)
136 {
137         if (!res->sectors)
138                 return;
139
140         mutex_lock(&inode->ei_quota_lock);
141         BUG_ON(res->sectors > inode->ei_quota_reserved);
142
143         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
144                         -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
145         inode->ei_quota_reserved -= res->sectors;
146         mutex_unlock(&inode->ei_quota_lock);
147
148         res->sectors = 0;
149 }
150
151 static int bch2_quota_reservation_add(struct bch_fs *c,
152                                       struct bch_inode_info *inode,
153                                       struct quota_res *res,
154                                       unsigned sectors,
155                                       bool check_enospc)
156 {
157         int ret;
158
159         mutex_lock(&inode->ei_quota_lock);
160         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
161                               check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
162         if (likely(!ret)) {
163                 inode->ei_quota_reserved += sectors;
164                 res->sectors += sectors;
165         }
166         mutex_unlock(&inode->ei_quota_lock);
167
168         return ret;
169 }
170
171 #else
172
173 static void bch2_quota_reservation_put(struct bch_fs *c,
174                                        struct bch_inode_info *inode,
175                                        struct quota_res *res)
176 {
177 }
178
179 static int bch2_quota_reservation_add(struct bch_fs *c,
180                                       struct bch_inode_info *inode,
181                                       struct quota_res *res,
182                                       unsigned sectors,
183                                       bool check_enospc)
184 {
185         return 0;
186 }
187
188 #endif
189
190 /* i_size updates: */
191
192 struct inode_new_size {
193         loff_t          new_size;
194         u64             now;
195         unsigned        fields;
196 };
197
198 static int inode_set_size(struct bch_inode_info *inode,
199                           struct bch_inode_unpacked *bi,
200                           void *p)
201 {
202         struct inode_new_size *s = p;
203
204         bi->bi_size = s->new_size;
205         if (s->fields & ATTR_ATIME)
206                 bi->bi_atime = s->now;
207         if (s->fields & ATTR_MTIME)
208                 bi->bi_mtime = s->now;
209         if (s->fields & ATTR_CTIME)
210                 bi->bi_ctime = s->now;
211
212         return 0;
213 }
214
215 int __must_check bch2_write_inode_size(struct bch_fs *c,
216                                        struct bch_inode_info *inode,
217                                        loff_t new_size, unsigned fields)
218 {
219         struct inode_new_size s = {
220                 .new_size       = new_size,
221                 .now            = bch2_current_time(c),
222                 .fields         = fields,
223         };
224
225         return bch2_write_inode(c, inode, inode_set_size, &s, fields);
226 }
227
228 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
229                            struct quota_res *quota_res, s64 sectors)
230 {
231         if (!sectors)
232                 return;
233
234         mutex_lock(&inode->ei_quota_lock);
235         BUG_ON((s64) inode->v.i_blocks + sectors < 0);
236         inode->v.i_blocks += sectors;
237
238 #ifdef CONFIG_BCACHEFS_QUOTA
239         if (quota_res && sectors > 0) {
240                 BUG_ON(sectors > quota_res->sectors);
241                 BUG_ON(sectors > inode->ei_quota_reserved);
242
243                 quota_res->sectors -= sectors;
244                 inode->ei_quota_reserved -= sectors;
245         } else {
246                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
247         }
248 #endif
249         mutex_unlock(&inode->ei_quota_lock);
250 }
251
252 /* page state: */
253
254 /* stored in page->private: */
255
256 struct bch_page_sector {
257         /* Uncompressed, fully allocated replicas (or on disk reservation): */
258         unsigned                nr_replicas:4;
259
260         /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
261         unsigned                replicas_reserved:4;
262
263         /* i_sectors: */
264         enum {
265                 SECTOR_UNALLOCATED,
266                 SECTOR_RESERVED,
267                 SECTOR_DIRTY,
268                 SECTOR_DIRTY_RESERVED,
269                 SECTOR_ALLOCATED,
270         }                       state:8;
271 };
272
273 struct bch_page_state {
274         spinlock_t              lock;
275         atomic_t                write_count;
276         bool                    uptodate;
277         struct bch_page_sector  s[PAGE_SECTORS];
278 };
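/*
 * Rough summary of the per-sector state machine, as implemented by
 * bch2_set_page_dirty(), bch2_clear_page_bits(), mark_pagecache_reserved()
 * and __bch2_writepage() below:
 *
 *	UNALLOCATED      --buffered write----->  DIRTY
 *	UNALLOCATED      --fallocate/reserve-->  RESERVED
 *	RESERVED         --buffered write----->  DIRTY_RESERVED
 *	DIRTY            --fallocate/reserve-->  DIRTY_RESERVED
 *	DIRTY[_RESERVED] --writeback---------->  ALLOCATED
 *	DIRTY            --page invalidate---->  UNALLOCATED
 *	DIRTY_RESERVED   --page invalidate---->  RESERVED
 *
 * DIRTY sectors are the ones accounted into i_blocks via i_sectors_acct();
 * RESERVED and DIRTY_RESERVED sectors are already backed by an on disk
 * reservation.
 */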
279
280 static inline struct bch_page_state *__bch2_page_state(struct page *page)
281 {
282         return page_has_private(page)
283                 ? (struct bch_page_state *) page_private(page)
284                 : NULL;
285 }
286
287 static inline struct bch_page_state *bch2_page_state(struct page *page)
288 {
289         EBUG_ON(!PageLocked(page));
290
291         return __bch2_page_state(page);
292 }
293
294 /* for newly allocated pages: */
295 static void __bch2_page_state_release(struct page *page)
296 {
297         kfree(detach_page_private(page));
298 }
299
300 static void bch2_page_state_release(struct page *page)
301 {
302         EBUG_ON(!PageLocked(page));
303         __bch2_page_state_release(page);
304 }
305
306 /* for newly allocated pages: */
307 static struct bch_page_state *__bch2_page_state_create(struct page *page,
308                                                        gfp_t gfp)
309 {
310         struct bch_page_state *s;
311
312         s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
313         if (!s)
314                 return NULL;
315
316         spin_lock_init(&s->lock);
317         attach_page_private(page, s);
318         return s;
319 }
320
321 static struct bch_page_state *bch2_page_state_create(struct page *page,
322                                                      gfp_t gfp)
323 {
324         return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
325 }
326
327 static unsigned bkey_to_sector_state(const struct bkey *k)
328 {
329         if (k->type == KEY_TYPE_reservation)
330                 return SECTOR_RESERVED;
331         if (bkey_extent_is_allocation(k))
332                 return SECTOR_ALLOCATED;
333         return SECTOR_UNALLOCATED;
334 }
335
336 static void __bch2_page_state_set(struct page *page,
337                                   unsigned pg_offset, unsigned pg_len,
338                                   unsigned nr_ptrs, unsigned state)
339 {
340         struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
341         unsigned i;
342
343         BUG_ON(pg_offset >= PAGE_SECTORS);
344         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
345
346         spin_lock(&s->lock);
347
348         for (i = pg_offset; i < pg_offset + pg_len; i++) {
349                 s->s[i].nr_replicas = nr_ptrs;
350                 s->s[i].state = state;
351         }
352
353         if (i == PAGE_SECTORS)
354                 s->uptodate = true;
355
356         spin_unlock(&s->lock);
357 }
358
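/*
 * Initialize the bch_page_state of @pages from the extents btree, so that per
 * sector allocation state is known (and reservations can be computed) without
 * having to read the pages in first; used by bch2_page_mkwrite() and
 * bch2_write_begin() below when a page's state isn't yet uptodate.
 */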
359 static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
360                                struct page **pages, unsigned nr_pages)
361 {
362         struct btree_trans trans;
363         struct btree_iter iter;
364         struct bkey_s_c k;
365         u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
366         unsigned pg_idx = 0;
367         u32 snapshot;
368         int ret;
369
370         bch2_trans_init(&trans, c, 0, 0);
371 retry:
372         bch2_trans_begin(&trans);
373
374         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
375         if (ret)
376                 goto err;
377
378         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
379                            SPOS(inum.inum, offset, snapshot),
380                            BTREE_ITER_SLOTS, k, ret) {
381                 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
382                 unsigned state = bkey_to_sector_state(k.k);
383
384                 while (pg_idx < nr_pages) {
385                         struct page *page = pages[pg_idx];
386                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
387                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
388                         unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
389                         unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;
390
391                         BUG_ON(k.k->p.offset < pg_start);
392                         BUG_ON(bkey_start_offset(k.k) > pg_end);
393
394                         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
395                                 __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);
396
397                         if (k.k->p.offset < pg_end)
398                                 break;
399                         pg_idx++;
400                 }
401
402                 if (pg_idx == nr_pages)
403                         break;
404         }
405
406         offset = iter.pos.offset;
407         bch2_trans_iter_exit(&trans, &iter);
408 err:
409         if (ret == -EINTR)
410                 goto retry;
411         bch2_trans_exit(&trans);
412
413         return ret;
414 }
415
416 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
417 {
418         struct bvec_iter iter;
419         struct bio_vec bv;
420         unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
421                 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
422         unsigned state = bkey_to_sector_state(k.k);
423
424         bio_for_each_segment(bv, bio, iter)
425                 __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
426                                       bv.bv_len >> 9, nr_ptrs, state);
427 }
428
429 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
430                                        u64 start, u64 end)
431 {
432         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
433         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
434         struct pagevec pvec;
435
436         if (end <= start)
437                 return;
438
439         pagevec_init(&pvec);
440
441         do {
442                 unsigned nr_pages, i, j;
443
444                 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
445                                                 &index, end_index);
446                 for (i = 0; i < nr_pages; i++) {
447                         struct page *page = pvec.pages[i];
448                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
449                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
450                         unsigned pg_offset = max(start, pg_start) - pg_start;
451                         unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
452                         struct bch_page_state *s;
453
454                         BUG_ON(end <= pg_start);
455                         BUG_ON(pg_offset >= PAGE_SECTORS);
456                         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
457
458                         lock_page(page);
459                         s = bch2_page_state(page);
460
461                         if (s) {
462                                 spin_lock(&s->lock);
463                                 for (j = pg_offset; j < pg_offset + pg_len; j++)
464                                         s->s[j].nr_replicas = 0;
465                                 spin_unlock(&s->lock);
466                         }
467
468                         unlock_page(page);
469                 }
470                 pagevec_release(&pvec);
471         } while (index <= end_index);
472 }
473
474 static void mark_pagecache_reserved(struct bch_inode_info *inode,
475                                     u64 start, u64 end)
476 {
477         struct bch_fs *c = inode->v.i_sb->s_fs_info;
478         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
479         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
480         struct pagevec pvec;
481         s64 i_sectors_delta = 0;
482
483         if (end <= start)
484                 return;
485
486         pagevec_init(&pvec);
487
488         do {
489                 unsigned nr_pages, i, j;
490
491                 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
492                                                 &index, end_index);
493                 for (i = 0; i < nr_pages; i++) {
494                         struct page *page = pvec.pages[i];
495                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
496                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
497                         unsigned pg_offset = max(start, pg_start) - pg_start;
498                         unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
499                         struct bch_page_state *s;
500
501                         BUG_ON(end <= pg_start);
502                         BUG_ON(pg_offset >= PAGE_SECTORS);
503                         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
504
505                         lock_page(page);
506                         s = bch2_page_state(page);
507
508                         if (s) {
509                                 spin_lock(&s->lock);
510                                 for (j = pg_offset; j < pg_offset + pg_len; j++)
511                                         switch (s->s[j].state) {
512                                         case SECTOR_UNALLOCATED:
513                                                 s->s[j].state = SECTOR_RESERVED;
514                                                 break;
515                                         case SECTOR_DIRTY:
516                                                 s->s[j].state = SECTOR_DIRTY_RESERVED;
517                                                 i_sectors_delta--;
518                                                 break;
519                                         default:
520                                                 break;
521                                         }
522                                 spin_unlock(&s->lock);
523                         }
524
525                         unlock_page(page);
526                 }
527                 pagevec_release(&pvec);
528         } while (index <= end_index);
529
530         i_sectors_acct(c, inode, NULL, i_sectors_delta);
531 }
532
533 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
534 {
535         /* XXX: this should not be open coded */
536         return inode->ei_inode.bi_data_replicas
537                 ? inode->ei_inode.bi_data_replicas - 1
538                 : c->opts.data_replicas;
539 }
540
541 static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
542                                                   unsigned nr_replicas)
543 {
544         return max(0, (int) nr_replicas -
545                    s->nr_replicas -
546                    s->replicas_reserved);
547 }
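/*
 * For example: with a target of nr_replicas = 2, a sector that already has
 * one fully allocated replica (s->nr_replicas = 1) and no reservation
 * (s->replicas_reserved = 0) needs 2 - 1 - 0 = 1 more sector's worth of
 * reservation; a sector already at or above the target needs 0, hence the
 * clamp with max().
 */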
548
549 static int bch2_get_page_disk_reservation(struct bch_fs *c,
550                                 struct bch_inode_info *inode,
551                                 struct page *page, bool check_enospc)
552 {
553         struct bch_page_state *s = bch2_page_state_create(page, 0);
554         unsigned nr_replicas = inode_nr_replicas(c, inode);
555         struct disk_reservation disk_res = { 0 };
556         unsigned i, disk_res_sectors = 0;
557         int ret;
558
559         if (!s)
560                 return -ENOMEM;
561
562         for (i = 0; i < ARRAY_SIZE(s->s); i++)
563                 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
564
565         if (!disk_res_sectors)
566                 return 0;
567
568         ret = bch2_disk_reservation_get(c, &disk_res,
569                                         disk_res_sectors, 1,
570                                         !check_enospc
571                                         ? BCH_DISK_RESERVATION_NOFAIL
572                                         : 0);
573         if (unlikely(ret))
574                 return ret;
575
576         for (i = 0; i < ARRAY_SIZE(s->s); i++)
577                 s->s[i].replicas_reserved +=
578                         sectors_to_reserve(&s->s[i], nr_replicas);
579
580         return 0;
581 }
582
583 struct bch2_page_reservation {
584         struct disk_reservation disk;
585         struct quota_res        quota;
586 };
587
588 static void bch2_page_reservation_init(struct bch_fs *c,
589                         struct bch_inode_info *inode,
590                         struct bch2_page_reservation *res)
591 {
592         memset(res, 0, sizeof(*res));
593
594         res->disk.nr_replicas = inode_nr_replicas(c, inode);
595 }
596
597 static void bch2_page_reservation_put(struct bch_fs *c,
598                         struct bch_inode_info *inode,
599                         struct bch2_page_reservation *res)
600 {
601         bch2_disk_reservation_put(c, &res->disk);
602         bch2_quota_reservation_put(c, inode, &res->quota);
603 }
604
605 static int bch2_page_reservation_get(struct bch_fs *c,
606                         struct bch_inode_info *inode, struct page *page,
607                         struct bch2_page_reservation *res,
608                         unsigned offset, unsigned len, bool check_enospc)
609 {
610         struct bch_page_state *s = bch2_page_state_create(page, 0);
611         unsigned i, disk_sectors = 0, quota_sectors = 0;
612         int ret;
613
614         if (!s)
615                 return -ENOMEM;
616
617         BUG_ON(!s->uptodate);
618
619         for (i = round_down(offset, block_bytes(c)) >> 9;
620              i < round_up(offset + len, block_bytes(c)) >> 9;
621              i++) {
622                 disk_sectors += sectors_to_reserve(&s->s[i],
623                                                 res->disk.nr_replicas);
624                 quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
625         }
626
627         if (disk_sectors) {
628                 ret = bch2_disk_reservation_add(c, &res->disk,
629                                                 disk_sectors,
630                                                 !check_enospc
631                                                 ? BCH_DISK_RESERVATION_NOFAIL
632                                                 : 0);
633                 if (unlikely(ret))
634                         return ret;
635         }
636
637         if (quota_sectors) {
638                 ret = bch2_quota_reservation_add(c, inode, &res->quota,
639                                                  quota_sectors,
640                                                  check_enospc);
641                 if (unlikely(ret)) {
642                         struct disk_reservation tmp = {
643                                 .sectors = disk_sectors
644                         };
645
646                         bch2_disk_reservation_put(c, &tmp);
647                         res->disk.sectors -= disk_sectors;
648                         return ret;
649                 }
650         }
651
652         return 0;
653 }
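/*
 * Typical lifecycle of a bch2_page_reservation, roughly as used by
 * bch2_page_mkwrite() and bch2_write_begin()/bch2_write_end() below (a
 * sketch without the error paths):
 *
 *	struct bch2_page_reservation res;
 *
 *	bch2_page_reservation_init(c, inode, &res);
 *
 *	ret = bch2_page_reservation_get(c, inode, page, &res, offset, len, true);
 *	if (!ret)
 *		bch2_set_page_dirty(c, inode, page, &res, offset, len);
 *
 *	bch2_page_reservation_put(c, inode, &res);
 */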
654
655 static void bch2_clear_page_bits(struct page *page)
656 {
657         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
658         struct bch_fs *c = inode->v.i_sb->s_fs_info;
659         struct bch_page_state *s = bch2_page_state(page);
660         struct disk_reservation disk_res = { 0 };
661         int i, dirty_sectors = 0;
662
663         if (!s)
664                 return;
665
666         EBUG_ON(!PageLocked(page));
667         EBUG_ON(PageWriteback(page));
668
669         for (i = 0; i < ARRAY_SIZE(s->s); i++) {
670                 disk_res.sectors += s->s[i].replicas_reserved;
671                 s->s[i].replicas_reserved = 0;
672
673                 switch (s->s[i].state) {
674                 case SECTOR_DIRTY:
675                         s->s[i].state = SECTOR_UNALLOCATED;
676                         --dirty_sectors;
677                         break;
678                 case SECTOR_DIRTY_RESERVED:
679                         s->s[i].state = SECTOR_RESERVED;
680                         break;
681                 default:
682                         break;
683                 }
684         }
685
686         bch2_disk_reservation_put(c, &disk_res);
687
688         i_sectors_acct(c, inode, NULL, dirty_sectors);
689
690         bch2_page_state_release(page);
691 }
692
693 static void bch2_set_page_dirty(struct bch_fs *c,
694                         struct bch_inode_info *inode, struct page *page,
695                         struct bch2_page_reservation *res,
696                         unsigned offset, unsigned len)
697 {
698         struct bch_page_state *s = bch2_page_state(page);
699         unsigned i, dirty_sectors = 0;
700
701         WARN_ON((u64) page_offset(page) + offset + len >
702                 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
703
704         spin_lock(&s->lock);
705
706         for (i = round_down(offset, block_bytes(c)) >> 9;
707              i < round_up(offset + len, block_bytes(c)) >> 9;
708              i++) {
709                 unsigned sectors = sectors_to_reserve(&s->s[i],
710                                                 res->disk.nr_replicas);
711
712                 /*
713                  * This can happen if we race with the error path in
714                  * bch2_writepage_io_done():
715                  */
716                 sectors = min_t(unsigned, sectors, res->disk.sectors);
717
718                 s->s[i].replicas_reserved += sectors;
719                 res->disk.sectors -= sectors;
720
721                 switch (s->s[i].state) {
722                 case SECTOR_UNALLOCATED:
723                         s->s[i].state = SECTOR_DIRTY;
724                         dirty_sectors++;
725                         break;
726                 case SECTOR_RESERVED:
727                         s->s[i].state = SECTOR_DIRTY_RESERVED;
728                         break;
729                 default:
730                         break;
731                 }
732         }
733
734         spin_unlock(&s->lock);
735
736         i_sectors_acct(c, inode, &res->quota, dirty_sectors);
737
738         if (!PageDirty(page))
739                 __set_page_dirty_nobuffers(page);
740 }
741
742 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
743 {
744         struct file *file = vmf->vma->vm_file;
745         struct address_space *mapping = file->f_mapping;
746         struct address_space *fdm = faults_disabled_mapping();
747         struct bch_inode_info *inode = file_bch_inode(file);
748         int ret;
749
750         if (fdm == mapping)
751                 return VM_FAULT_SIGBUS;
752
753         /* Lock ordering: */
754         if (fdm > mapping) {
755                 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
756
757                 if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
758                         goto got_lock;
759
760                 bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);
761
762                 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
763                 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
764
765                 bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);
766
767                 /* Signal that lock has been dropped: */
768                 set_fdm_dropped_locks();
769                 return VM_FAULT_SIGBUS;
770         }
771
772         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
773 got_lock:
774         ret = filemap_fault(vmf);
775         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
776
777         return ret;
778 }
779
780 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
781 {
782         struct page *page = vmf->page;
783         struct file *file = vmf->vma->vm_file;
784         struct bch_inode_info *inode = file_bch_inode(file);
785         struct address_space *mapping = file->f_mapping;
786         struct bch_fs *c = inode->v.i_sb->s_fs_info;
787         struct bch2_page_reservation res;
788         unsigned len;
789         loff_t isize;
790         int ret;
791
792         bch2_page_reservation_init(c, inode, &res);
793
794         sb_start_pagefault(inode->v.i_sb);
795         file_update_time(file);
796
797         /*
798          * Not strictly necessary, but helps avoid dio writes livelocking in
799          * write_invalidate_inode_pages_range() - can drop this if/when we get
800          * a write_invalidate_inode_pages_range() that works without dropping
801          * page lock before invalidating page
802          */
803         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
804
805         lock_page(page);
806         isize = i_size_read(&inode->v);
807
808         if (page->mapping != mapping || page_offset(page) >= isize) {
809                 unlock_page(page);
810                 ret = VM_FAULT_NOPAGE;
811                 goto out;
812         }
813
814         len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
815
816         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
817                 if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
818                         unlock_page(page);
819                         ret = VM_FAULT_SIGBUS;
820                         goto out;
821                 }
822         }
823
824         if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
825                 unlock_page(page);
826                 ret = VM_FAULT_SIGBUS;
827                 goto out;
828         }
829
830         bch2_set_page_dirty(c, inode, page, &res, 0, len);
831         bch2_page_reservation_put(c, inode, &res);
832
833         wait_for_stable_page(page);
834         ret = VM_FAULT_LOCKED;
835 out:
836         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
837         sb_end_pagefault(inode->v.i_sb);
838
839         return ret;
840 }
841
842 void bch2_invalidatepage(struct page *page, unsigned int offset,
843                          unsigned int length)
844 {
845         if (offset || length < PAGE_SIZE)
846                 return;
847
848         bch2_clear_page_bits(page);
849 }
850
851 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
852 {
853         if (PageDirty(page))
854                 return 0;
855
856         bch2_clear_page_bits(page);
857         return 1;
858 }
859
860 #ifdef CONFIG_MIGRATION
861 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
862                       struct page *page, enum migrate_mode mode)
863 {
864         int ret;
865
866         EBUG_ON(!PageLocked(page));
867         EBUG_ON(!PageLocked(newpage));
868
869         ret = migrate_page_move_mapping(mapping, newpage, page, 0);
870         if (ret != MIGRATEPAGE_SUCCESS)
871                 return ret;
872
873         if (PagePrivate(page))
874                 attach_page_private(newpage, detach_page_private(page));
875
876         if (mode != MIGRATE_SYNC_NO_COPY)
877                 migrate_page_copy(newpage, page);
878         else
879                 migrate_page_states(newpage, page);
880         return MIGRATEPAGE_SUCCESS;
881 }
882 #endif
883
884 /* readpage(s): */
885
886 static void bch2_readpages_end_io(struct bio *bio)
887 {
888         struct bvec_iter_all iter;
889         struct bio_vec *bv;
890
891         bio_for_each_segment_all(bv, bio, iter) {
892                 struct page *page = bv->bv_page;
893
894                 if (!bio->bi_status) {
895                         SetPageUptodate(page);
896                 } else {
897                         ClearPageUptodate(page);
898                         SetPageError(page);
899                 }
900                 unlock_page(page);
901         }
902
903         bio_put(bio);
904 }
905
906 struct readpages_iter {
907         struct address_space    *mapping;
908         struct page             **pages;
909         unsigned                nr_pages;
910         unsigned                idx;
911         pgoff_t                 offset;
912 };
913
914 static int readpages_iter_init(struct readpages_iter *iter,
915                                struct readahead_control *ractl)
916 {
917         unsigned i, nr_pages = readahead_count(ractl);
918
919         memset(iter, 0, sizeof(*iter));
920
921         iter->mapping   = ractl->mapping;
922         iter->offset    = readahead_index(ractl);
923         iter->nr_pages  = nr_pages;
924
925         iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
926         if (!iter->pages)
927                 return -ENOMEM;
928
929         nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
930         for (i = 0; i < nr_pages; i++) {
931                 __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
932                 put_page(iter->pages[i]);
933         }
934
935         return 0;
936 }
937
938 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
939 {
940         if (iter->idx >= iter->nr_pages)
941                 return NULL;
942
943         EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
944
945         return iter->pages[iter->idx];
946 }
947
948 static bool extent_partial_reads_expensive(struct bkey_s_c k)
949 {
950         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
951         struct bch_extent_crc_unpacked crc;
952         const union bch_extent_entry *i;
953
954         bkey_for_each_crc(k.k, ptrs, crc, i)
955                 if (crc.csum_type || crc.compression_type)
956                         return true;
957         return false;
958 }
959
960 static void readpage_bio_extend(struct readpages_iter *iter,
961                                 struct bio *bio,
962                                 unsigned sectors_this_extent,
963                                 bool get_more)
964 {
965         while (bio_sectors(bio) < sectors_this_extent &&
966                bio->bi_vcnt < bio->bi_max_vecs) {
967                 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
968                 struct page *page = readpage_iter_next(iter);
969                 int ret;
970
971                 if (page) {
972                         if (iter->offset + iter->idx != page_offset)
973                                 break;
974
975                         iter->idx++;
976                 } else {
977                         if (!get_more)
978                                 break;
979
980                         page = xa_load(&iter->mapping->i_pages, page_offset);
981                         if (page && !xa_is_value(page))
982                                 break;
983
984                         page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
985                         if (!page)
986                                 break;
987
988                         if (!__bch2_page_state_create(page, 0)) {
989                                 put_page(page);
990                                 break;
991                         }
992
993                         ret = add_to_page_cache_lru(page, iter->mapping,
994                                                     page_offset, GFP_NOFS);
995                         if (ret) {
996                                 __bch2_page_state_release(page);
997                                 put_page(page);
998                                 break;
999                         }
1000
1001                         put_page(page);
1002                 }
1003
1004                 BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
1005         }
1006 }
1007
1008 static void bchfs_read(struct btree_trans *trans,
1009                        struct bch_read_bio *rbio,
1010                        subvol_inum inum,
1011                        struct readpages_iter *readpages_iter)
1012 {
1013         struct bch_fs *c = trans->c;
1014         struct btree_iter iter;
1015         struct bkey_buf sk;
1016         int flags = BCH_READ_RETRY_IF_STALE|
1017                 BCH_READ_MAY_PROMOTE;
1018         u32 snapshot;
1019         int ret = 0;
1020
1021         rbio->c = c;
1022         rbio->start_time = local_clock();
1023         rbio->subvol = inum.subvol;
1024
1025         bch2_bkey_buf_init(&sk);
1026 retry:
1027         bch2_trans_begin(trans);
1028         iter = (struct btree_iter) { NULL };
1029
1030         ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1031         if (ret)
1032                 goto err;
1033
1034         bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1035                              SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1036                              BTREE_ITER_SLOTS);
1037         while (1) {
1038                 struct bkey_s_c k;
1039                 unsigned bytes, sectors, offset_into_extent;
1040                 enum btree_id data_btree = BTREE_ID_extents;
1041
1042                 /*
1043                  * read_extent -> io_time_reset may cause a transaction restart
1044                  * without returning an error, we need to check for that here:
1045                  */
1046                 if (!bch2_trans_relock(trans)) {
1047                         ret = -EINTR;
1048                         break;
1049                 }
1050
1051                 bch2_btree_iter_set_pos(&iter,
1052                                 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1053
1054                 k = bch2_btree_iter_peek_slot(&iter);
1055                 ret = bkey_err(k);
1056                 if (ret)
1057                         break;
1058
1059                 offset_into_extent = iter.pos.offset -
1060                         bkey_start_offset(k.k);
1061                 sectors = k.k->size - offset_into_extent;
1062
1063                 bch2_bkey_buf_reassemble(&sk, c, k);
1064
1065                 ret = bch2_read_indirect_extent(trans, &data_btree,
1066                                         &offset_into_extent, &sk);
1067                 if (ret)
1068                         break;
1069
1070                 k = bkey_i_to_s_c(sk.k);
1071
1072                 sectors = min(sectors, k.k->size - offset_into_extent);
1073
1074                 if (readpages_iter)
1075                         readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
1076                                             extent_partial_reads_expensive(k));
1077
1078                 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1079                 swap(rbio->bio.bi_iter.bi_size, bytes);
1080
1081                 if (rbio->bio.bi_iter.bi_size == bytes)
1082                         flags |= BCH_READ_LAST_FRAGMENT;
1083
1084                 bch2_bio_page_state_set(&rbio->bio, k);
1085
1086                 bch2_read_extent(trans, rbio, iter.pos,
1087                                  data_btree, k, offset_into_extent, flags);
1088
1089                 if (flags & BCH_READ_LAST_FRAGMENT)
1090                         break;
1091
1092                 swap(rbio->bio.bi_iter.bi_size, bytes);
1093                 bio_advance(&rbio->bio, bytes);
1094
1095                 ret = btree_trans_too_many_iters(trans);
1096                 if (ret)
1097                         break;
1098         }
1099 err:
1100         bch2_trans_iter_exit(trans, &iter);
1101
1102         if (ret == -EINTR)
1103                 goto retry;
1104
1105         if (ret) {
1106                 bch_err_inum_ratelimited(c, inum.inum,
1107                                 "read error %i from btree lookup", ret);
1108                 rbio->bio.bi_status = BLK_STS_IOERR;
1109                 bio_endio(&rbio->bio);
1110         }
1111
1112         bch2_bkey_buf_exit(&sk, c);
1113 }
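/*
 * bchfs_read() above issues one bch2_read_extent() per extent: each loop
 * iteration clamps the bio to the current extent (the bi_size swap +
 * bio_advance() dance), records the allocation state in the page state via
 * bch2_bio_page_state_set(), and stops once the last fragment has been
 * submitted or the transaction needs to restart (-EINTR, retried above).
 */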
1114
1115 void bch2_readahead(struct readahead_control *ractl)
1116 {
1117         struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1118         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1119         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1120         struct btree_trans trans;
1121         struct page *page;
1122         struct readpages_iter readpages_iter;
1123         int ret;
1124
1125         ret = readpages_iter_init(&readpages_iter, ractl);
1126         BUG_ON(ret);
1127
1128         bch2_trans_init(&trans, c, 0, 0);
1129
1130         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1131
1132         while ((page = readpage_iter_next(&readpages_iter))) {
1133                 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
1134                 unsigned n = min_t(unsigned,
1135                                    readpages_iter.nr_pages -
1136                                    readpages_iter.idx,
1137                                    BIO_MAX_VECS);
1138                 struct bch_read_bio *rbio =
1139                         rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1140                                   opts);
1141
1142                 readpages_iter.idx++;
1143
1144                 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
1145                 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
1146                 rbio->bio.bi_end_io = bch2_readpages_end_io;
1147                 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1148
1149                 bchfs_read(&trans, rbio, inode_inum(inode),
1150                            &readpages_iter);
1151         }
1152
1153         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1154
1155         bch2_trans_exit(&trans);
1156         kfree(readpages_iter.pages);
1157 }
1158
1159 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1160                              subvol_inum inum, struct page *page)
1161 {
1162         struct btree_trans trans;
1163
1164         bch2_page_state_create(page, __GFP_NOFAIL);
1165
1166         bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1167         rbio->bio.bi_iter.bi_sector =
1168                 (sector_t) page->index << PAGE_SECTORS_SHIFT;
1169         BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1170
1171         bch2_trans_init(&trans, c, 0, 0);
1172         bchfs_read(&trans, rbio, inum, NULL);
1173         bch2_trans_exit(&trans);
1174 }
1175
1176 int bch2_readpage(struct file *file, struct page *page)
1177 {
1178         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1179         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1180         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1181         struct bch_read_bio *rbio;
1182
1183         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1184         rbio->bio.bi_end_io = bch2_readpages_end_io;
1185
1186         __bchfs_readpage(c, rbio, inode_inum(inode), page);
1187         return 0;
1188 }
1189
1190 static void bch2_read_single_page_end_io(struct bio *bio)
1191 {
1192         complete(bio->bi_private);
1193 }
1194
1195 static int bch2_read_single_page(struct page *page,
1196                                  struct address_space *mapping)
1197 {
1198         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1199         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1200         struct bch_read_bio *rbio;
1201         int ret;
1202         DECLARE_COMPLETION_ONSTACK(done);
1203
1204         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1205                          io_opts(c, &inode->ei_inode));
1206         rbio->bio.bi_private = &done;
1207         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1208
1209         __bchfs_readpage(c, rbio, inode_inum(inode), page);
1210         wait_for_completion(&done);
1211
1212         ret = blk_status_to_errno(rbio->bio.bi_status);
1213         bio_put(&rbio->bio);
1214
1215         if (ret < 0)
1216                 return ret;
1217
1218         SetPageUptodate(page);
1219         return 0;
1220 }
1221
1222 /* writepages: */
1223
1224 struct bch_writepage_state {
1225         struct bch_writepage_io *io;
1226         struct bch_io_opts      opts;
1227 };
1228
1229 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1230                                                                   struct bch_inode_info *inode)
1231 {
1232         return (struct bch_writepage_state) {
1233                 .opts = io_opts(c, &inode->ei_inode)
1234         };
1235 }
1236
1237 static void bch2_writepage_io_free(struct closure *cl)
1238 {
1239         struct bch_writepage_io *io = container_of(cl,
1240                                         struct bch_writepage_io, cl);
1241
1242         bio_put(&io->op.wbio.bio);
1243 }
1244
1245 static void bch2_writepage_io_done(struct closure *cl)
1246 {
1247         struct bch_writepage_io *io = container_of(cl,
1248                                         struct bch_writepage_io, cl);
1249         struct bch_fs *c = io->op.c;
1250         struct bio *bio = &io->op.wbio.bio;
1251         struct bvec_iter_all iter;
1252         struct bio_vec *bvec;
1253         unsigned i;
1254
1255         up(&io->op.c->io_in_flight);
1256
1257         if (io->op.error) {
1258                 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1259
1260                 bio_for_each_segment_all(bvec, bio, iter) {
1261                         struct bch_page_state *s;
1262
1263                         SetPageError(bvec->bv_page);
1264                         mapping_set_error(bvec->bv_page->mapping, -EIO);
1265
1266                         s = __bch2_page_state(bvec->bv_page);
1267                         spin_lock(&s->lock);
1268                         for (i = 0; i < PAGE_SECTORS; i++)
1269                                 s->s[i].nr_replicas = 0;
1270                         spin_unlock(&s->lock);
1271                 }
1272         }
1273
1274         if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1275                 bio_for_each_segment_all(bvec, bio, iter) {
1276                         struct bch_page_state *s;
1277
1278                         s = __bch2_page_state(bvec->bv_page);
1279                         spin_lock(&s->lock);
1280                         for (i = 0; i < PAGE_SECTORS; i++)
1281                                 s->s[i].nr_replicas = 0;
1282                         spin_unlock(&s->lock);
1283                 }
1284         }
1285
1286         /*
1287          * racing with fallocate can cause us to add fewer sectors than
1288          * expected - but we shouldn't add more sectors than expected:
1289          */
1290         WARN_ON_ONCE(io->op.i_sectors_delta > 0);
1291
1292         /*
1293          * An error partway through a page (e.g. due to the fs going RO) can
1294          * throw that accounting off slightly, which is why this stricter
1295          * assertion is left disabled:
1296          * XXX: BUG_ON(io->op.i_sectors_delta >= PAGE_SECTORS);
1297          */
1298
1299         /*
1300          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1301          * before calling end_page_writeback:
1302          */
1303         i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1304
1305         bio_for_each_segment_all(bvec, bio, iter) {
1306                 struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
1307
1308                 if (atomic_dec_and_test(&s->write_count))
1309                         end_page_writeback(bvec->bv_page);
1310         }
1311
1312         closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1313 }
1314
1315 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1316 {
1317         struct bch_writepage_io *io = w->io;
1318
1319         down(&io->op.c->io_in_flight);
1320
1321         w->io = NULL;
1322         closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
1323         continue_at(&io->cl, bch2_writepage_io_done, NULL);
1324 }
1325
1326 /*
1327  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1328  * possible, else allocating a new one:
1329  */
1330 static void bch2_writepage_io_alloc(struct bch_fs *c,
1331                                     struct writeback_control *wbc,
1332                                     struct bch_writepage_state *w,
1333                                     struct bch_inode_info *inode,
1334                                     u64 sector,
1335                                     unsigned nr_replicas)
1336 {
1337         struct bch_write_op *op;
1338
1339         w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
1340                                               &c->writepage_bioset),
1341                              struct bch_writepage_io, op.wbio.bio);
1342
1343         closure_init(&w->io->cl, NULL);
1344         w->io->inode            = inode;
1345
1346         op                      = &w->io->op;
1347         bch2_write_op_init(op, c, w->opts);
1348         op->target              = w->opts.foreground_target;
1349         op->nr_replicas         = nr_replicas;
1350         op->res.nr_replicas     = nr_replicas;
1351         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1352         op->subvol              = inode->ei_subvol;
1353         op->pos                 = POS(inode->v.i_ino, sector);
1354         op->wbio.bio.bi_iter.bi_sector = sector;
1355         op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
1356 }
1357
1358 static int __bch2_writepage(struct page *page,
1359                             struct writeback_control *wbc,
1360                             void *data)
1361 {
1362         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1363         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1364         struct bch_writepage_state *w = data;
1365         struct bch_page_state *s, orig;
1366         unsigned i, offset, nr_replicas_this_write = U32_MAX;
1367         loff_t i_size = i_size_read(&inode->v);
1368         pgoff_t end_index = i_size >> PAGE_SHIFT;
1369         int ret;
1370
1371         EBUG_ON(!PageUptodate(page));
1372
1373         /* Is the page fully inside i_size? */
1374         if (page->index < end_index)
1375                 goto do_io;
1376
1377         /* Is the page fully outside i_size? (truncate in progress) */
1378         offset = i_size & (PAGE_SIZE - 1);
1379         if (page->index > end_index || !offset) {
1380                 unlock_page(page);
1381                 return 0;
1382         }
1383
1384         /*
1385          * The page straddles i_size.  It must be zeroed out on each and every
1386          * writepage invocation because it may be mmapped.  "A file is mapped
1387          * in multiples of the page size.  For a file that is not a multiple of
1388          * the  page size, the remaining memory is zeroed when mapped, and
1389          * writes to that region are not written out to the file."
1390          */
1391         zero_user_segment(page, offset, PAGE_SIZE);
1392 do_io:
1393         s = bch2_page_state_create(page, __GFP_NOFAIL);
1394
1395         /*
1396          * Things get really hairy with errors during writeback:
1397          */
1398         ret = bch2_get_page_disk_reservation(c, inode, page, false);
1399         BUG_ON(ret);
1400
1401         /* Before unlocking the page, get copy of reservations: */
1402         spin_lock(&s->lock);
1403         orig = *s;
1404         spin_unlock(&s->lock);
1405
1406         for (i = 0; i < PAGE_SECTORS; i++) {
1407                 if (s->s[i].state < SECTOR_DIRTY)
1408                         continue;
1409
1410                 nr_replicas_this_write =
1411                         min_t(unsigned, nr_replicas_this_write,
1412                               s->s[i].nr_replicas +
1413                               s->s[i].replicas_reserved);
1414         }
1415
1416         for (i = 0; i < PAGE_SECTORS; i++) {
1417                 if (s->s[i].state < SECTOR_DIRTY)
1418                         continue;
1419
1420                 s->s[i].nr_replicas = w->opts.compression
1421                         ? 0 : nr_replicas_this_write;
1422
1423                 s->s[i].replicas_reserved = 0;
1424                 s->s[i].state = SECTOR_ALLOCATED;
1425         }
1426
1427         BUG_ON(atomic_read(&s->write_count));
1428         atomic_set(&s->write_count, 1);
1429
1430         BUG_ON(PageWriteback(page));
1431         set_page_writeback(page);
1432
1433         unlock_page(page);
1434
1435         offset = 0;
1436         while (1) {
1437                 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1438                 u64 sector;
1439
1440                 while (offset < PAGE_SECTORS &&
1441                        orig.s[offset].state < SECTOR_DIRTY)
1442                         offset++;
1443
1444                 if (offset == PAGE_SECTORS)
1445                         break;
1446
1447                 while (offset + sectors < PAGE_SECTORS &&
1448                        orig.s[offset + sectors].state >= SECTOR_DIRTY) {
1449                         reserved_sectors += orig.s[offset + sectors].replicas_reserved;
1450                         dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
1451                         sectors++;
1452                 }
1453                 BUG_ON(!sectors);
1454
1455                 sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;
1456
1457                 if (w->io &&
1458                     (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1459                      bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
1460                      w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1461                      (BIO_MAX_VECS * PAGE_SIZE) ||
1462                      bio_end_sector(&w->io->op.wbio.bio) != sector))
1463                         bch2_writepage_do_io(w);
1464
1465                 if (!w->io)
1466                         bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1467                                                 nr_replicas_this_write);
1468
1469                 atomic_inc(&s->write_count);
1470
1471                 BUG_ON(inode != w->io->inode);
1472                 BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
1473                                      sectors << 9, offset << 9));
1474
1475                 /* Check for writing past i_size: */
1476                 WARN_ON_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1477                              round_up(i_size, block_bytes(c)));
1478
1479                 w->io->op.res.sectors += reserved_sectors;
1480                 w->io->op.i_sectors_delta -= dirty_sectors;
1481                 w->io->op.new_i_size = i_size;
1482
1483                 offset += sectors;
1484         }
1485
1486         if (atomic_dec_and_test(&s->write_count))
1487                 end_page_writeback(page);
1488
1489         return 0;
1490 }
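/*
 * __bch2_writepage() batches contiguous runs of dirty sectors within a page
 * into the current bch_writepage_io: w->io is flushed with
 * bch2_writepage_do_io() and reallocated whenever the required replica count
 * changes, the bio is full, or the next run doesn't start at
 * bio_end_sector() of the bio being built.
 */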
1491
1492 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1493 {
1494         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1495         struct bch_writepage_state w =
1496                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1497         struct blk_plug plug;
1498         int ret;
1499
1500         blk_start_plug(&plug);
1501         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1502         if (w.io)
1503                 bch2_writepage_do_io(&w);
1504         blk_finish_plug(&plug);
1505         return ret;
1506 }
1507
1508 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1509 {
1510         struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1511         struct bch_writepage_state w =
1512                 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1513         int ret;
1514
1515         ret = __bch2_writepage(page, wbc, &w);
1516         if (w.io)
1517                 bch2_writepage_do_io(&w);
1518
1519         return ret;
1520 }
1521
1522 /* buffered writes: */
1523
1524 int bch2_write_begin(struct file *file, struct address_space *mapping,
1525                      loff_t pos, unsigned len, unsigned flags,
1526                      struct page **pagep, void **fsdata)
1527 {
1528         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1529         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1530         struct bch2_page_reservation *res;
1531         pgoff_t index = pos >> PAGE_SHIFT;
1532         unsigned offset = pos & (PAGE_SIZE - 1);
1533         struct page *page;
1534         int ret = -ENOMEM;
1535
1536         res = kmalloc(sizeof(*res), GFP_KERNEL);
1537         if (!res)
1538                 return -ENOMEM;
1539
1540         bch2_page_reservation_init(c, inode, res);
1541         *fsdata = res;
1542
1543         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1544
1545         page = grab_cache_page_write_begin(mapping, index, flags);
1546         if (!page)
1547                 goto err_unlock;
1548
1549         if (PageUptodate(page))
1550                 goto out;
1551
1552         /* If we're writing the entire page, we don't need to read it in first: */
1553         if (len == PAGE_SIZE)
1554                 goto out;
1555
1556         if (!offset && pos + len >= inode->v.i_size) {
1557                 zero_user_segment(page, len, PAGE_SIZE);
1558                 flush_dcache_page(page);
1559                 goto out;
1560         }
1561
1562         if (index > inode->v.i_size >> PAGE_SHIFT) {
1563                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1564                 flush_dcache_page(page);
1565                 goto out;
1566         }
1567 readpage:
1568         ret = bch2_read_single_page(page, mapping);
1569         if (ret)
1570                 goto err;
1571 out:
1572         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1573                 ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
1574                 if (ret)
1575                         goto err;
1576         }
1577
1578         ret = bch2_page_reservation_get(c, inode, page, res,
1579                                         offset, len, true);
1580         if (ret) {
1581                 if (!PageUptodate(page)) {
1582                         /*
1583                          * If the page hasn't been read in, we won't know if we
1584                          * actually need a reservation - we don't actually need
1585                          * to read here, we just need to check if the page is
1586                          * fully backed by uncompressed data:
1587                          */
1588                         goto readpage;
1589                 }
1590
1591                 goto err;
1592         }
1593
1594         *pagep = page;
1595         return 0;
1596 err:
1597         unlock_page(page);
1598         put_page(page);
1599         *pagep = NULL;
1600 err_unlock:
1601         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1602         kfree(res);
1603         *fsdata = NULL;
1604         return ret;
1605 }
1606
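/*
 * .write_end: if the copy was short on a !Uptodate page the write is thrown
 * away and userspace must retry; otherwise update i_size, mark the page
 * uptodate and dirty, and release the reservation taken in bch2_write_begin().
 */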
1607 int bch2_write_end(struct file *file, struct address_space *mapping,
1608                    loff_t pos, unsigned len, unsigned copied,
1609                    struct page *page, void *fsdata)
1610 {
1611         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1612         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1613         struct bch2_page_reservation *res = fsdata;
1614         unsigned offset = pos & (PAGE_SIZE - 1);
1615
1616         lockdep_assert_held(&inode->v.i_rwsem);
1617
1618         if (unlikely(copied < len && !PageUptodate(page))) {
1619                 /*
1620                  * The page needs to be read in, but that would destroy
1621                  * our partial write - simplest thing is to just force
1622                  * userspace to redo the write:
1623                  */
1624                 zero_user(page, 0, PAGE_SIZE);
1625                 flush_dcache_page(page);
1626                 copied = 0;
1627         }
1628
1629         spin_lock(&inode->v.i_lock);
1630         if (pos + copied > inode->v.i_size)
1631                 i_size_write(&inode->v, pos + copied);
1632         spin_unlock(&inode->v.i_lock);
1633
1634         if (copied) {
1635                 if (!PageUptodate(page))
1636                         SetPageUptodate(page);
1637
1638                 bch2_set_page_dirty(c, inode, page, res, offset, copied);
1639
1640                 inode->ei_last_dirtied = (unsigned long) current;
1641         }
1642
1643         unlock_page(page);
1644         put_page(page);
1645         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1646
1647         bch2_page_reservation_put(c, inode, res);
1648         kfree(res);
1649
1650         return copied;
1651 }
1652
1653 #define WRITE_BATCH_PAGES       32
1654
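/*
 * Core of the buffered write path: lock up to WRITE_BATCH_PAGES pages, read in
 * any partially overwritten edge pages, reserve quota/disk space for the
 * range, copy from the iter, then mark the copied range uptodate and dirty.
 * Returns the number of bytes copied, or a negative error if nothing was
 * copied.
 */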
1655 static int __bch2_buffered_write(struct bch_inode_info *inode,
1656                                  struct address_space *mapping,
1657                                  struct iov_iter *iter,
1658                                  loff_t pos, unsigned len)
1659 {
1660         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1661         struct page *pages[WRITE_BATCH_PAGES];
1662         struct bch2_page_reservation res;
1663         unsigned long index = pos >> PAGE_SHIFT;
1664         unsigned offset = pos & (PAGE_SIZE - 1);
1665         unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1666         unsigned i, reserved = 0, set_dirty = 0;
1667         unsigned copied = 0, nr_pages_copied = 0;
1668         int ret = 0;
1669
1670         BUG_ON(!len);
1671         BUG_ON(nr_pages > ARRAY_SIZE(pages));
1672
1673         bch2_page_reservation_init(c, inode, &res);
1674
1675         for (i = 0; i < nr_pages; i++) {
1676                 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1677                 if (!pages[i]) {
1678                         nr_pages = i;
1679                         if (!i) {
1680                                 ret = -ENOMEM;
1681                                 goto out;
1682                         }
1683                         len = min_t(unsigned, len,
1684                                     nr_pages * PAGE_SIZE - offset);
1685                         break;
1686                 }
1687         }
1688
1689         if (offset && !PageUptodate(pages[0])) {
1690                 ret = bch2_read_single_page(pages[0], mapping);
1691                 if (ret)
1692                         goto out;
1693         }
1694
1695         if ((pos + len) & (PAGE_SIZE - 1) &&
1696             !PageUptodate(pages[nr_pages - 1])) {
1697                 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1698                         zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1699                 } else {
1700                         ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1701                         if (ret)
1702                                 goto out;
1703                 }
1704         }
1705
1706         while (reserved < len) {
1707                 unsigned i = (offset + reserved) >> PAGE_SHIFT;
1708                 struct page *page = pages[i];
1709                 unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
1710                 unsigned pg_len = min_t(unsigned, len - reserved,
1711                                         PAGE_SIZE - pg_offset);
1712
1713                 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1714                         ret = bch2_page_state_set(c, inode_inum(inode),
1715                                                   pages + i, nr_pages - i);
1716                         if (ret)
1717                                 goto out;
1718                 }
1719
1720                 ret = bch2_page_reservation_get(c, inode, page, &res,
1721                                                 pg_offset, pg_len, true);
1722                 if (ret)
1723                         goto out;
1724
1725                 reserved += pg_len;
1726         }
1727
1728         if (mapping_writably_mapped(mapping))
1729                 for (i = 0; i < nr_pages; i++)
1730                         flush_dcache_page(pages[i]);
1731
1732         while (copied < len) {
1733                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1734                 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1735                 unsigned pg_len = min_t(unsigned, len - copied,
1736                                         PAGE_SIZE - pg_offset);
1737                 unsigned pg_copied = copy_page_from_iter_atomic(page,
1738                                                 pg_offset, pg_len, iter);
1739
1740                 if (!pg_copied)
1741                         break;
1742
1743                 if (!PageUptodate(page) &&
1744                     pg_copied != PAGE_SIZE &&
1745                     pos + copied + pg_copied < inode->v.i_size) {
1746                         zero_user(page, 0, PAGE_SIZE);
1747                         break;
1748                 }
1749
1750                 flush_dcache_page(page);
1751                 copied += pg_copied;
1752
1753                 if (pg_copied != pg_len)
1754                         break;
1755         }
1756
1757         if (!copied)
1758                 goto out;
1759
1760         spin_lock(&inode->v.i_lock);
1761         if (pos + copied > inode->v.i_size)
1762                 i_size_write(&inode->v, pos + copied);
1763         spin_unlock(&inode->v.i_lock);
1764
1765         while (set_dirty < copied) {
1766                 struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
1767                 unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
1768                 unsigned pg_len = min_t(unsigned, copied - set_dirty,
1769                                         PAGE_SIZE - pg_offset);
1770
1771                 if (!PageUptodate(page))
1772                         SetPageUptodate(page);
1773
1774                 bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
1775                 unlock_page(page);
1776                 put_page(page);
1777
1778                 set_dirty += pg_len;
1779         }
1780
1781         nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1782         inode->ei_last_dirtied = (unsigned long) current;
1783 out:
1784         for (i = nr_pages_copied; i < nr_pages; i++) {
1785                 unlock_page(pages[i]);
1786                 put_page(pages[i]);
1787         }
1788
1789         bch2_page_reservation_put(c, inode, &res);
1790
1791         return copied ?: ret;
1792 }
1793
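/*
 * Outer buffered write loop: faults in the source pages first (see the comment
 * below), then calls __bch2_buffered_write() in WRITE_BATCH_PAGES sized
 * chunks, falling back to single page, single segment copies when an atomic
 * usercopy can't make progress.
 */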
1794 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1795 {
1796         struct file *file = iocb->ki_filp;
1797         struct address_space *mapping = file->f_mapping;
1798         struct bch_inode_info *inode = file_bch_inode(file);
1799         loff_t pos = iocb->ki_pos;
1800         ssize_t written = 0;
1801         int ret = 0;
1802
1803         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1804
1805         do {
1806                 unsigned offset = pos & (PAGE_SIZE - 1);
1807                 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1808                               PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1809 again:
1810                 /*
1811                  * Bring in the user page that we will copy from _first_.
1812                  * Otherwise there's a nasty deadlock on copying from the
1813                  * same page as we're writing to, without it being marked
1814                  * up-to-date.
1815                  *
1816                  * Not only is this an optimisation, but it is also required
1817                  * to check that the address is actually valid, when atomic
1818                  * usercopies are used, below.
1819                  */
1820                 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
1821                         bytes = min_t(unsigned long, iov_iter_count(iter),
1822                                       PAGE_SIZE - offset);
1823
1824                         if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
1825                                 ret = -EFAULT;
1826                                 break;
1827                         }
1828                 }
1829
1830                 if (unlikely(fatal_signal_pending(current))) {
1831                         ret = -EINTR;
1832                         break;
1833                 }
1834
1835                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1836                 if (unlikely(ret < 0))
1837                         break;
1838
1839                 cond_resched();
1840
1841                 if (unlikely(ret == 0)) {
1842                         /*
1843                          * If we were unable to copy any data at all, we must
1844                          * fall back to a single segment length write.
1845                          *
1846                          * If we didn't fall back here, we could livelock
1847                          * because not all segments in the iov can be copied at
1848                          * once without a pagefault.
1849                          */
1850                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
1851                                       iov_iter_single_seg_count(iter));
1852                         goto again;
1853                 }
1854                 pos += ret;
1855                 written += ret;
1856                 ret = 0;
1857
1858                 balance_dirty_pages_ratelimited(mapping);
1859         } while (iov_iter_count(iter));
1860
1861         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1862
1863         return written ? written : ret;
1864 }
1865
1866 /* O_DIRECT reads */
1867
1868 static void bio_check_or_release(struct bio *bio, bool check_dirty)
1869 {
1870         if (check_dirty) {
1871                 bio_check_pages_dirty(bio);
1872         } else {
1873                 bio_release_pages(bio, false);
1874                 bio_put(bio);
1875         }
1876 }
1877
1878 static void bch2_dio_read_complete(struct closure *cl)
1879 {
1880         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1881
1882         dio->req->ki_complete(dio->req, dio->ret);
1883         bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1884 }
1885
1886 static void bch2_direct_IO_read_endio(struct bio *bio)
1887 {
1888         struct dio_read *dio = bio->bi_private;
1889
1890         if (bio->bi_status)
1891                 dio->ret = blk_status_to_errno(bio->bi_status);
1892
1893         closure_put(&dio->cl);
1894 }
1895
1896 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1897 {
1898         struct dio_read *dio = bio->bi_private;
1899         bool should_dirty = dio->should_dirty;
1900
1901         bch2_direct_IO_read_endio(bio);
1902         bio_check_or_release(bio, should_dirty);
1903 }
1904
1905 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1906 {
1907         struct file *file = req->ki_filp;
1908         struct bch_inode_info *inode = file_bch_inode(file);
1909         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1910         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1911         struct dio_read *dio;
1912         struct bio *bio;
1913         loff_t offset = req->ki_pos;
1914         bool sync = is_sync_kiocb(req);
1915         size_t shorten;
1916         ssize_t ret;
1917
1918         if ((offset|iter->count) & (block_bytes(c) - 1))
1919                 return -EINVAL;
1920
1921         ret = min_t(loff_t, iter->count,
1922                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1923
1924         if (!ret)
1925                 return ret;
1926
1927         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1928         iter->count -= shorten;
1929
1930         bio = bio_alloc_bioset(GFP_KERNEL,
1931                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
1932                                &c->dio_read_bioset);
1933
1934         bio->bi_end_io = bch2_direct_IO_read_endio;
1935
1936         dio = container_of(bio, struct dio_read, rbio.bio);
1937         closure_init(&dio->cl, NULL);
1938
1939         /*
1940          * this is a _really_ horrible hack just to avoid an atomic sub at the
1941          * end:
1942          */
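        /*
         * In the async case the remaining count is set up so that the final
         * closure_put() from the last bio's endio runs
         * bch2_dio_read_complete() as the closure's destructor; in the sync
         * case we instead hold an extra ref and wait with closure_sync()
         * below.
         */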
1943         if (!sync) {
1944                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1945                 atomic_set(&dio->cl.remaining,
1946                            CLOSURE_REMAINING_INITIALIZER -
1947                            CLOSURE_RUNNING +
1948                            CLOSURE_DESTRUCTOR);
1949         } else {
1950                 atomic_set(&dio->cl.remaining,
1951                            CLOSURE_REMAINING_INITIALIZER + 1);
1952         }
1953
1954         dio->req        = req;
1955         dio->ret        = ret;
1956         /*
1957          * This is one of the sketchier things I've encountered: we have to skip
1958          * the dirtying of requests that are internal from the kernel (i.e. from
1959          * loopback), because we'll deadlock on page_lock.
1960          */
1961         dio->should_dirty = iter_is_iovec(iter);
1962
1963         goto start;
1964         while (iter->count) {
1965                 bio = bio_alloc_bioset(GFP_KERNEL,
1966                                        bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
1967                                        &c->bio_read);
1968                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1969 start:
1970                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1971                 bio->bi_iter.bi_sector  = offset >> 9;
1972                 bio->bi_private         = dio;
1973
1974                 ret = bio_iov_iter_get_pages(bio, iter);
1975                 if (ret < 0) {
1976                         /* XXX: fault inject this path */
1977                         bio->bi_status = BLK_STS_RESOURCE;
1978                         bio_endio(bio);
1979                         break;
1980                 }
1981
1982                 offset += bio->bi_iter.bi_size;
1983
1984                 if (dio->should_dirty)
1985                         bio_set_pages_dirty(bio);
1986
1987                 if (iter->count)
1988                         closure_get(&dio->cl);
1989
1990                 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
1991         }
1992
1993         iter->count += shorten;
1994
1995         if (sync) {
1996                 closure_sync(&dio->cl);
1997                 closure_debug_destroy(&dio->cl);
1998                 ret = dio->ret;
1999                 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2000                 return ret;
2001         } else {
2002                 return -EIOCBQUEUED;
2003         }
2004 }
2005
2006 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2007 {
2008         struct file *file = iocb->ki_filp;
2009         struct bch_inode_info *inode = file_bch_inode(file);
2010         struct address_space *mapping = file->f_mapping;
2011         size_t count = iov_iter_count(iter);
2012         ssize_t ret;
2013
2014         if (!count)
2015                 return 0; /* skip atime */
2016
2017         if (iocb->ki_flags & IOCB_DIRECT) {
2018                 struct blk_plug plug;
2019
2020                 ret = filemap_write_and_wait_range(mapping,
2021                                         iocb->ki_pos,
2022                                         iocb->ki_pos + count - 1);
2023                 if (ret < 0)
2024                         return ret;
2025
2026                 file_accessed(file);
2027
2028                 blk_start_plug(&plug);
2029                 ret = bch2_direct_IO_read(iocb, iter);
2030                 blk_finish_plug(&plug);
2031
2032                 if (ret >= 0)
2033                         iocb->ki_pos += ret;
2034         } else {
2035                 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
2036                 ret = generic_file_read_iter(iocb, iter);
2037                 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
2038         }
2039
2040         return ret;
2041 }
2042
2043 /* O_DIRECT writes */
2044
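/*
 * Returns true if every extent in the range already has at least nr_replicas
 * replicas (and, if !compressed, no compressed data), i.e. an overwrite can
 * proceed without taking a new disk reservation.
 */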
2045 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2046                                        u64 offset, u64 size,
2047                                        unsigned nr_replicas, bool compressed)
2048 {
2049         struct btree_trans trans;
2050         struct btree_iter iter;
2051         struct bkey_s_c k;
2052         u64 end = offset + size;
2053         u32 snapshot;
2054         bool ret = true;
2055         int err;
2056
2057         bch2_trans_init(&trans, c, 0, 0);
2058 retry:
2059         bch2_trans_begin(&trans);
2060
2061         err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2062         if (err)
2063                 goto err;
2064
2065         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2066                            SPOS(inum.inum, offset, snapshot),
2067                            BTREE_ITER_SLOTS, k, err) {
2068                 if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
2069                         break;
2070
2071                 if (k.k->p.snapshot != snapshot ||
2072                     nr_replicas > bch2_bkey_replicas(c, k) ||
2073                     (!compressed && bch2_bkey_sectors_compressed(k))) {
2074                         ret = false;
2075                         break;
2076                 }
2077         }
2078
2079         offset = iter.pos.offset;
2080         bch2_trans_iter_exit(&trans, &iter);
2081 err:
2082         if (err == -EINTR)
2083                 goto retry;
2084         bch2_trans_exit(&trans);
2085
2086         return err ? false : ret;
2087 }
2088
2089 static void bch2_dio_write_loop_async(struct bch_write_op *);
2090
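/*
 * O_DIRECT write loop: pins the user pages with page faults disabled against
 * our own mapping (so a source buffer mmapped from the file being written
 * can't deadlock on the pagecache lock), builds and submits a bch_write op per
 * iteration, and either waits for completion (sync) or is re-entered from
 * bch2_dio_write_loop_async().
 */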
2091 static long bch2_dio_write_loop(struct dio_write *dio)
2092 {
2093         bool kthread = (current->flags & PF_KTHREAD) != 0;
2094         struct kiocb *req = dio->req;
2095         struct address_space *mapping = req->ki_filp->f_mapping;
2096         struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
2097         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2098         struct bio *bio = &dio->op.wbio.bio;
2099         struct bvec_iter_all iter;
2100         struct bio_vec *bv;
2101         unsigned unaligned, iter_count;
2102         bool sync = dio->sync, dropped_locks;
2103         long ret;
2104
2105         if (dio->loop)
2106                 goto loop;
2107
2108         down(&c->io_in_flight);
2109
2110         while (1) {
2111                 iter_count = dio->iter.count;
2112
2113                 if (kthread && dio->mm)
2114                         kthread_use_mm(dio->mm);
2115                 BUG_ON(current->faults_disabled_mapping);
2116                 current->faults_disabled_mapping = mapping;
2117
2118                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2119
2120                 dropped_locks = fdm_dropped_locks();
2121
2122                 current->faults_disabled_mapping = NULL;
2123                 if (kthread && dio->mm)
2124                         kthread_unuse_mm(dio->mm);
2125
2126                 /*
2127                  * If the fault handler returned an error but also signalled
2128                  * that it dropped & retook ei_pagecache_lock, we just need to
2129                  * re-shoot down the page cache and retry:
2130                  */
2131                 if (dropped_locks && ret)
2132                         ret = 0;
2133
2134                 if (unlikely(ret < 0))
2135                         goto err;
2136
2137                 if (unlikely(dropped_locks)) {
2138                         ret = write_invalidate_inode_pages_range(mapping,
2139                                         req->ki_pos,
2140                                         req->ki_pos + iter_count - 1);
2141                         if (unlikely(ret))
2142                                 goto err;
2143
2144                         if (!bio->bi_iter.bi_size)
2145                                 continue;
2146                 }
2147
2148                 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2149                 bio->bi_iter.bi_size -= unaligned;
2150                 iov_iter_revert(&dio->iter, unaligned);
2151
2152                 if (!bio->bi_iter.bi_size) {
2153                         /*
2154                          * bio_iov_iter_get_pages was only able to get <
2155                          * blocksize worth of pages:
2156                          */
2157                         ret = -EFAULT;
2158                         goto err;
2159                 }
2160
2161                 bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
2162                 dio->op.end_io          = bch2_dio_write_loop_async;
2163                 dio->op.target          = dio->op.opts.foreground_target;
2164                 dio->op.write_point     = writepoint_hashed((unsigned long) current);
2165                 dio->op.nr_replicas     = dio->op.opts.data_replicas;
2166                 dio->op.subvol          = inode->ei_subvol;
2167                 dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2168
2169                 if ((req->ki_flags & IOCB_DSYNC) &&
2170                     !c->opts.journal_flush_disabled)
2171                         dio->op.flags |= BCH_WRITE_FLUSH;
2172                 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2173
2174                 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2175                                                 dio->op.opts.data_replicas, 0);
2176                 if (unlikely(ret) &&
2177                     !bch2_check_range_allocated(c, inode_inum(inode),
2178                                 dio->op.pos.offset, bio_sectors(bio),
2179                                 dio->op.opts.data_replicas,
2180                                 dio->op.opts.compression != 0))
2181                         goto err;
2182
2183                 task_io_account_write(bio->bi_iter.bi_size);
2184
2185                 if (!dio->sync && !dio->loop && dio->iter.count) {
2186                         struct iovec *iov = dio->inline_vecs;
2187
2188                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2189                                 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
2190                                               GFP_KERNEL);
2191                                 if (unlikely(!iov)) {
2192                                         dio->sync = sync = true;
2193                                         goto do_io;
2194                                 }
2195
2196                                 dio->free_iov = true;
2197                         }
2198
2199                         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2200                         dio->iter.iov = iov;
2201                 }
2202 do_io:
2203                 dio->loop = true;
2204                 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2205
2206                 if (sync)
2207                         wait_for_completion(&dio->done);
2208                 else
2209                         return -EIOCBQUEUED;
2210 loop:
2211                 i_sectors_acct(c, inode, &dio->quota_res,
2212                                dio->op.i_sectors_delta);
2213                 req->ki_pos += (u64) dio->op.written << 9;
2214                 dio->written += dio->op.written;
2215
2216                 spin_lock(&inode->v.i_lock);
2217                 if (req->ki_pos > inode->v.i_size)
2218                         i_size_write(&inode->v, req->ki_pos);
2219                 spin_unlock(&inode->v.i_lock);
2220
2221                 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2222                         bio_for_each_segment_all(bv, bio, iter)
2223                                 put_page(bv->bv_page);
2224                 bio->bi_vcnt = 0;
2225
2226                 if (dio->op.error) {
2227                         set_bit(EI_INODE_ERROR, &inode->ei_flags);
2228                         break;
2229                 }
2230
2231                 if (!dio->iter.count)
2232                         break;
2233
2234                 bio_reset(bio);
2235                 reinit_completion(&dio->done);
2236         }
2237
2238         ret = dio->op.error ?: ((long) dio->written << 9);
2239 err:
2240         up(&c->io_in_flight);
2241         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2242         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2243
2244         if (dio->free_iov)
2245                 kfree(dio->iter.iov);
2246
2247         if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2248                 bio_for_each_segment_all(bv, bio, iter)
2249                         put_page(bv->bv_page);
2250         bio_put(bio);
2251
2252         /* inode->i_dio_count is our ref on inode and thus bch_fs */
2253         inode_dio_end(&inode->v);
2254
2255         if (!sync) {
2256                 req->ki_complete(req, ret);
2257                 ret = -EIOCBQUEUED;
2258         }
2259         return ret;
2260 }
2261
2262 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2263 {
2264         struct dio_write *dio = container_of(op, struct dio_write, op);
2265
2266         if (dio->sync)
2267                 complete(&dio->done);
2268         else
2269                 bch2_dio_write_loop(dio);
2270 }
2271
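/*
 * Entry point for O_DIRECT writes: does the usual write checks, sets up the
 * dio_write, and invalidates the pagecache over the range being written.
 * Writes that extend i_size are forced synchronous, since the inode lock is
 * held until they complete.
 */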
2272 static noinline
2273 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2274 {
2275         struct file *file = req->ki_filp;
2276         struct address_space *mapping = file->f_mapping;
2277         struct bch_inode_info *inode = file_bch_inode(file);
2278         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2279         struct dio_write *dio;
2280         struct bio *bio;
2281         bool locked = true, extending;
2282         ssize_t ret;
2283
2284         prefetch(&c->opts);
2285         prefetch((void *) &c->opts + 64);
2286         prefetch(&inode->ei_inode);
2287         prefetch((void *) &inode->ei_inode + 64);
2288
2289         inode_lock(&inode->v);
2290
2291         ret = generic_write_checks(req, iter);
2292         if (unlikely(ret <= 0))
2293                 goto err;
2294
2295         ret = file_remove_privs(file);
2296         if (unlikely(ret))
2297                 goto err;
2298
2299         ret = file_update_time(file);
2300         if (unlikely(ret))
2301                 goto err;
2302
2303         if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
2304                 goto err;
2305
2306         inode_dio_begin(&inode->v);
2307         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2308
2309         extending = req->ki_pos + iter->count > inode->v.i_size;
2310         if (!extending) {
2311                 inode_unlock(&inode->v);
2312                 locked = false;
2313         }
2314
2315         bio = bio_alloc_bioset(GFP_KERNEL,
2316                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2317                                &c->dio_write_bioset);
2318         dio = container_of(bio, struct dio_write, op.wbio.bio);
2319         init_completion(&dio->done);
2320         dio->req                = req;
2321         dio->mm                 = current->mm;
2322         dio->loop               = false;
2323         dio->sync               = is_sync_kiocb(req) || extending;
2324         dio->free_iov           = false;
2325         dio->quota_res.sectors  = 0;
2326         dio->written            = 0;
2327         dio->iter               = *iter;
2328
2329         ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2330                                          iter->count >> 9, true);
2331         if (unlikely(ret))
2332                 goto err_put_bio;
2333
2334         ret = write_invalidate_inode_pages_range(mapping,
2335                                         req->ki_pos,
2336                                         req->ki_pos + iter->count - 1);
2337         if (unlikely(ret))
2338                 goto err_put_bio;
2339
2340         ret = bch2_dio_write_loop(dio);
2341 err:
2342         if (locked)
2343                 inode_unlock(&inode->v);
2344         return ret;
2345 err_put_bio:
2346         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2347         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2348         bio_put(bio);
2349         inode_dio_end(&inode->v);
2350         goto err;
2351 }
2352
2353 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2354 {
2355         struct file *file = iocb->ki_filp;
2356         struct bch_inode_info *inode = file_bch_inode(file);
2357         ssize_t ret;
2358
2359         if (iocb->ki_flags & IOCB_DIRECT)
2360                 return bch2_direct_write(iocb, from);
2361
2362         /* We can write back this queue in page reclaim */
2363         current->backing_dev_info = inode_to_bdi(&inode->v);
2364         inode_lock(&inode->v);
2365
2366         ret = generic_write_checks(iocb, from);
2367         if (ret <= 0)
2368                 goto unlock;
2369
2370         ret = file_remove_privs(file);
2371         if (ret)
2372                 goto unlock;
2373
2374         ret = file_update_time(file);
2375         if (ret)
2376                 goto unlock;
2377
2378         ret = bch2_buffered_write(iocb, from);
2379         if (likely(ret > 0))
2380                 iocb->ki_pos += ret;
2381 unlock:
2382         inode_unlock(&inode->v);
2383         current->backing_dev_info = NULL;
2384
2385         if (ret > 0)
2386                 ret = generic_write_sync(iocb, ret);
2387
2388         return ret;
2389 }
2390
2391 /* fsync: */
2392
2393 /*
2394  * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2395  * insert trigger: look up the btree inode instead
2396  */
2397 static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
2398 {
2399         struct bch_inode_unpacked inode;
2400         int ret;
2401
2402         if (c->opts.journal_flush_disabled)
2403                 return 0;
2404
2405         ret = bch2_inode_find_by_inum(c, inum, &inode);
2406         if (ret)
2407                 return ret;
2408
2409         return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
2410 }
2411
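/*
 * fsync: flush dirty pages, write out dirty inode metadata, then flush the
 * inode's journal sequence number; the first error wins.
 */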
2412 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2413 {
2414         struct bch_inode_info *inode = file_bch_inode(file);
2415         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2416         int ret, ret2, ret3;
2417
2418         ret = file_write_and_wait_range(file, start, end);
2419         ret2 = sync_inode_metadata(&inode->v, 1);
2420         ret3 = bch2_flush_inode(c, inode_inum(inode));
2421
2422         return ret ?: ret2 ?: ret3;
2423 }
2424
2425 /* truncate: */
2426
2427 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2428                                  struct bpos start,
2429                                  struct bpos end)
2430 {
2431         struct btree_trans trans;
2432         struct btree_iter iter;
2433         struct bkey_s_c k;
2434         int ret = 0;
2435
2436         bch2_trans_init(&trans, c, 0, 0);
2437 retry:
2438         bch2_trans_begin(&trans);
2439
2440         ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2441         if (ret)
2442                 goto err;
2443
2444         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
2445                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2446                         break;
2447
2448                 if (bkey_extent_is_data(k.k)) {
2449                         ret = 1;
2450                         break;
2451                 }
2452         }
2453         start = iter.pos;
2454         bch2_trans_iter_exit(&trans, &iter);
2455 err:
2456         if (ret == -EINTR)
2457                 goto retry;
2458
2459         bch2_trans_exit(&trans);
2460         return ret;
2461 }
2462
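/*
 * Zero out the part of a page left partially covered by a truncate or hole
 * punch, dropping the affected sectors' reservations.  The return value tells
 * the caller whether writeback of this page will handle the i_size update
 * (see the comment in the body); negative on error.
 */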
2463 static int __bch2_truncate_page(struct bch_inode_info *inode,
2464                                 pgoff_t index, loff_t start, loff_t end)
2465 {
2466         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2467         struct address_space *mapping = inode->v.i_mapping;
2468         struct bch_page_state *s;
2469         unsigned start_offset = start & (PAGE_SIZE - 1);
2470         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2471         unsigned i;
2472         struct page *page;
2473         s64 i_sectors_delta = 0;
2474         int ret = 0;
2475
2476         /* Page boundary? Nothing to do */
2477         if (!((index == start >> PAGE_SHIFT && start_offset) ||
2478               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2479                 return 0;
2480
2481         /* Above i_size? */
2482         if (index << PAGE_SHIFT >= inode->v.i_size)
2483                 return 0;
2484
2485         page = find_lock_page(mapping, index);
2486         if (!page) {
2487                 /*
2488                  * XXX: we're doing two index lookups when we end up reading the
2489                  * page
2490                  */
2491                 ret = range_has_data(c, inode->ei_subvol,
2492                                 POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
2493                                 POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
2494                 if (ret <= 0)
2495                         return ret;
2496
2497                 page = find_or_create_page(mapping, index, GFP_KERNEL);
2498                 if (unlikely(!page)) {
2499                         ret = -ENOMEM;
2500                         goto out;
2501                 }
2502         }
2503
2504         s = bch2_page_state_create(page, 0);
2505         if (!s) {
2506                 ret = -ENOMEM;
2507                 goto unlock;
2508         }
2509
2510         if (!PageUptodate(page)) {
2511                 ret = bch2_read_single_page(page, mapping);
2512                 if (ret)
2513                         goto unlock;
2514         }
2515
2516         if (index != start >> PAGE_SHIFT)
2517                 start_offset = 0;
2518         if (index != end >> PAGE_SHIFT)
2519                 end_offset = PAGE_SIZE;
2520
2521         for (i = round_up(start_offset, block_bytes(c)) >> 9;
2522              i < round_down(end_offset, block_bytes(c)) >> 9;
2523              i++) {
2524                 s->s[i].nr_replicas     = 0;
2525                 if (s->s[i].state == SECTOR_DIRTY)
2526                         i_sectors_delta--;
2527                 s->s[i].state           = SECTOR_UNALLOCATED;
2528         }
2529
2530         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2531
2532         /*
2533          * Caller needs to know whether this page will be written out by
2534          * writeback - doing an i_size update if necessary - or whether it will
2535          * be responsible for the i_size update:
2536          */
2537         ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
2538                           PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;
2539
2540         zero_user_segment(page, start_offset, end_offset);
2541
2542         /*
2543          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2544          *
2545          * XXX: because we aren't currently tracking whether the page has actual
2546          * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2547          */
2548         BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));
2549
2550         /*
2551          * This removes any writeable userspace mappings; we need to force
2552          * .page_mkwrite to be called again before any mmapped writes, to
2553          * redirty the full page:
2554          */
2555         page_mkclean(page);
2556         __set_page_dirty_nobuffers(page);
2557 unlock:
2558         unlock_page(page);
2559         put_page(page);
2560 out:
2561         return ret;
2562 }
2563
2564 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2565 {
2566         return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2567                                     from, round_up(from, PAGE_SIZE));
2568 }
2569
2570 static int bch2_truncate_pages(struct bch_inode_info *inode,
2571                                loff_t start, loff_t end)
2572 {
2573         int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
2574                                        start, end);
2575
2576         if (ret >= 0 &&
2577             start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2578                 ret = __bch2_truncate_page(inode,
2579                                            end >> PAGE_SHIFT,
2580                                            start, end);
2581         return ret;
2582 }
2583
2584 static int bch2_extend(struct user_namespace *mnt_userns,
2585                        struct bch_inode_info *inode,
2586                        struct bch_inode_unpacked *inode_u,
2587                        struct iattr *iattr)
2588 {
2589         struct address_space *mapping = inode->v.i_mapping;
2590         int ret;
2591
2592         /*
2593          * sync appends:
2594          *
2595          * this has to be done _before_ extending i_size:
2596          */
2597         ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2598         if (ret)
2599                 return ret;
2600
2601         truncate_setsize(&inode->v, iattr->ia_size);
2602
2603         return bch2_setattr_nonsize(mnt_userns, inode, iattr);
2604 }
2605
2606 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2607                                    struct bch_inode_unpacked *bi,
2608                                    void *p)
2609 {
2610         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2611         return 0;
2612 }
2613
2614 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2615                                   struct bch_inode_unpacked *bi, void *p)
2616 {
2617         u64 *new_i_size = p;
2618
2619         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2620         bi->bi_size = *new_i_size;
2621         return 0;
2622 }
2623
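/*
 * Truncate: update [cm]times if the size changes, block pagecache adds, extend
 * via bch2_extend() if growing; otherwise zero the partial tail page, flush
 * the affected range, mark the inode I_SIZE_DIRTY with the new size, shrink
 * the pagecache, punch the extents past the new EOF, then clear I_SIZE_DIRTY.
 */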
2624 int bch2_truncate(struct user_namespace *mnt_userns,
2625                   struct bch_inode_info *inode, struct iattr *iattr)
2626 {
2627         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2628         struct address_space *mapping = inode->v.i_mapping;
2629         struct bch_inode_unpacked inode_u;
2630         u64 new_i_size = iattr->ia_size;
2631         s64 i_sectors_delta = 0;
2632         int ret = 0;
2633
2634         /*
2635          * If the truncate call will change the size of the file, the
2636          * cmtimes should be updated. If the size will not change, we
2637          * do not need to update the cmtimes.
2638          */
2639         if (iattr->ia_size != inode->v.i_size) {
2640                 if (!(iattr->ia_valid & ATTR_MTIME))
2641                         ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2642                 if (!(iattr->ia_valid & ATTR_CTIME))
2643                         ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2644                 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2645         }
2646
2647         inode_dio_wait(&inode->v);
2648         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2649
2650         ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2651         if (ret)
2652                 goto err;
2653
2654         /*
2655          * check this before the next assertion; on filesystem error our normal
2656          * invariants are a bit broken (truncate has to truncate the page cache
2657          * before the inode).
2658          */
2659         ret = bch2_journal_error(&c->journal);
2660         if (ret)
2661                 goto err;
2662
2663         WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
2664                 inode->v.i_size < inode_u.bi_size);
2665
2666         if (iattr->ia_size > inode->v.i_size) {
2667                 ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
2668                 goto err;
2669         }
2670
2671         iattr->ia_valid &= ~ATTR_SIZE;
2672
2673         ret = bch2_truncate_page(inode, iattr->ia_size);
2674         if (unlikely(ret < 0))
2675                 goto err;
2676
2677         /*
2678          * When extending, we're going to write the new i_size to disk
2679          * immediately so we need to flush anything above the current on disk
2680          * i_size first:
2681          *
2682          * Also, when extending we need to flush the page that i_size currently
2683          * straddles - if it's mapped to userspace, we need to ensure that
2684          * userspace has to redirty it and call .mkwrite -> set_page_dirty
2685          * again to allocate the part of the page that was extended.
2686          */
2687         if (iattr->ia_size > inode_u.bi_size)
2688                 ret = filemap_write_and_wait_range(mapping,
2689                                 inode_u.bi_size,
2690                                 iattr->ia_size - 1);
2691         else if (iattr->ia_size & (PAGE_SIZE - 1))
2692                 ret = filemap_write_and_wait_range(mapping,
2693                                 round_down(iattr->ia_size, PAGE_SIZE),
2694                                 iattr->ia_size - 1);
2695         if (ret)
2696                 goto err;
2697
2698         mutex_lock(&inode->ei_update_lock);
2699         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2700                                &new_i_size, 0);
2701         mutex_unlock(&inode->ei_update_lock);
2702
2703         if (unlikely(ret))
2704                 goto err;
2705
2706         truncate_setsize(&inode->v, iattr->ia_size);
2707
2708         ret = bch2_fpunch(c, inode_inum(inode),
2709                         round_up(iattr->ia_size, block_bytes(c)) >> 9,
2710                         U64_MAX, &i_sectors_delta);
2711         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2712
2713         WARN_ON(!inode->v.i_size && inode->v.i_blocks &&
2714                 !bch2_journal_error(&c->journal));
2715
2716         if (unlikely(ret))
2717                 goto err;
2718
2719         mutex_lock(&inode->ei_update_lock);
2720         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
2721         mutex_unlock(&inode->ei_update_lock);
2722
2723         ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
2724 err:
2725         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2726         return ret;
2727 }
2728
2729 /* fallocate: */
2730
2731 static int inode_update_times_fn(struct bch_inode_info *inode,
2732                                  struct bch_inode_unpacked *bi, void *p)
2733 {
2734         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2735
2736         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
2737         return 0;
2738 }
2739
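/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at either end of the range,
 * drop the pagecache over it, punch out the block-aligned middle, then update
 * the inode's times (or its size, if the hole reaches EOF and the last page
 * wasn't handled by the page-level truncate).
 */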
2740 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2741 {
2742         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2743         u64 end         = offset + len;
2744         u64 block_start = round_up(offset, block_bytes(c));
2745         u64 block_end   = round_down(end, block_bytes(c));
2746         bool truncated_last_page;
2747         int ret = 0;
2748
2749         ret = bch2_truncate_pages(inode, offset, end);
2750         if (unlikely(ret < 0))
2751                 goto err;
2752
2753         truncated_last_page = ret;
2754
2755         truncate_pagecache_range(&inode->v, offset, end - 1);
2756
2757         if (block_start < block_end) {
2758                 s64 i_sectors_delta = 0;
2759
2760                 ret = bch2_fpunch(c, inode_inum(inode),
2761                                   block_start >> 9, block_end >> 9,
2762                                   &i_sectors_delta);
2763                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2764         }
2765
2766         mutex_lock(&inode->ei_update_lock);
2767         if (end >= inode->v.i_size && !truncated_last_page) {
2768                 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
2769                                             ATTR_MTIME|ATTR_CTIME);
2770         } else {
2771                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
2772                                        ATTR_MTIME|ATTR_CTIME);
2773         }
2774         mutex_unlock(&inode->ei_update_lock);
2775 err:
2776         return ret;
2777 }
2778
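/*
 * FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE: shift every extent above
 * the affected range down (collapse) or up (insert) by len bytes, moving keys
 * one at a time in btree transactions, and update i_size accordingly.
 */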
2779 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
2780                                    loff_t offset, loff_t len,
2781                                    bool insert)
2782 {
2783         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2784         struct address_space *mapping = inode->v.i_mapping;
2785         struct bkey_buf copy;
2786         struct btree_trans trans;
2787         struct btree_iter src, dst, del;
2788         loff_t shift, new_size;
2789         u64 src_start;
2790         int ret = 0;
2791
2792         if ((offset | len) & (block_bytes(c) - 1))
2793                 return -EINVAL;
2794
2795         if (insert) {
2796                 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
2797                         return -EFBIG;
2798
2799                 if (offset >= inode->v.i_size)
2800                         return -EINVAL;
2801
2802                 src_start       = U64_MAX;
2803                 shift           = len;
2804         } else {
2805                 if (offset + len >= inode->v.i_size)
2806                         return -EINVAL;
2807
2808                 src_start       = offset + len;
2809                 shift           = -len;
2810         }
2811
2812         new_size = inode->v.i_size + shift;
2813
2814         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2815         if (ret)
2816                 return ret;
2817
2818         if (insert) {
2819                 i_size_write(&inode->v, new_size);
2820                 mutex_lock(&inode->ei_update_lock);
2821                 ret = bch2_write_inode_size(c, inode, new_size,
2822                                             ATTR_MTIME|ATTR_CTIME);
2823                 mutex_unlock(&inode->ei_update_lock);
2824         } else {
2825                 s64 i_sectors_delta = 0;
2826
2827                 ret = bch2_fpunch(c, inode_inum(inode),
2828                                   offset >> 9, (offset + len) >> 9,
2829                                   &i_sectors_delta);
2830                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2831
2832                 if (ret)
2833                         return ret;
2834         }
2835
2836         bch2_bkey_buf_init(&copy);
2837         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
2838         bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
2839                         POS(inode->v.i_ino, src_start >> 9),
2840                         BTREE_ITER_INTENT);
2841         bch2_trans_copy_iter(&dst, &src);
2842         bch2_trans_copy_iter(&del, &src);
2843
2844         while (ret == 0 || ret == -EINTR) {
2845                 struct disk_reservation disk_res =
2846                         bch2_disk_reservation_init(c, 0);
2847                 struct bkey_i delete;
2848                 struct bkey_s_c k;
2849                 struct bpos next_pos;
2850                 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
2851                 struct bpos atomic_end;
2852                 unsigned trigger_flags = 0;
2853                 u32 snapshot;
2854
2855                 bch2_trans_begin(&trans);
2856
2857                 ret = bch2_subvolume_get_snapshot(&trans,
2858                                         inode->ei_subvol, &snapshot);
2859                 if (ret)
2860                         continue;
2861
2862                 bch2_btree_iter_set_snapshot(&src, snapshot);
2863                 bch2_btree_iter_set_snapshot(&dst, snapshot);
2864                 bch2_btree_iter_set_snapshot(&del, snapshot);
2865
2866                 bch2_trans_begin(&trans);
2867
2868                 k = insert
2869                         ? bch2_btree_iter_peek_prev(&src)
2870                         : bch2_btree_iter_peek(&src);
2871                 if ((ret = bkey_err(k)))
2872                         continue;
2873
2874                 if (!k.k || k.k->p.inode != inode->v.i_ino)
2875                         break;
2876
2877                 if (insert &&
2878                     bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
2879                         break;
2880 reassemble:
2881                 bch2_bkey_buf_reassemble(&copy, c, k);
2882
2883                 if (insert &&
2884                     bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
2885                         bch2_cut_front(move_pos, copy.k);
2886
2887                 copy.k->k.p.offset += shift >> 9;
2888                 bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
2889
2890                 ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
2891                 if (ret)
2892                         continue;
2893
2894                 if (bkey_cmp(atomic_end, copy.k->k.p)) {
2895                         if (insert) {
2896                                 move_pos = atomic_end;
2897                                 move_pos.offset -= shift >> 9;
2898                                 goto reassemble;
2899                         } else {
2900                                 bch2_cut_back(atomic_end, copy.k);
2901                         }
2902                 }
2903
2904                 bkey_init(&delete.k);
2905                 delete.k.p = copy.k->k.p;
2906                 delete.k.size = copy.k->k.size;
2907                 delete.k.p.offset -= shift >> 9;
2908                 bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
2909
2910                 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
2911
2912                 if (copy.k->k.size != k.k->size) {
2913                         /* We might end up splitting compressed extents: */
2914                         unsigned nr_ptrs =
2915                                 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
2916
2917                         ret = bch2_disk_reservation_get(c, &disk_res,
2918                                         copy.k->k.size, nr_ptrs,
2919                                         BCH_DISK_RESERVATION_NOFAIL);
2920                         BUG_ON(ret);
2921                 }
2922
2923                 ret =   bch2_btree_iter_traverse(&del) ?:
2924                         bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
2925                         bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
2926                         bch2_trans_commit(&trans, &disk_res, NULL,
2927                                           BTREE_INSERT_NOFAIL);
2928                 bch2_disk_reservation_put(c, &disk_res);
2929
2930                 if (!ret)
2931                         bch2_btree_iter_set_pos(&src, next_pos);
2932         }
2933         bch2_trans_iter_exit(&trans, &del);
2934         bch2_trans_iter_exit(&trans, &dst);
2935         bch2_trans_iter_exit(&trans, &src);
2936         bch2_trans_exit(&trans);
2937         bch2_bkey_buf_exit(&copy, c);
2938
2939         if (ret)
2940                 return ret;
2941
2942         mutex_lock(&inode->ei_update_lock);
2943         if (!insert) {
2944                 i_size_write(&inode->v, new_size);
2945                 ret = bch2_write_inode_size(c, inode, new_size,
2946                                             ATTR_MTIME|ATTR_CTIME);
2947         } else {
2948                 /* We need an inode update to update bi_journal_seq for fsync: */
2949                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
2950                                        ATTR_MTIME|ATTR_CTIME);
2951         }
2952         mutex_unlock(&inode->ei_update_lock);
2953         return ret;
2954 }
2955
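/*
 * Core of fallocate: walk the extent btree over [start_sector, end_sector),
 * replacing holes (and, for FALLOC_FL_ZERO_RANGE, existing data) with
 * reservation keys so that future writes to the range can't fail with -ENOSPC.
 */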
2956 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
2957                              u64 start_sector, u64 end_sector)
2958 {
2959         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2960         struct btree_trans trans;
2961         struct btree_iter iter;
2962         struct bpos end_pos = POS(inode->v.i_ino, end_sector);
2963         unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2964         int ret = 0;
2965
2966         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
2967
2968         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
2969                         POS(inode->v.i_ino, start_sector),
2970                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2971
2972         while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
2973                 s64 i_sectors_delta = 0;
2974                 struct disk_reservation disk_res = { 0 };
2975                 struct quota_res quota_res = { 0 };
2976                 struct bkey_i_reservation reservation;
2977                 struct bkey_s_c k;
2978                 unsigned sectors;
2979                 u32 snapshot;
2980
2981                 bch2_trans_begin(&trans);
2982
2983                 ret = bch2_subvolume_get_snapshot(&trans,
2984                                         inode->ei_subvol, &snapshot);
2985                 if (ret)
2986                         goto bkey_err;
2987
2988                 bch2_btree_iter_set_snapshot(&iter, snapshot);
2989
2990                 k = bch2_btree_iter_peek_slot(&iter);
2991                 if ((ret = bkey_err(k)))
2992                         goto bkey_err;
2993
2994                 /* already reserved */
2995                 if (k.k->type == KEY_TYPE_reservation &&
2996                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2997                         bch2_btree_iter_advance(&iter);
2998                         continue;
2999                 }
3000
3001                 if (bkey_extent_is_data(k.k) &&
3002                     !(mode & FALLOC_FL_ZERO_RANGE)) {
3003                         bch2_btree_iter_advance(&iter);
3004                         continue;
3005                 }
3006
3007                 bkey_reservation_init(&reservation.k_i);
3008                 reservation.k.type      = KEY_TYPE_reservation;
3009                 reservation.k.p         = k.k->p;
3010                 reservation.k.size      = k.k->size;
3011
3012                 bch2_cut_front(iter.pos,        &reservation.k_i);
3013                 bch2_cut_back(end_pos,          &reservation.k_i);
3014
3015                 sectors = reservation.k.size;
3016                 reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
3017
3018                 if (!bkey_extent_is_allocation(k.k)) {
3019                         ret = bch2_quota_reservation_add(c, inode,
3020                                         &quota_res,
3021                                         sectors, true);
3022                         if (unlikely(ret))
3023                                 goto bkey_err;
3024                 }
3025
3026                 if (reservation.v.nr_replicas < replicas ||
3027                     bch2_bkey_sectors_compressed(k)) {
3028                         ret = bch2_disk_reservation_get(c, &disk_res, sectors,
3029                                                         replicas, 0);
3030                         if (unlikely(ret))
3031                                 goto bkey_err;
3032
3033                         reservation.v.nr_replicas = disk_res.nr_replicas;
3034                 }
3035
3036                 ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
3037                                          &reservation.k_i,
3038                                 &disk_res, NULL,
3039                                 0, &i_sectors_delta, true);
3040                 if (ret)
3041                         goto bkey_err;
3042                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3043 bkey_err:
3044                 bch2_quota_reservation_put(c, inode, &quota_res);
3045                 bch2_disk_reservation_put(c, &disk_res);
3046                 if (ret == -EINTR)
3047                         ret = 0;
3048         }
3049
3050         bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
3051         mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
3052
3053         if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
3054                 struct quota_res quota_res = { 0 };
3055                 s64 i_sectors_delta = 0;
3056
3057                 bch2_fpunch_at(&trans, &iter, inode_inum(inode),
3058                                end_sector, &i_sectors_delta);
3059                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3060                 bch2_quota_reservation_put(c, inode, &quota_res);
3061         }
3062
3063         bch2_trans_iter_exit(&trans, &iter);
3064         bch2_trans_exit(&trans);
3065         return ret;
3066 }
3067
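     /*
      * fallocate() and FALLOC_FL_ZERO_RANGE: truncate partial pages when
      * zeroing, allocate/reserve the block aligned range, then bring the VFS
      * and on disk i_size up to date:
      */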
3068 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
3069                             loff_t offset, loff_t len)
3070 {
3071         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3072         u64 end         = offset + len;
3073         u64 block_start = round_down(offset,    block_bytes(c));
3074         u64 block_end   = round_up(end,         block_bytes(c));
3075         bool truncated_last_page = false;
3076         int ret, ret2 = 0;
3077
3078         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
3079                 ret = inode_newsize_ok(&inode->v, end);
3080                 if (ret)
3081                         return ret;
3082         }
3083
3084         if (mode & FALLOC_FL_ZERO_RANGE) {
3085                 ret = bch2_truncate_pages(inode, offset, end);
3086                 if (unlikely(ret < 0))
3087                         return ret;
3088
3089                 truncated_last_page = ret;
3090
3091                 truncate_pagecache_range(&inode->v, offset, end - 1);
3092
3093                 block_start     = round_up(offset,      block_bytes(c));
3094                 block_end       = round_down(end,       block_bytes(c));
3095         }
3096
3097         ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
3098
3099         /*
3100          * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
3101          * so that the VFS cache i_size is consistent with the btree i_size:
3102          */
3103         if (ret &&
3104             !(ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)))
3105                 return ret;
3106
3107         if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
3108                 end = inode->v.i_size;
3109
3110         if (end >= inode->v.i_size &&
3111             (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
3112              !(mode & FALLOC_FL_KEEP_SIZE))) {
3113                 spin_lock(&inode->v.i_lock);
3114                 i_size_write(&inode->v, end);
3115                 spin_unlock(&inode->v.i_lock);
3116
3117                 mutex_lock(&inode->ei_update_lock);
3118                 ret2 = bch2_write_inode_size(c, inode, end, 0);
3119                 mutex_unlock(&inode->ei_update_lock);
3120         }
3121
3122         return ret ?: ret2;
3123 }
3124
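     /*
      * fallocate() entry point: takes a filesystem write ref plus the inode
      * and pagecache locks, then dispatches on @mode to the fallocate,
      * fpunch, finsert or fcollapse implementations:
      */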
3125 long bch2_fallocate_dispatch(struct file *file, int mode,
3126                              loff_t offset, loff_t len)
3127 {
3128         struct bch_inode_info *inode = file_bch_inode(file);
3129         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3130         long ret;
3131
3132         if (!percpu_ref_tryget(&c->writes))
3133                 return -EROFS;
3134
3135         inode_lock(&inode->v);
3136         inode_dio_wait(&inode->v);
3137         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
3138
3139         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
3140                 ret = bchfs_fallocate(inode, mode, offset, len);
3141         else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
3142                 ret = bchfs_fpunch(inode, offset, len);
3143         else if (mode == FALLOC_FL_INSERT_RANGE)
3144                 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
3145         else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3146                 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
3147         else
3148                 ret = -EOPNOTSUPP;
3149
3150
3151         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
3152         inode_unlock(&inode->v);
3153         percpu_ref_put(&c->writes);
3154
3155         return ret;
3156 }
3157
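     /*
      * remap_file_range()/reflink: after validating alignment and flushing
      * the destination pagecache, remap the source extents into the
      * destination with bch2_remap_range() and update the destination
      * i_size and i_sectors:
      */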
3158 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
3159                              struct file *file_dst, loff_t pos_dst,
3160                              loff_t len, unsigned remap_flags)
3161 {
3162         struct bch_inode_info *src = file_bch_inode(file_src);
3163         struct bch_inode_info *dst = file_bch_inode(file_dst);
3164         struct bch_fs *c = src->v.i_sb->s_fs_info;
3165         s64 i_sectors_delta = 0;
3166         u64 aligned_len;
3167         loff_t ret = 0;
3168
3169         if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
3170                 return -EINVAL;
3171
3172         if (remap_flags & REMAP_FILE_DEDUP)
3173                 return -EOPNOTSUPP;
3174
3175         if ((pos_src & (block_bytes(c) - 1)) ||
3176             (pos_dst & (block_bytes(c) - 1)))
3177                 return -EINVAL;
3178
3179         if (src == dst &&
3180             abs(pos_src - pos_dst) < len)
3181                 return -EINVAL;
3182
3183         bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3184
3185         file_update_time(file_dst);
3186
3187         inode_dio_wait(&src->v);
3188         inode_dio_wait(&dst->v);
3189
3190         ret = generic_remap_file_range_prep(file_src, pos_src,
3191                                             file_dst, pos_dst,
3192                                             &len, remap_flags);
3193         if (ret < 0 || len == 0)
3194                 goto err;
3195
3196         aligned_len = round_up((u64) len, block_bytes(c));
3197
3198         ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
3199                                 pos_dst, pos_dst + len - 1);
3200         if (ret)
3201                 goto err;
3202
3203         mark_pagecache_unallocated(src, pos_src >> 9,
3204                                    (pos_src + aligned_len) >> 9);
3205
3206         ret = bch2_remap_range(c,
3207                                inode_inum(dst), pos_dst >> 9,
3208                                inode_inum(src), pos_src >> 9,
3209                                aligned_len >> 9,
3210                                pos_dst + len, &i_sectors_delta);
3211         if (ret < 0)
3212                 goto err;
3213
3214         /*
3215          * due to alignment, we might have remapped slightly more than requested
3216          */
3217         ret = min((u64) ret << 9, (u64) len);
3218
3219         /* XXX get a quota reservation */
3220         i_sectors_acct(c, dst, NULL, i_sectors_delta);
3221
3222         spin_lock(&dst->v.i_lock);
3223         if (pos_dst + ret > dst->v.i_size)
3224                 i_size_write(&dst->v, pos_dst + ret);
3225         spin_unlock(&dst->v.i_lock);
3226
3227         if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
3228             IS_SYNC(file_inode(file_dst)))
3229                 ret = bch2_flush_inode(c, inode_inum(dst));
3230 err:
3231         bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3232
3233         return ret;
3234 }
3235
3236 /* fseek: */
3237
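     /*
      * Return the byte offset within @page of the first sector at or after
      * @offset that holds data (dirty or allocated), or -1 if there is none:
      */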
3238 static int page_data_offset(struct page *page, unsigned offset)
3239 {
3240         struct bch_page_state *s = bch2_page_state(page);
3241         unsigned i;
3242
3243         if (s)
3244                 for (i = offset >> 9; i < PAGE_SECTORS; i++)
3245                         if (s->s[i].state >= SECTOR_DIRTY)
3246                                 return i << 9;
3247
3248         return -1;
3249 }
3250
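     /*
      * SEEK_DATA helper: scan the pagecache between @start_offset and
      * @end_offset for the first sector holding data, returning @end_offset
      * if none is found:
      */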
3251 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
3252                                        loff_t start_offset,
3253                                        loff_t end_offset)
3254 {
3255         struct address_space *mapping = vinode->i_mapping;
3256         struct page *page;
3257         pgoff_t start_index     = start_offset >> PAGE_SHIFT;
3258         pgoff_t end_index       = end_offset >> PAGE_SHIFT;
3259         pgoff_t index           = start_index;
3260         loff_t ret;
3261         int offset;
3262
3263         while (index <= end_index) {
3264                 if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
3265                         lock_page(page);
3266
3267                         offset = page_data_offset(page,
3268                                         page->index == start_index
3269                                         ? start_offset & (PAGE_SIZE - 1)
3270                                         : 0);
3271                         if (offset >= 0) {
3272                                 ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
3273                                             offset,
3274                                             start_offset, end_offset);
3275                                 unlock_page(page);
3276                                 put_page(page);
3277                                 return ret;
3278                         }
3279
3280                         unlock_page(page);
3281                         put_page(page);
3282                 } else {
3283                         break;
3284                 }
3285         }
3286
3287         return end_offset;
3288 }
3289
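     /*
      * SEEK_DATA: find the first data extent at or after @offset in the
      * extents btree, then check the pagecache for dirty data that may come
      * before it:
      */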
3290 static loff_t bch2_seek_data(struct file *file, u64 offset)
3291 {
3292         struct bch_inode_info *inode = file_bch_inode(file);
3293         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3294         struct btree_trans trans;
3295         struct btree_iter iter;
3296         struct bkey_s_c k;
3297         subvol_inum inum = inode_inum(inode);
3298         u64 isize, next_data = MAX_LFS_FILESIZE;
3299         u32 snapshot;
3300         int ret;
3301
3302         isize = i_size_read(&inode->v);
3303         if (offset >= isize)
3304                 return -ENXIO;
3305
3306         bch2_trans_init(&trans, c, 0, 0);
3307 retry:
3308         bch2_trans_begin(&trans);
3309
3310         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3311         if (ret)
3312                 goto err;
3313
3314         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3315                            SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
3316                 if (k.k->p.inode != inode->v.i_ino) {
3317                         break;
3318                 } else if (bkey_extent_is_data(k.k)) {
3319                         next_data = max(offset, bkey_start_offset(k.k) << 9);
3320                         break;
3321                 } else if (k.k->p.offset << 9 > isize)
3322                         break;
3323         }
3324         bch2_trans_iter_exit(&trans, &iter);
3325 err:
3326         if (ret == -EINTR)
3327                 goto retry;
3328
3329         bch2_trans_exit(&trans);
3330         if (ret)
3331                 return ret;
3332
3333         if (next_data > offset)
3334                 next_data = bch2_seek_pagecache_data(&inode->v,
3335                                                      offset, next_data);
3336
3337         if (next_data >= isize)
3338                 return -ENXIO;
3339
3340         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
3341 }
3342
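     /*
      * Return the byte offset within @page of the first sector at or after
      * @offset that doesn't hold data, or -1 if the rest of the page is data:
      */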
3343 static int __page_hole_offset(struct page *page, unsigned offset)
3344 {
3345         struct bch_page_state *s = bch2_page_state(page);
3346         unsigned i;
3347
3348         if (!s)
3349                 return 0;
3350
3351         for (i = offset >> 9; i < PAGE_SECTORS; i++)
3352                 if (s->s[i].state < SECTOR_DIRTY)
3353                         return i << 9;
3354
3355         return -1;
3356 }
3357
3358 static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
3359 {
3360         pgoff_t index = offset >> PAGE_SHIFT;
3361         struct page *page;
3362         int pg_offset;
3363         loff_t ret = -1;
3364
3365         page = find_lock_page(mapping, index);
3366         if (!page)
3367                 return offset;
3368
3369         pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
3370         if (pg_offset >= 0)
3371                 ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
3372
3373         unlock_page(page);
3374
3375         return ret;
3376 }
3377
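     /*
      * SEEK_HOLE helper: scan the pagecache between @start_offset and
      * @end_offset for the first offset not backed by data, returning
      * @end_offset if every page in the range holds data:
      */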
3378 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3379                                        loff_t start_offset,
3380                                        loff_t end_offset)
3381 {
3382         struct address_space *mapping = vinode->i_mapping;
3383         loff_t offset = start_offset, hole;
3384
3385         while (offset < end_offset) {
3386                 hole = page_hole_offset(mapping, offset);
3387                 if (hole >= 0 && hole <= end_offset)
3388                         return max(start_offset, hole);
3389
3390                 offset += PAGE_SIZE;
3391                 offset &= PAGE_MASK;
3392         }
3393
3394         return end_offset;
3395 }
3396
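     /*
      * SEEK_HOLE: walk extents from @offset; for each hole in the btree,
      * check that the pagecache doesn't have dirty data filling it before
      * reporting it:
      */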
3397 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3398 {
3399         struct bch_inode_info *inode = file_bch_inode(file);
3400         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3401         struct btree_trans trans;
3402         struct btree_iter iter;
3403         struct bkey_s_c k;
3404         subvol_inum inum = inode_inum(inode);
3405         u64 isize, next_hole = MAX_LFS_FILESIZE;
3406         u32 snapshot;
3407         int ret;
3408
3409         isize = i_size_read(&inode->v);
3410         if (offset >= isize)
3411                 return -ENXIO;
3412
3413         bch2_trans_init(&trans, c, 0, 0);
3414 retry:
3415         bch2_trans_begin(&trans);
3416
3417         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3418         if (ret)
3419                 goto err;
3420
3421         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3422                            SPOS(inode->v.i_ino, offset >> 9, snapshot),
3423                            BTREE_ITER_SLOTS, k, ret) {
3424                 if (k.k->p.inode != inode->v.i_ino) {
3425                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3426                                         offset, MAX_LFS_FILESIZE);
3427                         break;
3428                 } else if (!bkey_extent_is_data(k.k)) {
3429                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3430                                         max(offset, bkey_start_offset(k.k) << 9),
3431                                         k.k->p.offset << 9);
3432
3433                         if (next_hole < k.k->p.offset << 9)
3434                                 break;
3435                 } else {
3436                         offset = max(offset, bkey_start_offset(k.k) << 9);
3437                 }
3438         }
3439         bch2_trans_iter_exit(&trans, &iter);
3440 err:
3441         if (ret == -EINTR)
3442                 goto retry;
3443
3444         bch2_trans_exit(&trans);
3445         if (ret)
3446                 return ret;
3447
3448         if (next_hole > isize)
3449                 next_hole = isize;
3450
3451         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3452 }
3453
3454 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3455 {
3456         switch (whence) {
3457         case SEEK_SET:
3458         case SEEK_CUR:
3459         case SEEK_END:
3460                 return generic_file_llseek(file, offset, whence);
3461         case SEEK_DATA:
3462                 return bch2_seek_data(file, offset);
3463         case SEEK_HOLE:
3464                 return bch2_seek_hole(file, offset);
3465         }
3466
3467         return -EINVAL;
3468 }
3469
3470 void bch2_fs_fsio_exit(struct bch_fs *c)
3471 {
3472         bioset_exit(&c->dio_write_bioset);
3473         bioset_exit(&c->dio_read_bioset);
3474         bioset_exit(&c->writepage_bioset);
3475 }
3476
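     /* Set up the biosets used for buffered writeback and O_DIRECT reads and writes: */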
3477 int bch2_fs_fsio_init(struct bch_fs *c)
3478 {
3479         int ret = 0;
3480
3481         pr_verbose_init(c->opts, "");
3482
3483         if (bioset_init(&c->writepage_bioset,
3484                         4, offsetof(struct bch_writepage_io, op.wbio.bio),
3485                         BIOSET_NEED_BVECS) ||
3486             bioset_init(&c->dio_read_bioset,
3487                         4, offsetof(struct dio_read, rbio.bio),
3488                         BIOSET_NEED_BVECS) ||
3489             bioset_init(&c->dio_write_bioset,
3490                         4, offsetof(struct dio_write, op.wbio.bio),
3491                         BIOSET_NEED_BVECS))
3492                 ret = -ENOMEM;
3493
3494         pr_verbose_init(c->opts, "ret %i", ret);
3495         return ret;
3496 }
3497
3498 #endif /* NO_BCACHEFS_FS */