libbcachefs/fs-io.c
1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_FS
3
4 #include "bcachefs.h"
5 #include "alloc_foreground.h"
6 #include "bkey_buf.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "clock.h"
10 #include "error.h"
11 #include "extents.h"
12 #include "extent_update.h"
13 #include "fs.h"
14 #include "fs-io.h"
15 #include "fsck.h"
16 #include "inode.h"
17 #include "journal.h"
18 #include "io.h"
19 #include "keylist.h"
20 #include "quota.h"
21 #include "reflink.h"
22
23 #include <linux/aio.h>
24 #include <linux/backing-dev.h>
25 #include <linux/falloc.h>
26 #include <linux/migrate.h>
27 #include <linux/mmu_context.h>
28 #include <linux/pagevec.h>
29 #include <linux/rmap.h>
30 #include <linux/sched/signal.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/uio.h>
33 #include <linux/writeback.h>
34
35 #include <trace/events/bcachefs.h>
36 #include <trace/events/writeback.h>
37
38 static inline bool bio_full(struct bio *bio, unsigned len)
39 {
40         if (bio->bi_vcnt >= bio->bi_max_vecs)
41                 return true;
42         if (bio->bi_iter.bi_size > UINT_MAX - len)
43                 return true;
44         return false;
45 }
46
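/*
 * current->faults_disabled_mapping points at the address_space a DIO write
 * has disabled page faults on; the low bit of the pointer is borrowed as a
 * "locks were dropped" flag (see bch2_page_fault() below):
 */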
47 static inline struct address_space *faults_disabled_mapping(void)
48 {
49         return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
50 }
51
52 static inline void set_fdm_dropped_locks(void)
53 {
54         current->faults_disabled_mapping =
55                 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
56 }
57
58 static inline bool fdm_dropped_locks(void)
59 {
60         return ((unsigned long) current->faults_disabled_mapping) & 1;
61 }
62
63 struct quota_res {
64         u64                             sectors;
65 };
66
67 struct bch_writepage_io {
68         struct closure                  cl;
69         struct bch_inode_info           *inode;
70
71         /* must be last: */
72         struct bch_write_op             op;
73 };
74
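/*
 * State for an in-flight direct (O_DIRECT) write; op is marked "must be
 * last" presumably because the bio embedded in it carries inline bio vecs
 * that follow this struct in memory:
 */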
75 struct dio_write {
76         struct completion               done;
77         struct kiocb                    *req;
78         struct mm_struct                *mm;
79         unsigned                        loop:1,
80                                         sync:1,
81                                         free_iov:1;
82         struct quota_res                quota_res;
83         u64                             written;
84
85         struct iov_iter                 iter;
86         struct iovec                    inline_vecs[2];
87
88         /* must be last: */
89         struct bch_write_op             op;
90 };
91
92 struct dio_read {
93         struct closure                  cl;
94         struct kiocb                    *req;
95         long                            ret;
96         bool                            should_dirty;
97         struct bch_read_bio             rbio;
98 };
99
100 /* pagecache_block must be held */
101 static int write_invalidate_inode_pages_range(struct address_space *mapping,
102                                               loff_t start, loff_t end)
103 {
104         int ret;
105
106         /*
107          * XXX: the way this is currently implemented, we can spin if a process
108          * is continually redirtying a specific page
109          */
110         do {
111                 if (!mapping->nrpages)
112                         return 0;
113
114                 ret = filemap_write_and_wait_range(mapping, start, end);
115                 if (ret)
116                         break;
117
118                 if (!mapping->nrpages)
119                         return 0;
120
121                 ret = invalidate_inode_pages2_range(mapping,
122                                 start >> PAGE_SHIFT,
123                                 end >> PAGE_SHIFT);
124         } while (ret == -EBUSY);
125
126         return ret;
127 }
128
129 /* quotas */
130
131 #ifdef CONFIG_BCACHEFS_QUOTA
132
133 static void bch2_quota_reservation_put(struct bch_fs *c,
134                                        struct bch_inode_info *inode,
135                                        struct quota_res *res)
136 {
137         if (!res->sectors)
138                 return;
139
140         mutex_lock(&inode->ei_quota_lock);
141         BUG_ON(res->sectors > inode->ei_quota_reserved);
142
143         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
144                         -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
145         inode->ei_quota_reserved -= res->sectors;
146         mutex_unlock(&inode->ei_quota_lock);
147
148         res->sectors = 0;
149 }
150
151 static int bch2_quota_reservation_add(struct bch_fs *c,
152                                       struct bch_inode_info *inode,
153                                       struct quota_res *res,
154                                       unsigned sectors,
155                                       bool check_enospc)
156 {
157         int ret;
158
159         mutex_lock(&inode->ei_quota_lock);
160         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
161                               check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
162         if (likely(!ret)) {
163                 inode->ei_quota_reserved += sectors;
164                 res->sectors += sectors;
165         }
166         mutex_unlock(&inode->ei_quota_lock);
167
168         return ret;
169 }
170
171 #else
172
173 static void bch2_quota_reservation_put(struct bch_fs *c,
174                                        struct bch_inode_info *inode,
175                                        struct quota_res *res)
176 {
177 }
178
179 static int bch2_quota_reservation_add(struct bch_fs *c,
180                                       struct bch_inode_info *inode,
181                                       struct quota_res *res,
182                                       unsigned sectors,
183                                       bool check_enospc)
184 {
185         return 0;
186 }
187
188 #endif
189
190 /* i_size updates: */
191
192 struct inode_new_size {
193         loff_t          new_size;
194         u64             now;
195         unsigned        fields;
196 };
197
198 static int inode_set_size(struct bch_inode_info *inode,
199                           struct bch_inode_unpacked *bi,
200                           void *p)
201 {
202         struct inode_new_size *s = p;
203
204         bi->bi_size = s->new_size;
205         if (s->fields & ATTR_ATIME)
206                 bi->bi_atime = s->now;
207         if (s->fields & ATTR_MTIME)
208                 bi->bi_mtime = s->now;
209         if (s->fields & ATTR_CTIME)
210                 bi->bi_ctime = s->now;
211
212         return 0;
213 }
214
215 int __must_check bch2_write_inode_size(struct bch_fs *c,
216                                        struct bch_inode_info *inode,
217                                        loff_t new_size, unsigned fields)
218 {
219         struct inode_new_size s = {
220                 .new_size       = new_size,
221                 .now            = bch2_current_time(c),
222                 .fields         = fields,
223         };
224
225         return bch2_write_inode(c, inode, inode_set_size, &s, fields);
226 }
227
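/*
 * Adjust the vfs inode's i_blocks by @sectors; when growing, consume the
 * caller's quota reservation if one is passed, otherwise account the change
 * against the quota directly:
 */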
228 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
229                            struct quota_res *quota_res, s64 sectors)
230 {
231         if (!sectors)
232                 return;
233
234         mutex_lock(&inode->ei_quota_lock);
235         bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
236                                 "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
237                                 inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
238                                 inode->ei_inode.bi_sectors);
239         inode->v.i_blocks += sectors;
240
241 #ifdef CONFIG_BCACHEFS_QUOTA
242         if (quota_res && sectors > 0) {
243                 BUG_ON(sectors > quota_res->sectors);
244                 BUG_ON(sectors > inode->ei_quota_reserved);
245
246                 quota_res->sectors -= sectors;
247                 inode->ei_quota_reserved -= sectors;
248         } else {
249                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
250         }
251 #endif
252         mutex_unlock(&inode->ei_quota_lock);
253 }
254
255 /* page state: */
256
257 /* stored in page->private: */
258
259 struct bch_page_sector {
260         /* Uncompressed, fully allocated replicas (or on disk reservation): */
261         unsigned                nr_replicas:4;
262
263         /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
264         unsigned                replicas_reserved:4;
265
266         /* i_sectors: */
267         enum {
268                 SECTOR_UNALLOCATED,
269                 SECTOR_RESERVED,
270                 SECTOR_DIRTY,
271                 SECTOR_DIRTY_RESERVED,
272                 SECTOR_ALLOCATED,
273         }                       state:8;
274 };
275
276 struct bch_page_state {
277         spinlock_t              lock;
278         atomic_t                write_count;
279         bool                    uptodate;
280         struct bch_page_sector  s[PAGE_SECTORS];
281 };
282
283 static inline struct bch_page_state *__bch2_page_state(struct page *page)
284 {
285         return page_has_private(page)
286                 ? (struct bch_page_state *) page_private(page)
287                 : NULL;
288 }
289
290 static inline struct bch_page_state *bch2_page_state(struct page *page)
291 {
292         EBUG_ON(!PageLocked(page));
293
294         return __bch2_page_state(page);
295 }
296
297 /* for newly allocated pages: */
298 static void __bch2_page_state_release(struct page *page)
299 {
300         kfree(detach_page_private(page));
301 }
302
303 static void bch2_page_state_release(struct page *page)
304 {
305         EBUG_ON(!PageLocked(page));
306         __bch2_page_state_release(page);
307 }
308
309 /* for newly allocated pages: */
310 static struct bch_page_state *__bch2_page_state_create(struct page *page,
311                                                        gfp_t gfp)
312 {
313         struct bch_page_state *s;
314
315         s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
316         if (!s)
317                 return NULL;
318
319         spin_lock_init(&s->lock);
320         attach_page_private(page, s);
321         return s;
322 }
323
324 static struct bch_page_state *bch2_page_state_create(struct page *page,
325                                                      gfp_t gfp)
326 {
327         return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
328 }
329
330 static unsigned bkey_to_sector_state(const struct bkey *k)
331 {
332         if (k->type == KEY_TYPE_reservation)
333                 return SECTOR_RESERVED;
334         if (bkey_extent_is_allocation(k))
335                 return SECTOR_ALLOCATED;
336         return SECTOR_UNALLOCATED;
337 }
338
339 static void __bch2_page_state_set(struct page *page,
340                                   unsigned pg_offset, unsigned pg_len,
341                                   unsigned nr_ptrs, unsigned state)
342 {
343         struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
344         unsigned i;
345
346         BUG_ON(pg_offset >= PAGE_SECTORS);
347         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
348
349         spin_lock(&s->lock);
350
351         for (i = pg_offset; i < pg_offset + pg_len; i++) {
352                 s->s[i].nr_replicas = nr_ptrs;
353                 s->s[i].state = state;
354         }
355
356         if (i == PAGE_SECTORS)
357                 s->uptodate = true;
358
359         spin_unlock(&s->lock);
360 }
361
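/*
 * Walk the extents btree to fill in per-sector allocation state for @pages,
 * so that later reservations know which sectors are already backed on disk:
 */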
362 static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
363                                struct page **pages, unsigned nr_pages)
364 {
365         struct btree_trans trans;
366         struct btree_iter iter;
367         struct bkey_s_c k;
368         u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
369         unsigned pg_idx = 0;
370         u32 snapshot;
371         int ret;
372
373         bch2_trans_init(&trans, c, 0, 0);
374 retry:
375         bch2_trans_begin(&trans);
376
377         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
378         if (ret)
379                 goto err;
380
381         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
382                            SPOS(inum.inum, offset, snapshot),
383                            BTREE_ITER_SLOTS, k, ret) {
384                 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
385                 unsigned state = bkey_to_sector_state(k.k);
386
387                 while (pg_idx < nr_pages) {
388                         struct page *page = pages[pg_idx];
389                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
390                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
391                         unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
392                         unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;
393
394                         BUG_ON(k.k->p.offset < pg_start);
395                         BUG_ON(bkey_start_offset(k.k) > pg_end);
396
397                         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
398                                 __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);
399
400                         if (k.k->p.offset < pg_end)
401                                 break;
402                         pg_idx++;
403                 }
404
405                 if (pg_idx == nr_pages)
406                         break;
407         }
408
409         offset = iter.pos.offset;
410         bch2_trans_iter_exit(&trans, &iter);
411 err:
412         if (ret == -EINTR)
413                 goto retry;
414         bch2_trans_exit(&trans);
415
416         return ret;
417 }
418
419 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
420 {
421         struct bvec_iter iter;
422         struct bio_vec bv;
423         unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
424                 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
425         unsigned state = bkey_to_sector_state(k.k);
426
427         bio_for_each_segment(bv, bio, iter)
428                 __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
429                                       bv.bv_len >> 9, nr_ptrs, state);
430 }
431
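/*
 * Clear the cached nr_replicas for every pagecache page in the given sector
 * range - presumably called once the backing extents are gone, so that
 * subsequent writes take fresh disk reservations:
 */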
432 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
433                                        u64 start, u64 end)
434 {
435         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
436         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
437         struct pagevec pvec;
438
439         if (end <= start)
440                 return;
441
442         pagevec_init(&pvec);
443
444         do {
445                 unsigned nr_pages, i, j;
446
447                 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
448                                                 &index, end_index);
449                 for (i = 0; i < nr_pages; i++) {
450                         struct page *page = pvec.pages[i];
451                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
452                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
453                         unsigned pg_offset = max(start, pg_start) - pg_start;
454                         unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
455                         struct bch_page_state *s;
456
457                         BUG_ON(end <= pg_start);
458                         BUG_ON(pg_offset >= PAGE_SECTORS);
459                         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
460
461                         lock_page(page);
462                         s = bch2_page_state(page);
463
464                         if (s) {
465                                 spin_lock(&s->lock);
466                                 for (j = pg_offset; j < pg_offset + pg_len; j++)
467                                         s->s[j].nr_replicas = 0;
468                                 spin_unlock(&s->lock);
469                         }
470
471                         unlock_page(page);
472                 }
473                 pagevec_release(&pvec);
474         } while (index <= end_index);
475 }
476
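/*
 * Mark pagecache pages in the given sector range as covered by an on disk
 * reservation (e.g. from fallocate); dirty sectors become DIRTY_RESERVED and
 * their i_blocks contribution is given back via i_sectors_acct():
 */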
477 static void mark_pagecache_reserved(struct bch_inode_info *inode,
478                                     u64 start, u64 end)
479 {
480         struct bch_fs *c = inode->v.i_sb->s_fs_info;
481         pgoff_t index = start >> PAGE_SECTORS_SHIFT;
482         pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
483         struct pagevec pvec;
484         s64 i_sectors_delta = 0;
485
486         if (end <= start)
487                 return;
488
489         pagevec_init(&pvec);
490
491         do {
492                 unsigned nr_pages, i, j;
493
494                 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
495                                                 &index, end_index);
496                 for (i = 0; i < nr_pages; i++) {
497                         struct page *page = pvec.pages[i];
498                         u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
499                         u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
500                         unsigned pg_offset = max(start, pg_start) - pg_start;
501                         unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
502                         struct bch_page_state *s;
503
504                         BUG_ON(end <= pg_start);
505                         BUG_ON(pg_offset >= PAGE_SECTORS);
506                         BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
507
508                         lock_page(page);
509                         s = bch2_page_state(page);
510
511                         if (s) {
512                                 spin_lock(&s->lock);
513                                 for (j = pg_offset; j < pg_offset + pg_len; j++)
514                                         switch (s->s[j].state) {
515                                         case SECTOR_UNALLOCATED:
516                                                 s->s[j].state = SECTOR_RESERVED;
517                                                 break;
518                                         case SECTOR_DIRTY:
519                                                 s->s[j].state = SECTOR_DIRTY_RESERVED;
520                                                 i_sectors_delta--;
521                                                 break;
522                                         default:
523                                                 break;
524                                         }
525                                 spin_unlock(&s->lock);
526                         }
527
528                         unlock_page(page);
529                 }
530                 pagevec_release(&pvec);
531         } while (index <= end_index);
532
533         i_sectors_acct(c, inode, NULL, i_sectors_delta);
534 }
535
536 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
537 {
538         /* XXX: this should not be open coded */
539         return inode->ei_inode.bi_data_replicas
540                 ? inode->ei_inode.bi_data_replicas - 1
541                 : c->opts.data_replicas;
542 }
543
544 static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
545                                                   unsigned nr_replicas)
546 {
547         return max(0, (int) nr_replicas -
548                    s->nr_replicas -
549                    s->replicas_reserved);
550 }
551
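/*
 * Reserve disk space for every sector in @page that doesn't yet have enough
 * replicas allocated or reserved; with @check_enospc false the reservation
 * is taken with BCH_DISK_RESERVATION_NOFAIL:
 */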
552 static int bch2_get_page_disk_reservation(struct bch_fs *c,
553                                 struct bch_inode_info *inode,
554                                 struct page *page, bool check_enospc)
555 {
556         struct bch_page_state *s = bch2_page_state_create(page, 0);
557         unsigned nr_replicas = inode_nr_replicas(c, inode);
558         struct disk_reservation disk_res = { 0 };
559         unsigned i, disk_res_sectors = 0;
560         int ret;
561
562         if (!s)
563                 return -ENOMEM;
564
565         for (i = 0; i < ARRAY_SIZE(s->s); i++)
566                 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
567
568         if (!disk_res_sectors)
569                 return 0;
570
571         ret = bch2_disk_reservation_get(c, &disk_res,
572                                         disk_res_sectors, 1,
573                                         !check_enospc
574                                         ? BCH_DISK_RESERVATION_NOFAIL
575                                         : 0);
576         if (unlikely(ret))
577                 return ret;
578
579         for (i = 0; i < ARRAY_SIZE(s->s); i++)
580                 s->s[i].replicas_reserved +=
581                         sectors_to_reserve(&s->s[i], nr_replicas);
582
583         return 0;
584 }
585
586 struct bch2_page_reservation {
587         struct disk_reservation disk;
588         struct quota_res        quota;
589 };
590
591 static void bch2_page_reservation_init(struct bch_fs *c,
592                         struct bch_inode_info *inode,
593                         struct bch2_page_reservation *res)
594 {
595         memset(res, 0, sizeof(*res));
596
597         res->disk.nr_replicas = inode_nr_replicas(c, inode);
598 }
599
600 static void bch2_page_reservation_put(struct bch_fs *c,
601                         struct bch_inode_info *inode,
602                         struct bch2_page_reservation *res)
603 {
604         bch2_disk_reservation_put(c, &res->disk);
605         bch2_quota_reservation_put(c, inode, &res->quota);
606 }
607
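/*
 * Take the disk and quota reservations needed to dirty @len bytes at @offset
 * within @page; sectors that are already allocated or reserved cost nothing:
 */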
608 static int bch2_page_reservation_get(struct bch_fs *c,
609                         struct bch_inode_info *inode, struct page *page,
610                         struct bch2_page_reservation *res,
611                         unsigned offset, unsigned len, bool check_enospc)
612 {
613         struct bch_page_state *s = bch2_page_state_create(page, 0);
614         unsigned i, disk_sectors = 0, quota_sectors = 0;
615         int ret;
616
617         if (!s)
618                 return -ENOMEM;
619
620         BUG_ON(!s->uptodate);
621
622         for (i = round_down(offset, block_bytes(c)) >> 9;
623              i < round_up(offset + len, block_bytes(c)) >> 9;
624              i++) {
625                 disk_sectors += sectors_to_reserve(&s->s[i],
626                                                 res->disk.nr_replicas);
627                 quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
628         }
629
630         if (disk_sectors) {
631                 ret = bch2_disk_reservation_add(c, &res->disk,
632                                                 disk_sectors,
633                                                 !check_enospc
634                                                 ? BCH_DISK_RESERVATION_NOFAIL
635                                                 : 0);
636                 if (unlikely(ret))
637                         return ret;
638         }
639
640         if (quota_sectors) {
641                 ret = bch2_quota_reservation_add(c, inode, &res->quota,
642                                                  quota_sectors,
643                                                  check_enospc);
644                 if (unlikely(ret)) {
645                         struct disk_reservation tmp = {
646                                 .sectors = disk_sectors
647                         };
648
649                         bch2_disk_reservation_put(c, &tmp);
650                         res->disk.sectors -= disk_sectors;
651                         return ret;
652                 }
653         }
654
655         return 0;
656 }
657
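/*
 * Called when a page is dropped from the page cache: give back any disk
 * reservation still attached to it and undo the i_blocks contribution of
 * dirty sectors that will now never be written back:
 */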
658 static void bch2_clear_page_bits(struct page *page)
659 {
660         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
661         struct bch_fs *c = inode->v.i_sb->s_fs_info;
662         struct bch_page_state *s = bch2_page_state(page);
663         struct disk_reservation disk_res = { 0 };
664         int i, dirty_sectors = 0;
665
666         if (!s)
667                 return;
668
669         EBUG_ON(!PageLocked(page));
670         EBUG_ON(PageWriteback(page));
671
672         for (i = 0; i < ARRAY_SIZE(s->s); i++) {
673                 disk_res.sectors += s->s[i].replicas_reserved;
674                 s->s[i].replicas_reserved = 0;
675
676                 switch (s->s[i].state) {
677                 case SECTOR_DIRTY:
678                         s->s[i].state = SECTOR_UNALLOCATED;
679                         --dirty_sectors;
680                         break;
681                 case SECTOR_DIRTY_RESERVED:
682                         s->s[i].state = SECTOR_RESERVED;
683                         break;
684                 default:
685                         break;
686                 }
687         }
688
689         bch2_disk_reservation_put(c, &disk_res);
690
691         i_sectors_acct(c, inode, NULL, dirty_sectors);
692
693         bch2_page_state_release(page);
694 }
695
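/*
 * Move reservation from @res into the page's per-sector state and mark the
 * byte range dirty, crediting i_blocks for newly dirtied sectors:
 */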
696 static void bch2_set_page_dirty(struct bch_fs *c,
697                         struct bch_inode_info *inode, struct page *page,
698                         struct bch2_page_reservation *res,
699                         unsigned offset, unsigned len)
700 {
701         struct bch_page_state *s = bch2_page_state(page);
702         unsigned i, dirty_sectors = 0;
703
704         WARN_ON((u64) page_offset(page) + offset + len >
705                 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
706
707         spin_lock(&s->lock);
708
709         for (i = round_down(offset, block_bytes(c)) >> 9;
710              i < round_up(offset + len, block_bytes(c)) >> 9;
711              i++) {
712                 unsigned sectors = sectors_to_reserve(&s->s[i],
713                                                 res->disk.nr_replicas);
714
715                 /*
716                  * This can happen if we race with the error path in
717                  * bch2_writepage_io_done():
718                  */
719                 sectors = min_t(unsigned, sectors, res->disk.sectors);
720
721                 s->s[i].replicas_reserved += sectors;
722                 res->disk.sectors -= sectors;
723
724                 switch (s->s[i].state) {
725                 case SECTOR_UNALLOCATED:
726                         s->s[i].state = SECTOR_DIRTY;
727                         dirty_sectors++;
728                         break;
729                 case SECTOR_RESERVED:
730                         s->s[i].state = SECTOR_DIRTY_RESERVED;
731                         break;
732                 default:
733                         break;
734                 }
735         }
736
737         spin_unlock(&s->lock);
738
739         i_sectors_acct(c, inode, &res->quota, dirty_sectors);
740
741         if (!PageDirty(page))
742                 __set_page_dirty_nobuffers(page);
743 }
744
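/*
 * Page faults are ordered against DIO writes via faults_disabled_mapping():
 * faulting on the mapping the DIO write itself has faults disabled on
 * returns SIGBUS, and an ordering inversion against a different mapping is
 * resolved by dropping that mapping's pagecache_block lock and recording the
 * fact with set_fdm_dropped_locks():
 */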
745 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
746 {
747         struct file *file = vmf->vma->vm_file;
748         struct address_space *mapping = file->f_mapping;
749         struct address_space *fdm = faults_disabled_mapping();
750         struct bch_inode_info *inode = file_bch_inode(file);
751         int ret;
752
753         if (fdm == mapping)
754                 return VM_FAULT_SIGBUS;
755
756         /* Lock ordering: */
757         if (fdm > mapping) {
758                 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
759
760                 if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
761                         goto got_lock;
762
763                 bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);
764
765                 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
766                 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
767
768                 bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);
769
770                 /* Signal that lock has been dropped: */
771                 set_fdm_dropped_locks();
772                 return VM_FAULT_SIGBUS;
773         }
774
775         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
776 got_lock:
777         ret = filemap_fault(vmf);
778         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
779
780         return ret;
781 }
782
783 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
784 {
785         struct page *page = vmf->page;
786         struct file *file = vmf->vma->vm_file;
787         struct bch_inode_info *inode = file_bch_inode(file);
788         struct address_space *mapping = file->f_mapping;
789         struct bch_fs *c = inode->v.i_sb->s_fs_info;
790         struct bch2_page_reservation res;
791         unsigned len;
792         loff_t isize;
793         int ret;
794
795         bch2_page_reservation_init(c, inode, &res);
796
797         sb_start_pagefault(inode->v.i_sb);
798         file_update_time(file);
799
800         /*
801          * Not strictly necessary, but helps avoid dio writes livelocking in
802          * write_invalidate_inode_pages_range() - can drop this if/when we get
803          * a write_invalidate_inode_pages_range() that works without dropping
804          * page lock before invalidating page
805          */
806         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
807
808         lock_page(page);
809         isize = i_size_read(&inode->v);
810
811         if (page->mapping != mapping || page_offset(page) >= isize) {
812                 unlock_page(page);
813                 ret = VM_FAULT_NOPAGE;
814                 goto out;
815         }
816
817         len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
818
819         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
820                 if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
821                         unlock_page(page);
822                         ret = VM_FAULT_SIGBUS;
823                         goto out;
824                 }
825         }
826
827         if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
828                 unlock_page(page);
829                 ret = VM_FAULT_SIGBUS;
830                 goto out;
831         }
832
833         bch2_set_page_dirty(c, inode, page, &res, 0, len);
834         bch2_page_reservation_put(c, inode, &res);
835
836         wait_for_stable_page(page);
837         ret = VM_FAULT_LOCKED;
838 out:
839         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
840         sb_end_pagefault(inode->v.i_sb);
841
842         return ret;
843 }
844
845 void bch2_invalidatepage(struct page *page, unsigned int offset,
846                          unsigned int length)
847 {
848         if (offset || length < PAGE_SIZE)
849                 return;
850
851         bch2_clear_page_bits(page);
852 }
853
854 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
855 {
856         if (PageDirty(page))
857                 return 0;
858
859         bch2_clear_page_bits(page);
860         return 1;
861 }
862
863 #ifdef CONFIG_MIGRATION
864 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
865                       struct page *page, enum migrate_mode mode)
866 {
867         int ret;
868
869         EBUG_ON(!PageLocked(page));
870         EBUG_ON(!PageLocked(newpage));
871
872         ret = migrate_page_move_mapping(mapping, newpage, page, 0);
873         if (ret != MIGRATEPAGE_SUCCESS)
874                 return ret;
875
876         if (PagePrivate(page))
877                 attach_page_private(newpage, detach_page_private(page));
878
879         if (mode != MIGRATE_SYNC_NO_COPY)
880                 migrate_page_copy(newpage, page);
881         else
882                 migrate_page_states(newpage, page);
883         return MIGRATEPAGE_SUCCESS;
884 }
885 #endif
886
887 /* readpage(s): */
888
889 static void bch2_readpages_end_io(struct bio *bio)
890 {
891         struct bvec_iter_all iter;
892         struct bio_vec *bv;
893
894         bio_for_each_segment_all(bv, bio, iter) {
895                 struct page *page = bv->bv_page;
896
897                 if (!bio->bi_status) {
898                         SetPageUptodate(page);
899                 } else {
900                         ClearPageUptodate(page);
901                         SetPageError(page);
902                 }
903                 unlock_page(page);
904         }
905
906         bio_put(bio);
907 }
908
909 struct readpages_iter {
910         struct address_space    *mapping;
911         struct page             **pages;
912         unsigned                nr_pages;
913         unsigned                idx;
914         pgoff_t                 offset;
915 };
916
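/*
 * Pull the readahead pages out of the readahead_control into a private
 * array, creating page state for each one up front:
 */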
917 static int readpages_iter_init(struct readpages_iter *iter,
918                                struct readahead_control *ractl)
919 {
920         unsigned i, nr_pages = readahead_count(ractl);
921
922         memset(iter, 0, sizeof(*iter));
923
924         iter->mapping   = ractl->mapping;
925         iter->offset    = readahead_index(ractl);
926         iter->nr_pages  = nr_pages;
927
928         iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
929         if (!iter->pages)
930                 return -ENOMEM;
931
932         nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
933         for (i = 0; i < nr_pages; i++) {
934                 __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
935                 put_page(iter->pages[i]);
936         }
937
938         return 0;
939 }
940
941 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
942 {
943         if (iter->idx >= iter->nr_pages)
944                 return NULL;
945
946         EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
947
948         return iter->pages[iter->idx];
949 }
950
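/*
 * Checksummed or compressed extents have to be read in full anyway, so it's
 * worth widening the read to cover more of them:
 */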
951 static bool extent_partial_reads_expensive(struct bkey_s_c k)
952 {
953         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
954         struct bch_extent_crc_unpacked crc;
955         const union bch_extent_entry *i;
956
957         bkey_for_each_crc(k.k, ptrs, crc, i)
958                 if (crc.csum_type || crc.compression_type)
959                         return true;
960         return false;
961 }
962
963 static void readpage_bio_extend(struct readpages_iter *iter,
964                                 struct bio *bio,
965                                 unsigned sectors_this_extent,
966                                 bool get_more)
967 {
968         while (bio_sectors(bio) < sectors_this_extent &&
969                bio->bi_vcnt < bio->bi_max_vecs) {
970                 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
971                 struct page *page = readpage_iter_next(iter);
972                 int ret;
973
974                 if (page) {
975                         if (iter->offset + iter->idx != page_offset)
976                                 break;
977
978                         iter->idx++;
979                 } else {
980                         if (!get_more)
981                                 break;
982
983                         page = xa_load(&iter->mapping->i_pages, page_offset);
984                         if (page && !xa_is_value(page))
985                                 break;
986
987                         page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
988                         if (!page)
989                                 break;
990
991                         if (!__bch2_page_state_create(page, 0)) {
992                                 put_page(page);
993                                 break;
994                         }
995
996                         ret = add_to_page_cache_lru(page, iter->mapping,
997                                                     page_offset, GFP_NOFS);
998                         if (ret) {
999                                 __bch2_page_state_release(page);
1000                                 put_page(page);
1001                                 break;
1002                         }
1003
1004                         put_page(page);
1005                 }
1006
1007                 BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
1008         }
1009 }
1010
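/*
 * Core of readpage/readahead: walk the extents btree and issue a read for
 * each extent overlapping the bio, restarting on transaction restart
 * (-EINTR):
 */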
1011 static void bchfs_read(struct btree_trans *trans,
1012                        struct bch_read_bio *rbio,
1013                        subvol_inum inum,
1014                        struct readpages_iter *readpages_iter)
1015 {
1016         struct bch_fs *c = trans->c;
1017         struct btree_iter iter;
1018         struct bkey_buf sk;
1019         int flags = BCH_READ_RETRY_IF_STALE|
1020                 BCH_READ_MAY_PROMOTE;
1021         u32 snapshot;
1022         int ret = 0;
1023
1024         rbio->c = c;
1025         rbio->start_time = local_clock();
1026         rbio->subvol = inum.subvol;
1027
1028         bch2_bkey_buf_init(&sk);
1029 retry:
1030         bch2_trans_begin(trans);
1031         iter = (struct btree_iter) { NULL };
1032
1033         ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1034         if (ret)
1035                 goto err;
1036
1037         bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1038                              SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1039                              BTREE_ITER_SLOTS);
1040         while (1) {
1041                 struct bkey_s_c k;
1042                 unsigned bytes, sectors, offset_into_extent;
1043                 enum btree_id data_btree = BTREE_ID_extents;
1044
1045                 /*
1046                  * read_extent -> io_time_reset may cause a transaction restart
1047                  * without returning an error, we need to check for that here:
1048                  */
1049                 if (!bch2_trans_relock(trans)) {
1050                         ret = -EINTR;
1051                         break;
1052                 }
1053
1054                 bch2_btree_iter_set_pos(&iter,
1055                                 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1056
1057                 k = bch2_btree_iter_peek_slot(&iter);
1058                 ret = bkey_err(k);
1059                 if (ret)
1060                         break;
1061
1062                 offset_into_extent = iter.pos.offset -
1063                         bkey_start_offset(k.k);
1064                 sectors = k.k->size - offset_into_extent;
1065
1066                 bch2_bkey_buf_reassemble(&sk, c, k);
1067
1068                 ret = bch2_read_indirect_extent(trans, &data_btree,
1069                                         &offset_into_extent, &sk);
1070                 if (ret)
1071                         break;
1072
1073                 k = bkey_i_to_s_c(sk.k);
1074
1075                 sectors = min(sectors, k.k->size - offset_into_extent);
1076
1077                 if (readpages_iter)
1078                         readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
1079                                             extent_partial_reads_expensive(k));
1080
1081                 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1082                 swap(rbio->bio.bi_iter.bi_size, bytes);
1083
1084                 if (rbio->bio.bi_iter.bi_size == bytes)
1085                         flags |= BCH_READ_LAST_FRAGMENT;
1086
1087                 bch2_bio_page_state_set(&rbio->bio, k);
1088
1089                 bch2_read_extent(trans, rbio, iter.pos,
1090                                  data_btree, k, offset_into_extent, flags);
1091
1092                 if (flags & BCH_READ_LAST_FRAGMENT)
1093                         break;
1094
1095                 swap(rbio->bio.bi_iter.bi_size, bytes);
1096                 bio_advance(&rbio->bio, bytes);
1097
1098                 ret = btree_trans_too_many_iters(trans);
1099                 if (ret)
1100                         break;
1101         }
1102 err:
1103         bch2_trans_iter_exit(trans, &iter);
1104
1105         if (ret == -EINTR)
1106                 goto retry;
1107
1108         if (ret) {
1109                 bch_err_inum_ratelimited(c, inum.inum,
1110                                 "read error %i from btree lookup", ret);
1111                 rbio->bio.bi_status = BLK_STS_IOERR;
1112                 bio_endio(&rbio->bio);
1113         }
1114
1115         bch2_bkey_buf_exit(&sk, c);
1116 }
1117
1118 void bch2_readahead(struct readahead_control *ractl)
1119 {
1120         struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1121         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1122         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1123         struct btree_trans trans;
1124         struct page *page;
1125         struct readpages_iter readpages_iter;
1126         int ret;
1127
1128         ret = readpages_iter_init(&readpages_iter, ractl);
1129         BUG_ON(ret);
1130
1131         bch2_trans_init(&trans, c, 0, 0);
1132
1133         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1134
1135         while ((page = readpage_iter_next(&readpages_iter))) {
1136                 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
1137                 unsigned n = min_t(unsigned,
1138                                    readpages_iter.nr_pages -
1139                                    readpages_iter.idx,
1140                                    BIO_MAX_VECS);
1141                 struct bch_read_bio *rbio =
1142                         rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1143                                   opts);
1144
1145                 readpages_iter.idx++;
1146
1147                 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
1148                 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
1149                 rbio->bio.bi_end_io = bch2_readpages_end_io;
1150                 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1151
1152                 bchfs_read(&trans, rbio, inode_inum(inode),
1153                            &readpages_iter);
1154         }
1155
1156         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1157
1158         bch2_trans_exit(&trans);
1159         kfree(readpages_iter.pages);
1160 }
1161
1162 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1163                              subvol_inum inum, struct page *page)
1164 {
1165         struct btree_trans trans;
1166
1167         bch2_page_state_create(page, __GFP_NOFAIL);
1168
1169         bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1170         rbio->bio.bi_iter.bi_sector =
1171                 (sector_t) page->index << PAGE_SECTORS_SHIFT;
1172         BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1173
1174         bch2_trans_init(&trans, c, 0, 0);
1175         bchfs_read(&trans, rbio, inum, NULL);
1176         bch2_trans_exit(&trans);
1177 }
1178
1179 int bch2_readpage(struct file *file, struct page *page)
1180 {
1181         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1182         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1183         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1184         struct bch_read_bio *rbio;
1185
1186         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1187         rbio->bio.bi_end_io = bch2_readpages_end_io;
1188
1189         __bchfs_readpage(c, rbio, inode_inum(inode), page);
1190         return 0;
1191 }
1192
1193 static void bch2_read_single_page_end_io(struct bio *bio)
1194 {
1195         complete(bio->bi_private);
1196 }
1197
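/*
 * Synchronously read a single page; used e.g. by bch2_write_begin() when a
 * partial-page write needs the existing contents:
 */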
1198 static int bch2_read_single_page(struct page *page,
1199                                  struct address_space *mapping)
1200 {
1201         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1202         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1203         struct bch_read_bio *rbio;
1204         int ret;
1205         DECLARE_COMPLETION_ONSTACK(done);
1206
1207         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1208                          io_opts(c, &inode->ei_inode));
1209         rbio->bio.bi_private = &done;
1210         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1211
1212         __bchfs_readpage(c, rbio, inode_inum(inode), page);
1213         wait_for_completion(&done);
1214
1215         ret = blk_status_to_errno(rbio->bio.bi_status);
1216         bio_put(&rbio->bio);
1217
1218         if (ret < 0)
1219                 return ret;
1220
1221         SetPageUptodate(page);
1222         return 0;
1223 }
1224
1225 /* writepages: */
1226
1227 struct bch_writepage_state {
1228         struct bch_writepage_io *io;
1229         struct bch_io_opts      opts;
1230 };
1231
1232 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1233                                                                   struct bch_inode_info *inode)
1234 {
1235         return (struct bch_writepage_state) {
1236                 .opts = io_opts(c, &inode->ei_inode)
1237         };
1238 }
1239
1240 static void bch2_writepage_io_free(struct closure *cl)
1241 {
1242         struct bch_writepage_io *io = container_of(cl,
1243                                         struct bch_writepage_io, cl);
1244
1245         bio_put(&io->op.wbio.bio);
1246 }
1247
1248 static void bch2_writepage_io_done(struct closure *cl)
1249 {
1250         struct bch_writepage_io *io = container_of(cl,
1251                                         struct bch_writepage_io, cl);
1252         struct bch_fs *c = io->op.c;
1253         struct bio *bio = &io->op.wbio.bio;
1254         struct bvec_iter_all iter;
1255         struct bio_vec *bvec;
1256         unsigned i;
1257
1258         up(&io->op.c->io_in_flight);
1259
1260         if (io->op.error) {
1261                 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1262
1263                 bio_for_each_segment_all(bvec, bio, iter) {
1264                         struct bch_page_state *s;
1265
1266                         SetPageError(bvec->bv_page);
1267                         mapping_set_error(bvec->bv_page->mapping, -EIO);
1268
1269                         s = __bch2_page_state(bvec->bv_page);
1270                         spin_lock(&s->lock);
1271                         for (i = 0; i < PAGE_SECTORS; i++)
1272                                 s->s[i].nr_replicas = 0;
1273                         spin_unlock(&s->lock);
1274                 }
1275         }
1276
1277         if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1278                 bio_for_each_segment_all(bvec, bio, iter) {
1279                         struct bch_page_state *s;
1280
1281                         s = __bch2_page_state(bvec->bv_page);
1282                         spin_lock(&s->lock);
1283                         for (i = 0; i < PAGE_SECTORS; i++)
1284                                 s->s[i].nr_replicas = 0;
1285                         spin_unlock(&s->lock);
1286                 }
1287         }
1288
1289         /*
1290          * racing with fallocate can cause us to add fewer sectors than
1291          * expected - but we shouldn't add more sectors than expected:
1292          */
1293         WARN_ON_ONCE(io->op.i_sectors_delta > 0);
1294
1295         /*
1296          * (error (due to going RO) halfway through a page can screw that up
1297          * slightly)
1298          * XXX wtf?
1299            BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
1300          */
1301
1302         /*
1303          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1304          * before calling end_page_writeback:
1305          */
1306         i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1307
1308         bio_for_each_segment_all(bvec, bio, iter) {
1309                 struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
1310
1311                 if (atomic_dec_and_test(&s->write_count))
1312                         end_page_writeback(bvec->bv_page);
1313         }
1314
1315         closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1316 }
1317
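/*
 * Submit the writepage io we've been building up; io_in_flight appears to be
 * a semaphore throttling how much writeback is in flight at once:
 */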
1318 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1319 {
1320         struct bch_writepage_io *io = w->io;
1321
1322         down(&io->op.c->io_in_flight);
1323
1324         w->io = NULL;
1325         closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
1326         continue_at(&io->cl, bch2_writepage_io_done, NULL);
1327 }
1328
1329 /*
1330  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1331  * possible, else allocating a new one:
1332  */
1333 static void bch2_writepage_io_alloc(struct bch_fs *c,
1334                                     struct writeback_control *wbc,
1335                                     struct bch_writepage_state *w,
1336                                     struct bch_inode_info *inode,
1337                                     u64 sector,
1338                                     unsigned nr_replicas)
1339 {
1340         struct bch_write_op *op;
1341
1342         w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
1343                                               &c->writepage_bioset),
1344                              struct bch_writepage_io, op.wbio.bio);
1345
1346         closure_init(&w->io->cl, NULL);
1347         w->io->inode            = inode;
1348
1349         op                      = &w->io->op;
1350         bch2_write_op_init(op, c, w->opts);
1351         op->target              = w->opts.foreground_target;
1352         op->nr_replicas         = nr_replicas;
1353         op->res.nr_replicas     = nr_replicas;
1354         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1355         op->subvol              = inode->ei_subvol;
1356         op->pos                 = POS(inode->v.i_ino, sector);
1357         op->wbio.bio.bi_iter.bi_sector = sector;
1358         op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
1359 }
1360
1361 static int __bch2_writepage(struct page *page,
1362                             struct writeback_control *wbc,
1363                             void *data)
1364 {
1365         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1366         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1367         struct bch_writepage_state *w = data;
1368         struct bch_page_state *s, orig;
1369         unsigned i, offset, nr_replicas_this_write = U32_MAX;
1370         loff_t i_size = i_size_read(&inode->v);
1371         pgoff_t end_index = i_size >> PAGE_SHIFT;
1372         int ret;
1373
1374         EBUG_ON(!PageUptodate(page));
1375
1376         /* Is the page fully inside i_size? */
1377         if (page->index < end_index)
1378                 goto do_io;
1379
1380         /* Is the page fully outside i_size? (truncate in progress) */
1381         offset = i_size & (PAGE_SIZE - 1);
1382         if (page->index > end_index || !offset) {
1383                 unlock_page(page);
1384                 return 0;
1385         }
1386
1387         /*
1388          * The page straddles i_size.  It must be zeroed out on each and every
1389          * writepage invocation because it may be mmapped.  "A file is mapped
1390          * in multiples of the page size.  For a file that is not a multiple of
1391          * the  page size, the remaining memory is zeroed when mapped, and
1392          * writes to that region are not written out to the file."
1393          */
1394         zero_user_segment(page, offset, PAGE_SIZE);
1395 do_io:
1396         s = bch2_page_state_create(page, __GFP_NOFAIL);
1397
1398         /*
1399          * Things get really hairy with errors during writeback:
1400          */
1401         ret = bch2_get_page_disk_reservation(c, inode, page, false);
1402         BUG_ON(ret);
1403
1404         /* Before unlocking the page, get copy of reservations: */
1405         spin_lock(&s->lock);
1406         orig = *s;
1407         spin_unlock(&s->lock);
1408
1409         for (i = 0; i < PAGE_SECTORS; i++) {
1410                 if (s->s[i].state < SECTOR_DIRTY)
1411                         continue;
1412
1413                 nr_replicas_this_write =
1414                         min_t(unsigned, nr_replicas_this_write,
1415                               s->s[i].nr_replicas +
1416                               s->s[i].replicas_reserved);
1417         }
1418
1419         for (i = 0; i < PAGE_SECTORS; i++) {
1420                 if (s->s[i].state < SECTOR_DIRTY)
1421                         continue;
1422
1423                 s->s[i].nr_replicas = w->opts.compression
1424                         ? 0 : nr_replicas_this_write;
1425
1426                 s->s[i].replicas_reserved = 0;
1427                 s->s[i].state = SECTOR_ALLOCATED;
1428         }
1429
1430         BUG_ON(atomic_read(&s->write_count));
1431         atomic_set(&s->write_count, 1);
1432
1433         BUG_ON(PageWriteback(page));
1434         set_page_writeback(page);
1435
1436         unlock_page(page);
1437
1438         offset = 0;
1439         while (1) {
1440                 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1441                 u64 sector;
1442
1443                 while (offset < PAGE_SECTORS &&
1444                        orig.s[offset].state < SECTOR_DIRTY)
1445                         offset++;
1446
1447                 if (offset == PAGE_SECTORS)
1448                         break;
1449
1450                 while (offset + sectors < PAGE_SECTORS &&
1451                        orig.s[offset + sectors].state >= SECTOR_DIRTY) {
1452                         reserved_sectors += orig.s[offset + sectors].replicas_reserved;
1453                         dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
1454                         sectors++;
1455                 }
1456                 BUG_ON(!sectors);
1457
1458                 sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;
1459
1460                 if (w->io &&
1461                     (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1462                      bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
1463                      w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1464                      (BIO_MAX_VECS * PAGE_SIZE) ||
1465                      bio_end_sector(&w->io->op.wbio.bio) != sector))
1466                         bch2_writepage_do_io(w);
1467
1468                 if (!w->io)
1469                         bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1470                                                 nr_replicas_this_write);
1471
1472                 atomic_inc(&s->write_count);
1473
1474                 BUG_ON(inode != w->io->inode);
1475                 BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
1476                                      sectors << 9, offset << 9));
1477
1478                 /* Check for writing past i_size: */
1479                 WARN_ON_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1480                              round_up(i_size, block_bytes(c)));
1481
1482                 w->io->op.res.sectors += reserved_sectors;
1483                 w->io->op.i_sectors_delta -= dirty_sectors;
1484                 w->io->op.new_i_size = i_size;
1485
1486                 offset += sectors;
1487         }
1488
1489         if (atomic_dec_and_test(&s->write_count))
1490                 end_page_writeback(page);
1491
1492         return 0;
1493 }
1494
1495 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1496 {
1497         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1498         struct bch_writepage_state w =
1499                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1500         struct blk_plug plug;
1501         int ret;
1502
1503         blk_start_plug(&plug);
1504         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1505         if (w.io)
1506                 bch2_writepage_do_io(&w);
1507         blk_finish_plug(&plug);
1508         return ret;
1509 }
1510
1511 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1512 {
1513         struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1514         struct bch_writepage_state w =
1515                 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1516         int ret;
1517
1518         ret = __bch2_writepage(page, wbc, &w);
1519         if (w.io)
1520                 bch2_writepage_do_io(&w);
1521
1522         return ret;
1523 }
1524
1525 /* buffered writes: */
1526
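/*
 * ->write_begin(): allocate a bch2_page_reservation (handed to ->write_end()
 * via *fsdata), take the pagecache add lock, grab the page and - if this is a
 * partial write of a page that isn't already uptodate - read it in, then
 * reserve space for the range being written.
 */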
1527 int bch2_write_begin(struct file *file, struct address_space *mapping,
1528                      loff_t pos, unsigned len, unsigned flags,
1529                      struct page **pagep, void **fsdata)
1530 {
1531         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1532         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1533         struct bch2_page_reservation *res;
1534         pgoff_t index = pos >> PAGE_SHIFT;
1535         unsigned offset = pos & (PAGE_SIZE - 1);
1536         struct page *page;
1537         int ret = -ENOMEM;
1538
1539         res = kmalloc(sizeof(*res), GFP_KERNEL);
1540         if (!res)
1541                 return -ENOMEM;
1542
1543         bch2_page_reservation_init(c, inode, res);
1544         *fsdata = res;
1545
1546         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1547
1548         page = grab_cache_page_write_begin(mapping, index, flags);
1549         if (!page)
1550                 goto err_unlock;
1551
1552         if (PageUptodate(page))
1553                 goto out;
1554
1555         /* If we're writing the entire page, we don't need to read it in first: */
1556         if (len == PAGE_SIZE)
1557                 goto out;
1558
1559         if (!offset && pos + len >= inode->v.i_size) {
1560                 zero_user_segment(page, len, PAGE_SIZE);
1561                 flush_dcache_page(page);
1562                 goto out;
1563         }
1564
1565         if (index > inode->v.i_size >> PAGE_SHIFT) {
1566                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1567                 flush_dcache_page(page);
1568                 goto out;
1569         }
1570 readpage:
1571         ret = bch2_read_single_page(page, mapping);
1572         if (ret)
1573                 goto err;
1574 out:
1575         if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1576                 ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
1577                 if (ret)
1578                         goto err;
1579         }
1580
1581         ret = bch2_page_reservation_get(c, inode, page, res,
1582                                         offset, len, true);
1583         if (ret) {
1584                 if (!PageUptodate(page)) {
1585                         /*
1586                          * If the page hasn't been read in, we won't know if we
1587                          * actually need a reservation - we don't actually need
1588                          * to read here, we just need to check if the page is
1589                          * fully backed by uncompressed data:
1590                          */
1591                         goto readpage;
1592                 }
1593
1594                 goto err;
1595         }
1596
1597         *pagep = page;
1598         return 0;
1599 err:
1600         unlock_page(page);
1601         put_page(page);
1602         *pagep = NULL;
1603 err_unlock:
1604         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1605         kfree(res);
1606         *fsdata = NULL;
1607         return ret;
1608 }
1609
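/*
 * ->write_end(): if the copy from userspace was short and the page wasn't
 * uptodate, zero it and report 0 copied so the caller retries; otherwise
 * update i_size, mark the page uptodate and dirty against the reservation
 * taken in ->write_begin(), then drop the page, the pagecache lock and the
 * reservation.
 */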
1610 int bch2_write_end(struct file *file, struct address_space *mapping,
1611                    loff_t pos, unsigned len, unsigned copied,
1612                    struct page *page, void *fsdata)
1613 {
1614         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1615         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1616         struct bch2_page_reservation *res = fsdata;
1617         unsigned offset = pos & (PAGE_SIZE - 1);
1618
1619         lockdep_assert_held(&inode->v.i_rwsem);
1620
1621         if (unlikely(copied < len && !PageUptodate(page))) {
1622                 /*
1623                  * The page needs to be read in, but that would destroy
1624                  * our partial write - simplest thing is to just force
1625                  * userspace to redo the write:
1626                  */
1627                 zero_user(page, 0, PAGE_SIZE);
1628                 flush_dcache_page(page);
1629                 copied = 0;
1630         }
1631
1632         spin_lock(&inode->v.i_lock);
1633         if (pos + copied > inode->v.i_size)
1634                 i_size_write(&inode->v, pos + copied);
1635         spin_unlock(&inode->v.i_lock);
1636
1637         if (copied) {
1638                 if (!PageUptodate(page))
1639                         SetPageUptodate(page);
1640
1641                 bch2_set_page_dirty(c, inode, page, res, offset, copied);
1642
1643                 inode->ei_last_dirtied = (unsigned long) current;
1644         }
1645
1646         unlock_page(page);
1647         put_page(page);
1648         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1649
1650         bch2_page_reservation_put(c, inode, res);
1651         kfree(res);
1652
1653         return copied;
1654 }
1655
1656 #define WRITE_BATCH_PAGES       32
1657
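/*
 * Copy up to WRITE_BATCH_PAGES worth of data at @pos: grab the pages, read in
 * partially overwritten first/last pages where needed, set up page state and
 * space reservations for the whole range, copy from the iterator, then update
 * i_size and dirty what was copied. Returns bytes copied, or an error if
 * nothing could be copied.
 */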
1658 static int __bch2_buffered_write(struct bch_inode_info *inode,
1659                                  struct address_space *mapping,
1660                                  struct iov_iter *iter,
1661                                  loff_t pos, unsigned len)
1662 {
1663         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1664         struct page *pages[WRITE_BATCH_PAGES];
1665         struct bch2_page_reservation res;
1666         unsigned long index = pos >> PAGE_SHIFT;
1667         unsigned offset = pos & (PAGE_SIZE - 1);
1668         unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1669         unsigned i, reserved = 0, set_dirty = 0;
1670         unsigned copied = 0, nr_pages_copied = 0;
1671         int ret = 0;
1672
1673         BUG_ON(!len);
1674         BUG_ON(nr_pages > ARRAY_SIZE(pages));
1675
1676         bch2_page_reservation_init(c, inode, &res);
1677
1678         for (i = 0; i < nr_pages; i++) {
1679                 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1680                 if (!pages[i]) {
1681                         nr_pages = i;
1682                         if (!i) {
1683                                 ret = -ENOMEM;
1684                                 goto out;
1685                         }
1686                         len = min_t(unsigned, len,
1687                                     nr_pages * PAGE_SIZE - offset);
1688                         break;
1689                 }
1690         }
1691
1692         if (offset && !PageUptodate(pages[0])) {
1693                 ret = bch2_read_single_page(pages[0], mapping);
1694                 if (ret)
1695                         goto out;
1696         }
1697
1698         if ((pos + len) & (PAGE_SIZE - 1) &&
1699             !PageUptodate(pages[nr_pages - 1])) {
1700                 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1701                         zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1702                 } else {
1703                         ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1704                         if (ret)
1705                                 goto out;
1706                 }
1707         }
1708
1709         while (reserved < len) {
1710                 unsigned i = (offset + reserved) >> PAGE_SHIFT;
1711                 struct page *page = pages[i];
1712                 unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
1713                 unsigned pg_len = min_t(unsigned, len - reserved,
1714                                         PAGE_SIZE - pg_offset);
1715
1716                 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1717                         ret = bch2_page_state_set(c, inode_inum(inode),
1718                                                   pages + i, nr_pages - i);
1719                         if (ret)
1720                                 goto out;
1721                 }
1722
1723                 ret = bch2_page_reservation_get(c, inode, page, &res,
1724                                                 pg_offset, pg_len, true);
1725                 if (ret)
1726                         goto out;
1727
1728                 reserved += pg_len;
1729         }
1730
1731         if (mapping_writably_mapped(mapping))
1732                 for (i = 0; i < nr_pages; i++)
1733                         flush_dcache_page(pages[i]);
1734
1735         while (copied < len) {
1736                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1737                 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1738                 unsigned pg_len = min_t(unsigned, len - copied,
1739                                         PAGE_SIZE - pg_offset);
1740                 unsigned pg_copied = copy_page_from_iter_atomic(page,
1741                                                 pg_offset, pg_len, iter);
1742
1743                 if (!pg_copied)
1744                         break;
1745
1746                 if (!PageUptodate(page) &&
1747                     pg_copied != PAGE_SIZE &&
1748                     pos + copied + pg_copied < inode->v.i_size) {
1749                         zero_user(page, 0, PAGE_SIZE);
1750                         break;
1751                 }
1752
1753                 flush_dcache_page(page);
1754                 copied += pg_copied;
1755
1756                 if (pg_copied != pg_len)
1757                         break;
1758         }
1759
1760         if (!copied)
1761                 goto out;
1762
1763         spin_lock(&inode->v.i_lock);
1764         if (pos + copied > inode->v.i_size)
1765                 i_size_write(&inode->v, pos + copied);
1766         spin_unlock(&inode->v.i_lock);
1767
1768         while (set_dirty < copied) {
1769                 struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
1770                 unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
1771                 unsigned pg_len = min_t(unsigned, copied - set_dirty,
1772                                         PAGE_SIZE - pg_offset);
1773
1774                 if (!PageUptodate(page))
1775                         SetPageUptodate(page);
1776
1777                 bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
1778                 unlock_page(page);
1779                 put_page(page);
1780
1781                 set_dirty += pg_len;
1782         }
1783
1784         nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1785         inode->ei_last_dirtied = (unsigned long) current;
1786 out:
1787         for (i = nr_pages_copied; i < nr_pages; i++) {
1788                 unlock_page(pages[i]);
1789                 put_page(pages[i]);
1790         }
1791
1792         bch2_page_reservation_put(c, inode, &res);
1793
1794         return copied ?: ret;
1795 }
1796
1797 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1798 {
1799         struct file *file = iocb->ki_filp;
1800         struct address_space *mapping = file->f_mapping;
1801         struct bch_inode_info *inode = file_bch_inode(file);
1802         loff_t pos = iocb->ki_pos;
1803         ssize_t written = 0;
1804         int ret = 0;
1805
1806         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1807
1808         do {
1809                 unsigned offset = pos & (PAGE_SIZE - 1);
1810                 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1811                               PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1812 again:
1813                 /*
1814                  * Bring in the user page that we will copy from _first_.
1815                  * Otherwise there's a nasty deadlock on copying from the
1816                  * same page as we're writing to, without it being marked
1817                  * up-to-date.
1818                  *
1819                  * Not only is this an optimisation, but it is also required
1820                  * to check that the address is actually valid, when atomic
1821                  * usercopies are used, below.
1822                  */
1823                 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
1824                         bytes = min_t(unsigned long, iov_iter_count(iter),
1825                                       PAGE_SIZE - offset);
1826
1827                         if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
1828                                 ret = -EFAULT;
1829                                 break;
1830                         }
1831                 }
1832
1833                 if (unlikely(fatal_signal_pending(current))) {
1834                         ret = -EINTR;
1835                         break;
1836                 }
1837
1838                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1839                 if (unlikely(ret < 0))
1840                         break;
1841
1842                 cond_resched();
1843
1844                 if (unlikely(ret == 0)) {
1845                         /*
1846                          * If we were unable to copy any data at all, we must
1847                          * fall back to a single segment length write.
1848                          *
1849                          * If we didn't fall back here, we could livelock
1850                          * because not all segments in the iov can be copied at
1851                          * once without a pagefault.
1852                          */
1853                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
1854                                       iov_iter_single_seg_count(iter));
1855                         goto again;
1856                 }
1857                 pos += ret;
1858                 written += ret;
1859                 ret = 0;
1860
1861                 balance_dirty_pages_ratelimited(mapping);
1862         } while (iov_iter_count(iter));
1863
1864         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1865
1866         return written ? written : ret;
1867 }
1868
1869 /* O_DIRECT reads */
1870
1871 static void bio_check_or_release(struct bio *bio, bool check_dirty)
1872 {
1873         if (check_dirty) {
1874                 bio_check_pages_dirty(bio);
1875         } else {
1876                 bio_release_pages(bio, false);
1877                 bio_put(bio);
1878         }
1879 }
1880
1881 static void bch2_dio_read_complete(struct closure *cl)
1882 {
1883         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1884
1885         dio->req->ki_complete(dio->req, dio->ret);
1886         bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1887 }
1888
1889 static void bch2_direct_IO_read_endio(struct bio *bio)
1890 {
1891         struct dio_read *dio = bio->bi_private;
1892
1893         if (bio->bi_status)
1894                 dio->ret = blk_status_to_errno(bio->bi_status);
1895
1896         closure_put(&dio->cl);
1897 }
1898
1899 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1900 {
1901         struct dio_read *dio = bio->bi_private;
1902         bool should_dirty = dio->should_dirty;
1903
1904         bch2_direct_IO_read_endio(bio);
1905         bio_check_or_release(bio, should_dirty);
1906 }
1907
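/*
 * O_DIRECT read: the request is clamped to i_size and split into bios of at
 * most BIO_MAX_VECS pages. The dio_read embedded in the first bio holds a
 * closure that each bio's endio puts; when the last reference is dropped we
 * either wake the synchronous waiter or complete the kiocb from
 * bch2_dio_read_complete().
 */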
1908 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1909 {
1910         struct file *file = req->ki_filp;
1911         struct bch_inode_info *inode = file_bch_inode(file);
1912         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1913         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1914         struct dio_read *dio;
1915         struct bio *bio;
1916         loff_t offset = req->ki_pos;
1917         bool sync = is_sync_kiocb(req);
1918         size_t shorten;
1919         ssize_t ret;
1920
1921         if ((offset|iter->count) & (block_bytes(c) - 1))
1922                 return -EINVAL;
1923
1924         ret = min_t(loff_t, iter->count,
1925                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1926
1927         if (!ret)
1928                 return ret;
1929
1930         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1931         iter->count -= shorten;
1932
1933         bio = bio_alloc_bioset(GFP_KERNEL,
1934                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
1935                                &c->dio_read_bioset);
1936
1937         bio->bi_end_io = bch2_direct_IO_read_endio;
1938
1939         dio = container_of(bio, struct dio_read, rbio.bio);
1940         closure_init(&dio->cl, NULL);
1941
1942         /*
1943          * this is a _really_ horrible hack just to avoid an atomic sub at the
1944          * end:
1945          */
1946         if (!sync) {
1947                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1948                 atomic_set(&dio->cl.remaining,
1949                            CLOSURE_REMAINING_INITIALIZER -
1950                            CLOSURE_RUNNING +
1951                            CLOSURE_DESTRUCTOR);
1952         } else {
1953                 atomic_set(&dio->cl.remaining,
1954                            CLOSURE_REMAINING_INITIALIZER + 1);
1955         }
1956
1957         dio->req        = req;
1958         dio->ret        = ret;
1959         /*
1960          * This is one of the sketchier things I've encountered: we have to skip
1961          * the dirtying of requests that are internal to the kernel (i.e. from
1962          * loopback), because we'll deadlock on page_lock.
1963          */
1964         dio->should_dirty = iter_is_iovec(iter);
1965
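        /*
         * The first bio was allocated from dio_read_bioset above (it embeds
         * our struct dio_read), so jump into the middle of the loop;
         * additional bios for large requests come from c->bio_read and
         * complete via the split endio.
         */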
1966         goto start;
1967         while (iter->count) {
1968                 bio = bio_alloc_bioset(GFP_KERNEL,
1969                                        bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
1970                                        &c->bio_read);
1971                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1972 start:
1973                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1974                 bio->bi_iter.bi_sector  = offset >> 9;
1975                 bio->bi_private         = dio;
1976
1977                 ret = bio_iov_iter_get_pages(bio, iter);
1978                 if (ret < 0) {
1979                         /* XXX: fault inject this path */
1980                         bio->bi_status = BLK_STS_RESOURCE;
1981                         bio_endio(bio);
1982                         break;
1983                 }
1984
1985                 offset += bio->bi_iter.bi_size;
1986
1987                 if (dio->should_dirty)
1988                         bio_set_pages_dirty(bio);
1989
1990                 if (iter->count)
1991                         closure_get(&dio->cl);
1992
1993                 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
1994         }
1995
1996         iter->count += shorten;
1997
1998         if (sync) {
1999                 closure_sync(&dio->cl);
2000                 closure_debug_destroy(&dio->cl);
2001                 ret = dio->ret;
2002                 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2003                 return ret;
2004         } else {
2005                 return -EIOCBQUEUED;
2006         }
2007 }
2008
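/*
 * ->read_iter(): O_DIRECT reads flush the range and go through
 * bch2_direct_IO_read() under a block plug; buffered reads use
 * generic_file_read_iter() with the pagecache add lock held, so they don't
 * race with operations that shoot down and block the page cache (O_DIRECT
 * writes, truncate, fallocate).
 */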
2009 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2010 {
2011         struct file *file = iocb->ki_filp;
2012         struct bch_inode_info *inode = file_bch_inode(file);
2013         struct address_space *mapping = file->f_mapping;
2014         size_t count = iov_iter_count(iter);
2015         ssize_t ret;
2016
2017         if (!count)
2018                 return 0; /* skip atime */
2019
2020         if (iocb->ki_flags & IOCB_DIRECT) {
2021                 struct blk_plug plug;
2022
2023                 ret = filemap_write_and_wait_range(mapping,
2024                                         iocb->ki_pos,
2025                                         iocb->ki_pos + count - 1);
2026                 if (ret < 0)
2027                         return ret;
2028
2029                 file_accessed(file);
2030
2031                 blk_start_plug(&plug);
2032                 ret = bch2_direct_IO_read(iocb, iter);
2033                 blk_finish_plug(&plug);
2034
2035                 if (ret >= 0)
2036                         iocb->ki_pos += ret;
2037         } else {
2038                 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
2039                 ret = generic_file_read_iter(iocb, iter);
2040                 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
2041         }
2042
2043         return ret;
2044 }
2045
2046 /* O_DIRECT writes */
2047
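/*
 * Returns true if every extent in [offset, offset + size) already has at
 * least @nr_replicas replicas and, unless @compressed, no compressed data -
 * i.e. the O_DIRECT overwrite can proceed even though we couldn't get a new
 * disk reservation.
 */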
2048 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2049                                        u64 offset, u64 size,
2050                                        unsigned nr_replicas, bool compressed)
2051 {
2052         struct btree_trans trans;
2053         struct btree_iter iter;
2054         struct bkey_s_c k;
2055         u64 end = offset + size;
2056         u32 snapshot;
2057         bool ret = true;
2058         int err;
2059
2060         bch2_trans_init(&trans, c, 0, 0);
2061 retry:
2062         bch2_trans_begin(&trans);
2063
2064         err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2065         if (err)
2066                 goto err;
2067
2068         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2069                            SPOS(inum.inum, offset, snapshot),
2070                            BTREE_ITER_SLOTS, k, err) {
2071                 if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
2072                         break;
2073
2074                 if (k.k->p.snapshot != snapshot ||
2075                     nr_replicas > bch2_bkey_replicas(c, k) ||
2076                     (!compressed && bch2_bkey_sectors_compressed(k))) {
2077                         ret = false;
2078                         break;
2079                 }
2080         }
2081
2082         offset = iter.pos.offset;
2083         bch2_trans_iter_exit(&trans, &iter);
2084 err:
2085         if (err == -EINTR)
2086                 goto retry;
2087         bch2_trans_exit(&trans);
2088
2089         return err ? false : ret;
2090 }
2091
2092 static void bch2_dio_write_loop_async(struct bch_write_op *);
2093
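/*
 * The O_DIRECT write loop: each pass pins one bio's worth of user pages (with
 * faults_disabled_mapping set, so a page fault on this file's own mapping
 * drops and retakes ei_pagecache_lock instead of deadlocking), gets a disk
 * reservation, and submits a bch2_write op. Sync writes wait for each op
 * here; async writes re-enter the loop from bch2_dio_write_loop_async() when
 * the op completes.
 */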
2094 static long bch2_dio_write_loop(struct dio_write *dio)
2095 {
2096         bool kthread = (current->flags & PF_KTHREAD) != 0;
2097         struct kiocb *req = dio->req;
2098         struct address_space *mapping = req->ki_filp->f_mapping;
2099         struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
2100         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2101         struct bio *bio = &dio->op.wbio.bio;
2102         struct bvec_iter_all iter;
2103         struct bio_vec *bv;
2104         unsigned unaligned, iter_count;
2105         bool sync = dio->sync, dropped_locks;
2106         long ret;
2107
2108         if (dio->loop)
2109                 goto loop;
2110
2111         down(&c->io_in_flight);
2112
2113         while (1) {
2114                 iter_count = dio->iter.count;
2115
2116                 if (kthread && dio->mm)
2117                         kthread_use_mm(dio->mm);
2118                 BUG_ON(current->faults_disabled_mapping);
2119                 current->faults_disabled_mapping = mapping;
2120
2121                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2122
2123                 dropped_locks = fdm_dropped_locks();
2124
2125                 current->faults_disabled_mapping = NULL;
2126                 if (kthread && dio->mm)
2127                         kthread_unuse_mm(dio->mm);
2128
2129                 /*
2130                  * If the fault handler returned an error but also signalled
2131                  * that it dropped & retook ei_pagecache_lock, we just need to
2132                  * re-shoot down the page cache and retry:
2133                  */
2134                 if (dropped_locks && ret)
2135                         ret = 0;
2136
2137                 if (unlikely(ret < 0))
2138                         goto err;
2139
2140                 if (unlikely(dropped_locks)) {
2141                         ret = write_invalidate_inode_pages_range(mapping,
2142                                         req->ki_pos,
2143                                         req->ki_pos + iter_count - 1);
2144                         if (unlikely(ret))
2145                                 goto err;
2146
2147                         if (!bio->bi_iter.bi_size)
2148                                 continue;
2149                 }
2150
2151                 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2152                 bio->bi_iter.bi_size -= unaligned;
2153                 iov_iter_revert(&dio->iter, unaligned);
2154
2155                 if (!bio->bi_iter.bi_size) {
2156                         /*
2157                          * bio_iov_iter_get_pages was only able to get <
2158                          * blocksize worth of pages:
2159                          */
2160                         ret = -EFAULT;
2161                         goto err;
2162                 }
2163
2164                 bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
2165                 dio->op.end_io          = bch2_dio_write_loop_async;
2166                 dio->op.target          = dio->op.opts.foreground_target;
2167                 dio->op.write_point     = writepoint_hashed((unsigned long) current);
2168                 dio->op.nr_replicas     = dio->op.opts.data_replicas;
2169                 dio->op.subvol          = inode->ei_subvol;
2170                 dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2171
2172                 if ((req->ki_flags & IOCB_DSYNC) &&
2173                     !c->opts.journal_flush_disabled)
2174                         dio->op.flags |= BCH_WRITE_FLUSH;
2175                 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2176
2177                 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2178                                                 dio->op.opts.data_replicas, 0);
2179                 if (unlikely(ret) &&
2180                     !bch2_check_range_allocated(c, inode_inum(inode),
2181                                 dio->op.pos.offset, bio_sectors(bio),
2182                                 dio->op.opts.data_replicas,
2183                                 dio->op.opts.compression != 0))
2184                         goto err;
2185
2186                 task_io_account_write(bio->bi_iter.bi_size);
2187
2188                 if (!dio->sync && !dio->loop && dio->iter.count) {
2189                         struct iovec *iov = dio->inline_vecs;
2190
2191                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2192                                 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
2193                                               GFP_KERNEL);
2194                                 if (unlikely(!iov)) {
2195                                         dio->sync = sync = true;
2196                                         goto do_io;
2197                                 }
2198
2199                                 dio->free_iov = true;
2200                         }
2201
2202                         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2203                         dio->iter.iov = iov;
2204                 }
2205 do_io:
2206                 dio->loop = true;
2207                 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2208
2209                 if (sync)
2210                         wait_for_completion(&dio->done);
2211                 else
2212                         return -EIOCBQUEUED;
2213 loop:
2214                 i_sectors_acct(c, inode, &dio->quota_res,
2215                                dio->op.i_sectors_delta);
2216                 req->ki_pos += (u64) dio->op.written << 9;
2217                 dio->written += dio->op.written;
2218
2219                 spin_lock(&inode->v.i_lock);
2220                 if (req->ki_pos > inode->v.i_size)
2221                         i_size_write(&inode->v, req->ki_pos);
2222                 spin_unlock(&inode->v.i_lock);
2223
2224                 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2225                         bio_for_each_segment_all(bv, bio, iter)
2226                                 put_page(bv->bv_page);
2227                 bio->bi_vcnt = 0;
2228
2229                 if (dio->op.error) {
2230                         set_bit(EI_INODE_ERROR, &inode->ei_flags);
2231                         break;
2232                 }
2233
2234                 if (!dio->iter.count)
2235                         break;
2236
2237                 bio_reset(bio);
2238                 reinit_completion(&dio->done);
2239         }
2240
2241         ret = dio->op.error ?: ((long) dio->written << 9);
2242 err:
2243         up(&c->io_in_flight);
2244         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2245         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2246
2247         if (dio->free_iov)
2248                 kfree(dio->iter.iov);
2249
2250         if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2251                 bio_for_each_segment_all(bv, bio, iter)
2252                         put_page(bv->bv_page);
2253         bio_put(bio);
2254
2255         /* inode->i_dio_count is our ref on inode and thus bch_fs */
2256         inode_dio_end(&inode->v);
2257
2258         if (!sync) {
2259                 req->ki_complete(req, ret);
2260                 ret = -EIOCBQUEUED;
2261         }
2262         return ret;
2263 }
2264
2265 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2266 {
2267         struct dio_write *dio = container_of(op, struct dio_write, op);
2268
2269         if (dio->sync)
2270                 complete(&dio->done);
2271         else
2272                 bch2_dio_write_loop(dio);
2273 }
2274
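/*
 * O_DIRECT write setup: run the generic write checks, block new pages from
 * being added to the page cache (pagecache_block), decide whether the write
 * must be synchronous (sync kiocb, or one that extends i_size), take a quota
 * reservation and invalidate the page cache over the range before handing off
 * to bch2_dio_write_loop().
 */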
2275 static noinline
2276 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2277 {
2278         struct file *file = req->ki_filp;
2279         struct address_space *mapping = file->f_mapping;
2280         struct bch_inode_info *inode = file_bch_inode(file);
2281         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2282         struct dio_write *dio;
2283         struct bio *bio;
2284         bool locked = true, extending;
2285         ssize_t ret;
2286
2287         prefetch(&c->opts);
2288         prefetch((void *) &c->opts + 64);
2289         prefetch(&inode->ei_inode);
2290         prefetch((void *) &inode->ei_inode + 64);
2291
2292         inode_lock(&inode->v);
2293
2294         ret = generic_write_checks(req, iter);
2295         if (unlikely(ret <= 0))
2296                 goto err;
2297
2298         ret = file_remove_privs(file);
2299         if (unlikely(ret))
2300                 goto err;
2301
2302         ret = file_update_time(file);
2303         if (unlikely(ret))
2304                 goto err;
2305
             ret = -EINVAL;
2306         if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
2307                 goto err;
2308
2309         inode_dio_begin(&inode->v);
2310         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2311
2312         extending = req->ki_pos + iter->count > inode->v.i_size;
2313         if (!extending) {
2314                 inode_unlock(&inode->v);
2315                 locked = false;
2316         }
2317
2318         bio = bio_alloc_bioset(GFP_KERNEL,
2319                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2320                                &c->dio_write_bioset);
2321         dio = container_of(bio, struct dio_write, op.wbio.bio);
2322         init_completion(&dio->done);
2323         dio->req                = req;
2324         dio->mm                 = current->mm;
2325         dio->loop               = false;
2326         dio->sync               = is_sync_kiocb(req) || extending;
2327         dio->free_iov           = false;
2328         dio->quota_res.sectors  = 0;
2329         dio->written            = 0;
2330         dio->iter               = *iter;
2331
2332         ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2333                                          iter->count >> 9, true);
2334         if (unlikely(ret))
2335                 goto err_put_bio;
2336
2337         ret = write_invalidate_inode_pages_range(mapping,
2338                                         req->ki_pos,
2339                                         req->ki_pos + iter->count - 1);
2340         if (unlikely(ret))
2341                 goto err_put_bio;
2342
2343         ret = bch2_dio_write_loop(dio);
2344 err:
2345         if (locked)
2346                 inode_unlock(&inode->v);
2347         return ret;
2348 err_put_bio:
2349         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2350         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2351         bio_put(bio);
2352         inode_dio_end(&inode->v);
2353         goto err;
2354 }
2355
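/*
 * ->write_iter(): O_DIRECT goes through bch2_direct_write(); buffered writes
 * take the inode lock, run the generic checks and bch2_buffered_write(), then
 * let generic_write_sync() handle O_SYNC/O_DSYNC.
 */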
2356 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2357 {
2358         struct file *file = iocb->ki_filp;
2359         struct bch_inode_info *inode = file_bch_inode(file);
2360         ssize_t ret;
2361
2362         if (iocb->ki_flags & IOCB_DIRECT)
2363                 return bch2_direct_write(iocb, from);
2364
2365         /* We can write back this queue in page reclaim */
2366         current->backing_dev_info = inode_to_bdi(&inode->v);
2367         inode_lock(&inode->v);
2368
2369         ret = generic_write_checks(iocb, from);
2370         if (ret <= 0)
2371                 goto unlock;
2372
2373         ret = file_remove_privs(file);
2374         if (ret)
2375                 goto unlock;
2376
2377         ret = file_update_time(file);
2378         if (ret)
2379                 goto unlock;
2380
2381         ret = bch2_buffered_write(iocb, from);
2382         if (likely(ret > 0))
2383                 iocb->ki_pos += ret;
2384 unlock:
2385         inode_unlock(&inode->v);
2386         current->backing_dev_info = NULL;
2387
2388         if (ret > 0)
2389                 ret = generic_write_sync(iocb, ret);
2390
2391         return ret;
2392 }
2393
2394 /* fsync: */
2395
2396 /*
2397  * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2398  * insert trigger: look up the btree inode instead
2399  */
2400 static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
2401 {
2402         struct bch_inode_unpacked inode;
2403         int ret;
2404
2405         if (c->opts.journal_flush_disabled)
2406                 return 0;
2407
2408         ret = bch2_inode_find_by_inum(c, inum, &inode);
2409         if (ret)
2410                 return ret;
2411
2412         return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
2413 }
2414
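/*
 * fsync: write back and wait on dirty pages, sync the VFS inode, then flush
 * the journal up to this inode's last journal sequence number; all three are
 * attempted and the first error is returned.
 */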
2415 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2416 {
2417         struct bch_inode_info *inode = file_bch_inode(file);
2418         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2419         int ret, ret2, ret3;
2420
2421         ret = file_write_and_wait_range(file, start, end);
2422         ret2 = sync_inode_metadata(&inode->v, 1);
2423         ret3 = bch2_flush_inode(c, inode_inum(inode));
2424
2425         return ret ?: ret2 ?: ret3;
2426 }
2427
2428 /* truncate: */
2429
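/*
 * Returns > 0 if any extent in [start, end) within @subvol contains data,
 * 0 if the whole range is a hole, or a negative error code.
 */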
2430 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2431                                  struct bpos start,
2432                                  struct bpos end)
2433 {
2434         struct btree_trans trans;
2435         struct btree_iter iter;
2436         struct bkey_s_c k;
2437         int ret = 0;
2438
2439         bch2_trans_init(&trans, c, 0, 0);
2440 retry:
2441         bch2_trans_begin(&trans);
2442
2443         ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2444         if (ret)
2445                 goto err;
2446
2447         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
2448                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2449                         break;
2450
2451                 if (bkey_extent_is_data(k.k)) {
2452                         ret = 1;
2453                         break;
2454                 }
2455         }
2456         start = iter.pos;
2457         bch2_trans_iter_exit(&trans, &iter);
2458 err:
2459         if (ret == -EINTR)
2460                 goto retry;
2461
2462         bch2_trans_exit(&trans);
2463         return ret;
2464 }
2465
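/*
 * Zero out the part of the page that a truncate or hole punch boundary falls
 * in: read the page if there's data on disk, reset the affected sectors'
 * state and i_sectors accounting, then zero the bytes and redirty the page.
 * A negative return is an error; otherwise the return value tells the caller
 * whether the page straddling i_size was dirty (see the comment on the i_size
 * update below).
 */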
2466 static int __bch2_truncate_page(struct bch_inode_info *inode,
2467                                 pgoff_t index, loff_t start, loff_t end)
2468 {
2469         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2470         struct address_space *mapping = inode->v.i_mapping;
2471         struct bch_page_state *s;
2472         unsigned start_offset = start & (PAGE_SIZE - 1);
2473         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2474         unsigned i;
2475         struct page *page;
2476         s64 i_sectors_delta = 0;
2477         int ret = 0;
2478
2479         /* Page boundary? Nothing to do */
2480         if (!((index == start >> PAGE_SHIFT && start_offset) ||
2481               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2482                 return 0;
2483
2484         /* Above i_size? */
2485         if (index << PAGE_SHIFT >= inode->v.i_size)
2486                 return 0;
2487
2488         page = find_lock_page(mapping, index);
2489         if (!page) {
2490                 /*
2491                  * XXX: we're doing two index lookups when we end up reading the
2492                  * page
2493                  */
2494                 ret = range_has_data(c, inode->ei_subvol,
2495                                 POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
2496                                 POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
2497                 if (ret <= 0)
2498                         return ret;
2499
2500                 page = find_or_create_page(mapping, index, GFP_KERNEL);
2501                 if (unlikely(!page)) {
2502                         ret = -ENOMEM;
2503                         goto out;
2504                 }
2505         }
2506
2507         s = bch2_page_state_create(page, 0);
2508         if (!s) {
2509                 ret = -ENOMEM;
2510                 goto unlock;
2511         }
2512
2513         if (!PageUptodate(page)) {
2514                 ret = bch2_read_single_page(page, mapping);
2515                 if (ret)
2516                         goto unlock;
2517         }
2518
2519         if (index != start >> PAGE_SHIFT)
2520                 start_offset = 0;
2521         if (index != end >> PAGE_SHIFT)
2522                 end_offset = PAGE_SIZE;
2523
2524         for (i = round_up(start_offset, block_bytes(c)) >> 9;
2525              i < round_down(end_offset, block_bytes(c)) >> 9;
2526              i++) {
2527                 s->s[i].nr_replicas     = 0;
2528                 if (s->s[i].state == SECTOR_DIRTY)
2529                         i_sectors_delta--;
2530                 s->s[i].state           = SECTOR_UNALLOCATED;
2531         }
2532
2533         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2534
2535         /*
2536          * Caller needs to know whether this page will be written out by
2537          * writeback - doing an i_size update if necessary - or whether it will
2538          * be responsible for the i_size update:
2539          */
2540         ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
2541                           PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;
2542
2543         zero_user_segment(page, start_offset, end_offset);
2544
2545         /*
2546          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2547          *
2548          * XXX: because we aren't currently tracking whether the page has actual
2549          * data in it (vs. just 0s, or only partially written) this wrong. ick.
2550          * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2551         BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));
2552
2553         /*
2554          * This removes any writeable userspace mappings; we need to force
2555          * .page_mkwrite to be called again before any mmapped writes, to
2556          * redirty the full page:
2557          */
2558         page_mkclean(page);
2559         __set_page_dirty_nobuffers(page);
2560 unlock:
2561         unlock_page(page);
2562         put_page(page);
2563 out:
2564         return ret;
2565 }
2566
2567 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2568 {
2569         return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2570                                     from, round_up(from, PAGE_SIZE));
2571 }
2572
2573 static int bch2_truncate_pages(struct bch_inode_info *inode,
2574                                loff_t start, loff_t end)
2575 {
2576         int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
2577                                        start, end);
2578
2579         if (ret >= 0 &&
2580             start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2581                 ret = __bch2_truncate_page(inode,
2582                                            end >> PAGE_SHIFT,
2583                                            start, end);
2584         return ret;
2585 }
2586
2587 static int bch2_extend(struct user_namespace *mnt_userns,
2588                        struct bch_inode_info *inode,
2589                        struct bch_inode_unpacked *inode_u,
2590                        struct iattr *iattr)
2591 {
2592         struct address_space *mapping = inode->v.i_mapping;
2593         int ret;
2594
2595         /*
2596          * sync appends:
2597          *
2598          * this has to be done _before_ extending i_size:
2599          */
2600         ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2601         if (ret)
2602                 return ret;
2603
2604         truncate_setsize(&inode->v, iattr->ia_size);
2605
2606         return bch2_setattr_nonsize(mnt_userns, inode, iattr);
2607 }
2608
2609 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2610                                    struct bch_inode_unpacked *bi,
2611                                    void *p)
2612 {
2613         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2614         return 0;
2615 }
2616
2617 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2618                                   struct bch_inode_unpacked *bi, void *p)
2619 {
2620         u64 *new_i_size = p;
2621
2622         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2623         bi->bi_size = *new_i_size;
2624         return 0;
2625 }
2626
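/*
 * Truncate: wait for in-flight O_DIRECT i/o and block the page cache, handle
 * size extension via bch2_extend(), otherwise zero the new partial EOF page,
 * set BCH_INODE_I_SIZE_DIRTY and the new bi_size in the btree inode before
 * shrinking, drop the page cache and extents past the new size, then clear
 * the dirty flag and apply the remaining (non-size) attribute changes.
 */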
2627 int bch2_truncate(struct user_namespace *mnt_userns,
2628                   struct bch_inode_info *inode, struct iattr *iattr)
2629 {
2630         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2631         struct address_space *mapping = inode->v.i_mapping;
2632         struct bch_inode_unpacked inode_u;
2633         u64 new_i_size = iattr->ia_size;
2634         s64 i_sectors_delta = 0;
2635         int ret = 0;
2636
2637         /*
2638          * If the truncate call will change the size of the file, the
2639          * cmtimes should be updated. If the size will not change, we
2640          * do not need to update the cmtimes.
2641          */
2642         if (iattr->ia_size != inode->v.i_size) {
2643                 if (!(iattr->ia_valid & ATTR_MTIME))
2644                         ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2645                 if (!(iattr->ia_valid & ATTR_CTIME))
2646                         ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2647                 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2648         }
2649
2650         inode_dio_wait(&inode->v);
2651         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2652
2653         ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2654         if (ret)
2655                 goto err;
2656
2657         /*
2658          * check this before next assertion; on filesystem error our normal
2659          * invariants are a bit broken (truncate has to truncate the page cache
2660          * before the inode).
2661          */
2662         ret = bch2_journal_error(&c->journal);
2663         if (ret)
2664                 goto err;
2665
2666         WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
2667                 inode->v.i_size < inode_u.bi_size);
2668
2669         if (iattr->ia_size > inode->v.i_size) {
2670                 ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
2671                 goto err;
2672         }
2673
2674         iattr->ia_valid &= ~ATTR_SIZE;
2675
2676         ret = bch2_truncate_page(inode, iattr->ia_size);
2677         if (unlikely(ret < 0))
2678                 goto err;
2679
2680         /*
2681          * When extending, we're going to write the new i_size to disk
2682          * immediately so we need to flush anything above the current on disk
2683          * i_size first:
2684          *
2685          * Also, when extending we need to flush the page that i_size currently
2686          * straddles - if it's mapped to userspace, we need to ensure that
2687          * userspace has to redirty it and call .mkwrite -> set_page_dirty
2688          * again to allocate the part of the page that was extended.
2689          */
2690         if (iattr->ia_size > inode_u.bi_size)
2691                 ret = filemap_write_and_wait_range(mapping,
2692                                 inode_u.bi_size,
2693                                 iattr->ia_size - 1);
2694         else if (iattr->ia_size & (PAGE_SIZE - 1))
2695                 ret = filemap_write_and_wait_range(mapping,
2696                                 round_down(iattr->ia_size, PAGE_SIZE),
2697                                 iattr->ia_size - 1);
2698         if (ret)
2699                 goto err;
2700
2701         mutex_lock(&inode->ei_update_lock);
2702         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2703                                &new_i_size, 0);
2704         mutex_unlock(&inode->ei_update_lock);
2705
2706         if (unlikely(ret))
2707                 goto err;
2708
2709         truncate_setsize(&inode->v, iattr->ia_size);
2710
2711         ret = bch2_fpunch(c, inode_inum(inode),
2712                         round_up(iattr->ia_size, block_bytes(c)) >> 9,
2713                         U64_MAX, &i_sectors_delta);
2714         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2715
2716         bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
2717                                 !bch2_journal_error(&c->journal), c,
2718                                 "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
2719                                 inode->v.i_ino, (u64) inode->v.i_blocks,
2720                                 inode->ei_inode.bi_sectors);
2721         if (unlikely(ret))
2722                 goto err;
2723
2724         mutex_lock(&inode->ei_update_lock);
2725         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
2726         mutex_unlock(&inode->ei_update_lock);
2727
2728         ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
2729 err:
2730         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2731         return ret;
2732 }
2733
2734 /* fallocate: */
2735
2736 static int inode_update_times_fn(struct bch_inode_info *inode,
2737                                  struct bch_inode_unpacked *bi, void *p)
2738 {
2739         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2740
2741         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
2742         return 0;
2743 }
2744
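/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at either end of the range,
 * invalidate the page cache over it, delete the fully covered blocks with
 * bch2_fpunch(), then update the inode's times (and on-disk size when
 * needed).
 */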
2745 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2746 {
2747         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2748         u64 end         = offset + len;
2749         u64 block_start = round_up(offset, block_bytes(c));
2750         u64 block_end   = round_down(end, block_bytes(c));
2751         bool truncated_last_page;
2752         int ret = 0;
2753
2754         ret = bch2_truncate_pages(inode, offset, end);
2755         if (unlikely(ret < 0))
2756                 goto err;
2757
2758         truncated_last_page = ret;
2759
2760         truncate_pagecache_range(&inode->v, offset, end - 1);
2761
2762         if (block_start < block_end) {
2763                 s64 i_sectors_delta = 0;
2764
2765                 ret = bch2_fpunch(c, inode_inum(inode),
2766                                   block_start >> 9, block_end >> 9,
2767                                   &i_sectors_delta);
2768                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2769         }
2770
2771         mutex_lock(&inode->ei_update_lock);
2772         if (end >= inode->v.i_size && !truncated_last_page) {
2773                 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
2774                                             ATTR_MTIME|ATTR_CTIME);
2775         } else {
2776                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
2777                                        ATTR_MTIME|ATTR_CTIME);
2778         }
2779         mutex_unlock(&inode->ei_update_lock);
2780 err:
2781         return ret;
2782 }
2783
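/*
 * FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE: invalidate the page
 * cache from @offset onwards, then walk the extents above the affected range
 * and rewrite each one shifted down (collapse) or up (insert) by @len,
 * deleting the old key and inserting the shifted copy in the same
 * transaction; i_size is updated before the shift when inserting and after it
 * when collapsing.
 */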
2784 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
2785                                    loff_t offset, loff_t len,
2786                                    bool insert)
2787 {
2788         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2789         struct address_space *mapping = inode->v.i_mapping;
2790         struct bkey_buf copy;
2791         struct btree_trans trans;
2792         struct btree_iter src, dst, del;
2793         loff_t shift, new_size;
2794         u64 src_start;
2795         int ret = 0;
2796
2797         if ((offset | len) & (block_bytes(c) - 1))
2798                 return -EINVAL;
2799
2800         if (insert) {
2801                 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
2802                         return -EFBIG;
2803
2804                 if (offset >= inode->v.i_size)
2805                         return -EINVAL;
2806
2807                 src_start       = U64_MAX;
2808                 shift           = len;
2809         } else {
2810                 if (offset + len >= inode->v.i_size)
2811                         return -EINVAL;
2812
2813                 src_start       = offset + len;
2814                 shift           = -len;
2815         }
2816
2817         new_size = inode->v.i_size + shift;
2818
2819         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2820         if (ret)
2821                 return ret;
2822
2823         if (insert) {
2824                 i_size_write(&inode->v, new_size);
2825                 mutex_lock(&inode->ei_update_lock);
2826                 ret = bch2_write_inode_size(c, inode, new_size,
2827                                             ATTR_MTIME|ATTR_CTIME);
2828                 mutex_unlock(&inode->ei_update_lock);
2829         } else {
2830                 s64 i_sectors_delta = 0;
2831
2832                 ret = bch2_fpunch(c, inode_inum(inode),
2833                                   offset >> 9, (offset + len) >> 9,
2834                                   &i_sectors_delta);
2835                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2836
2837                 if (ret)
2838                         return ret;
2839         }
2840
2841         bch2_bkey_buf_init(&copy);
2842         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
2843         bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
2844                         POS(inode->v.i_ino, src_start >> 9),
2845                         BTREE_ITER_INTENT);
2846         bch2_trans_copy_iter(&dst, &src);
2847         bch2_trans_copy_iter(&del, &src);
2848
2849         while (ret == 0 || ret == -EINTR) {
2850                 struct disk_reservation disk_res =
2851                         bch2_disk_reservation_init(c, 0);
2852                 struct bkey_i delete;
2853                 struct bkey_s_c k;
2854                 struct bpos next_pos;
2855                 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
2856                 struct bpos atomic_end;
2857                 unsigned trigger_flags = 0;
2858                 u32 snapshot;
2859
2860                 bch2_trans_begin(&trans);
2861
2862                 ret = bch2_subvolume_get_snapshot(&trans,
2863                                         inode->ei_subvol, &snapshot);
2864                 if (ret)
2865                         continue;
2866
2867                 bch2_btree_iter_set_snapshot(&src, snapshot);
2868                 bch2_btree_iter_set_snapshot(&dst, snapshot);
2869                 bch2_btree_iter_set_snapshot(&del, snapshot);
2870
2871                 bch2_trans_begin(&trans);
2872
2873                 k = insert
2874                         ? bch2_btree_iter_peek_prev(&src)
2875                         : bch2_btree_iter_peek(&src);
2876                 if ((ret = bkey_err(k)))
2877                         continue;
2878
2879                 if (!k.k || k.k->p.inode != inode->v.i_ino)
2880                         break;
2881
2882                 if (insert &&
2883                     bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
2884                         break;
2885 reassemble:
2886                 bch2_bkey_buf_reassemble(&copy, c, k);
2887
2888                 if (insert &&
2889                     bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
2890                         bch2_cut_front(move_pos, copy.k);
2891
2892                 copy.k->k.p.offset += shift >> 9;
2893                 bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
2894
2895                 ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
2896                 if (ret)
2897                         continue;
2898
2899                 if (bkey_cmp(atomic_end, copy.k->k.p)) {
2900                         if (insert) {
2901                                 move_pos = atomic_end;
2902                                 move_pos.offset -= shift >> 9;
2903                                 goto reassemble;
2904                         } else {
2905                                 bch2_cut_back(atomic_end, copy.k);
2906                         }
2907                 }
2908
2909                 bkey_init(&delete.k);
2910                 delete.k.p = copy.k->k.p;
2911                 delete.k.size = copy.k->k.size;
2912                 delete.k.p.offset -= shift >> 9;
2913                 bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
2914
2915                 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
2916
2917                 if (copy.k->k.size != k.k->size) {
2918                         /* We might end up splitting compressed extents: */
2919                         unsigned nr_ptrs =
2920                                 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
2921
2922                         ret = bch2_disk_reservation_get(c, &disk_res,
2923                                         copy.k->k.size, nr_ptrs,
2924                                         BCH_DISK_RESERVATION_NOFAIL);
2925                         BUG_ON(ret);
2926                 }
2927
2928                 ret =   bch2_btree_iter_traverse(&del) ?:
2929                         bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
2930                         bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
2931                         bch2_trans_commit(&trans, &disk_res, NULL,
2932                                           BTREE_INSERT_NOFAIL);
2933                 bch2_disk_reservation_put(c, &disk_res);
2934
2935                 if (!ret)
2936                         bch2_btree_iter_set_pos(&src, next_pos);
2937         }
2938         bch2_trans_iter_exit(&trans, &del);
2939         bch2_trans_iter_exit(&trans, &dst);
2940         bch2_trans_iter_exit(&trans, &src);
2941         bch2_trans_exit(&trans);
2942         bch2_bkey_buf_exit(&copy, c);
2943
2944         if (ret)
2945                 return ret;
2946
2947         mutex_lock(&inode->ei_update_lock);
2948         if (!insert) {
2949                 i_size_write(&inode->v, new_size);
2950                 ret = bch2_write_inode_size(c, inode, new_size,
2951                                             ATTR_MTIME|ATTR_CTIME);
2952         } else {
2953                 /* We need an inode update to update bi_journal_seq for fsync: */
2954                 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
2955                                        ATTR_MTIME|ATTR_CTIME);
2956         }
2957         mutex_unlock(&inode->ei_update_lock);
2958         return ret;
2959 }
2960
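/*
 * fallocate/zero range: walk [start_sector, end_sector) and replace each hole
 * (or, with FALLOC_FL_ZERO_RANGE, each extent) that doesn't already have a
 * big enough reservation with a KEY_TYPE_reservation key, taking quota and
 * disk reservations as needed.
 */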
2961 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
2962                              u64 start_sector, u64 end_sector)
2963 {
2964         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2965         struct btree_trans trans;
2966         struct btree_iter iter;
2967         struct bpos end_pos = POS(inode->v.i_ino, end_sector);
2968         unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2969         int ret = 0;
2970
2971         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
2972
2973         bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
2974                         POS(inode->v.i_ino, start_sector),
2975                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2976
2977         while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
2978                 s64 i_sectors_delta = 0;
2979                 struct disk_reservation disk_res = { 0 };
2980                 struct quota_res quota_res = { 0 };
2981                 struct bkey_i_reservation reservation;
2982                 struct bkey_s_c k;
2983                 unsigned sectors;
2984                 u32 snapshot;
2985
2986                 bch2_trans_begin(&trans);
2987
2988                 ret = bch2_subvolume_get_snapshot(&trans,
2989                                         inode->ei_subvol, &snapshot);
2990                 if (ret)
2991                         goto bkey_err;
2992
2993                 bch2_btree_iter_set_snapshot(&iter, snapshot);
2994
2995                 k = bch2_btree_iter_peek_slot(&iter);
2996                 if ((ret = bkey_err(k)))
2997                         goto bkey_err;
2998
2999                 /* already reserved */
3000                 if (k.k->type == KEY_TYPE_reservation &&
3001                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
3002                         bch2_btree_iter_advance(&iter);
3003                         continue;
3004                 }
3005
3006                 if (bkey_extent_is_data(k.k) &&
3007                     !(mode & FALLOC_FL_ZERO_RANGE)) {
3008                         bch2_btree_iter_advance(&iter);
3009                         continue;
3010                 }
3011
3012                 bkey_reservation_init(&reservation.k_i);
3013                 reservation.k.type      = KEY_TYPE_reservation;
3014                 reservation.k.p         = k.k->p;
3015                 reservation.k.size      = k.k->size;
3016
3017                 bch2_cut_front(iter.pos,        &reservation.k_i);
3018                 bch2_cut_back(end_pos,          &reservation.k_i);
3019
3020                 sectors = reservation.k.size;
3021                 reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
3022
3023                 if (!bkey_extent_is_allocation(k.k)) {
3024                         ret = bch2_quota_reservation_add(c, inode,
3025                                         &quota_res,
3026                                         sectors, true);
3027                         if (unlikely(ret))
3028                                 goto bkey_err;
3029                 }
3030
3031                 if (reservation.v.nr_replicas < replicas ||
3032                     bch2_bkey_sectors_compressed(k)) {
3033                         ret = bch2_disk_reservation_get(c, &disk_res, sectors,
3034                                                         replicas, 0);
3035                         if (unlikely(ret))
3036                                 goto bkey_err;
3037
3038                         reservation.v.nr_replicas = disk_res.nr_replicas;
3039                 }
3040
3041                 ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
3042                                          &reservation.k_i,
3043                                 &disk_res, NULL,
3044                                 0, &i_sectors_delta, true);
3045                 if (ret)
3046                         goto bkey_err;
3047                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3048 bkey_err:
3049                 bch2_quota_reservation_put(c, inode, &quota_res);
3050                 bch2_disk_reservation_put(c, &disk_res);
3051                 if (ret == -EINTR)
3052                         ret = 0;
3053         }
3054
3055         bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
3056         mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
3057
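        /*
         * FALLOC_FL_ZERO_RANGE has to leave the whole range reading back as
         * zeroes even if we ran out of space for reservations, so punch
         * whatever is left of the range:
         */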
3058         if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
3059                 struct quota_res quota_res = { 0 };
3060                 s64 i_sectors_delta = 0;
3061
3062                 bch2_fpunch_at(&trans, &iter, inode_inum(inode),
3063                                end_sector, &i_sectors_delta);
3064                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3065                 bch2_quota_reservation_put(c, inode, &quota_res);
3066         }
3067
3068         bch2_trans_iter_exit(&trans, &iter);
3069         bch2_trans_exit(&trans);
3070         return ret;
3071 }
3072
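/*
 * bchfs_fallocate: pagecache and i_size handling around __bchfs_fallocate().
 * For FALLOC_FL_ZERO_RANGE, the unaligned head and tail of the range are
 * handled by bch2_truncate_pages() and the range is dropped from the
 * pagecache before the btree is updated; afterwards i_size is updated if the
 * operation extended the file.
 */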
3073 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
3074                             loff_t offset, loff_t len)
3075 {
3076         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3077         u64 end         = offset + len;
3078         u64 block_start = round_down(offset,    block_bytes(c));
3079         u64 block_end   = round_up(end,         block_bytes(c));
3080         bool truncated_last_page = false;
3081         int ret, ret2 = 0;
3082
3083         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
3084                 ret = inode_newsize_ok(&inode->v, end);
3085                 if (ret)
3086                         return ret;
3087         }
3088
3089         if (mode & FALLOC_FL_ZERO_RANGE) {
3090                 ret = bch2_truncate_pages(inode, offset, end);
3091                 if (unlikely(ret < 0))
3092                         return ret;
3093
3094                 truncated_last_page = ret;
3095
3096                 truncate_pagecache_range(&inode->v, offset, end - 1);
3097
3098                 block_start     = round_up(offset,      block_bytes(c));
3099                 block_end       = round_down(end,       block_bytes(c));
3100         }
3101
3102         ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
3103
3104         /*
3105          * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
3106          * so that the VFS cache i_size is consistent with the btree i_size:
3107          */
3108         if (ret &&
3109             !(ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)))
3110                 return ret;
3111
3112         if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
3113                 end = inode->v.i_size;
3114
3115         if (end >= inode->v.i_size &&
3116             (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
3117              !(mode & FALLOC_FL_KEEP_SIZE))) {
3118                 spin_lock(&inode->v.i_lock);
3119                 i_size_write(&inode->v, end);
3120                 spin_unlock(&inode->v.i_lock);
3121
3122                 mutex_lock(&inode->ei_update_lock);
3123                 ret2 = bch2_write_inode_size(c, inode, end, 0);
3124                 mutex_unlock(&inode->ei_update_lock);
3125         }
3126
3127         return ret ?: ret2;
3128 }
3129
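/*
 * bch2_fallocate_dispatch: fallocate(2) entry point. The supported modes map
 * to the helpers above:
 *
 *   0 or FALLOC_FL_KEEP_SIZE/FALLOC_FL_ZERO_RANGE -> bchfs_fallocate()
 *   FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE      -> bchfs_fpunch()
 *   FALLOC_FL_INSERT_RANGE                        -> bchfs_fcollapse_finsert() (insert)
 *   FALLOC_FL_COLLAPSE_RANGE                      -> bchfs_fcollapse_finsert() (collapse)
 *
 * Anything else returns -EOPNOTSUPP. The filesystem write ref, inode lock and
 * pagecache block are held across the operation.
 */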
3130 long bch2_fallocate_dispatch(struct file *file, int mode,
3131                              loff_t offset, loff_t len)
3132 {
3133         struct bch_inode_info *inode = file_bch_inode(file);
3134         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3135         long ret;
3136
3137         if (!percpu_ref_tryget(&c->writes))
3138                 return -EROFS;
3139
3140         inode_lock(&inode->v);
3141         inode_dio_wait(&inode->v);
3142         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
3143
3144         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
3145                 ret = bchfs_fallocate(inode, mode, offset, len);
3146         else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
3147                 ret = bchfs_fpunch(inode, offset, len);
3148         else if (mode == FALLOC_FL_INSERT_RANGE)
3149                 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
3150         else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3151                 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
3152         else
3153                 ret = -EOPNOTSUPP;
3154
3156         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
3157         inode_unlock(&inode->v);
3158         percpu_ref_put(&c->writes);
3159
3160         return ret;
3161 }
3162
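/*
 * bch2_remap_file_range: reflink entry point (clone/remap_file_range).
 * Source and destination positions must be block aligned and dedup
 * (REMAP_FILE_DEDUP) is not supported; the destination pagecache is written
 * back and invalidated, then bch2_remap_range() clones the extents in the
 * btree.
 */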
3163 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
3164                              struct file *file_dst, loff_t pos_dst,
3165                              loff_t len, unsigned remap_flags)
3166 {
3167         struct bch_inode_info *src = file_bch_inode(file_src);
3168         struct bch_inode_info *dst = file_bch_inode(file_dst);
3169         struct bch_fs *c = src->v.i_sb->s_fs_info;
3170         s64 i_sectors_delta = 0;
3171         u64 aligned_len;
3172         loff_t ret = 0;
3173
3174         if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
3175                 return -EINVAL;
3176
3177         if (remap_flags & REMAP_FILE_DEDUP)
3178                 return -EOPNOTSUPP;
3179
3180         if ((pos_src & (block_bytes(c) - 1)) ||
3181             (pos_dst & (block_bytes(c) - 1)))
3182                 return -EINVAL;
3183
3184         if (src == dst &&
3185             abs(pos_src - pos_dst) < len)
3186                 return -EINVAL;
3187
3188         bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3189
3190         file_update_time(file_dst);
3191
3192         inode_dio_wait(&src->v);
3193         inode_dio_wait(&dst->v);
3194
3195         ret = generic_remap_file_range_prep(file_src, pos_src,
3196                                             file_dst, pos_dst,
3197                                             &len, remap_flags);
3198         if (ret < 0 || len == 0)
3199                 goto err;
3200
3201         aligned_len = round_up((u64) len, block_bytes(c));
3202
3203         ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
3204                                 pos_dst, pos_dst + len - 1);
3205         if (ret)
3206                 goto err;
3207
3208         mark_pagecache_unallocated(src, pos_src >> 9,
3209                                    (pos_src + aligned_len) >> 9);
3210
3211         ret = bch2_remap_range(c,
3212                                inode_inum(dst), pos_dst >> 9,
3213                                inode_inum(src), pos_src >> 9,
3214                                aligned_len >> 9,
3215                                pos_dst + len, &i_sectors_delta);
3216         if (ret < 0)
3217                 goto err;
3218
3219         /*
3220          * due to alignment, we might have remapped slightly more than requested
3221          */
3222         ret = min((u64) ret << 9, (u64) len);
3223
3224         /* XXX get a quota reservation */
3225         i_sectors_acct(c, dst, NULL, i_sectors_delta);
3226
3227         spin_lock(&dst->v.i_lock);
3228         if (pos_dst + ret > dst->v.i_size)
3229                 i_size_write(&dst->v, pos_dst + ret);
3230         spin_unlock(&dst->v.i_lock);
3231
3232         if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
3233             IS_SYNC(file_inode(file_dst)))
3234                 ret = bch2_flush_inode(c, inode_inum(dst));
3235 err:
3236         bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3237
3238         return ret;
3239 }
3240
3241 /* fseek: */
3242
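/*
 * page_data_offset: byte offset within @page of the first sector at or after
 * @offset that holds data (state >= SECTOR_DIRTY), or -1 if there is none.
 */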
3243 static int page_data_offset(struct page *page, unsigned offset)
3244 {
3245         struct bch_page_state *s = bch2_page_state(page);
3246         unsigned i;
3247
3248         if (s)
3249                 for (i = offset >> 9; i < PAGE_SECTORS; i++)
3250                         if (s->s[i].state >= SECTOR_DIRTY)
3251                                 return i << 9;
3252
3253         return -1;
3254 }
3255
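/*
 * bch2_seek_pagecache_data: SEEK_DATA also has to see data that so far only
 * exists in the pagecache; scan pages in [start_offset, end_offset) for
 * sectors holding data, returning the first such offset or end_offset if
 * none is found.
 */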
3256 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
3257                                        loff_t start_offset,
3258                                        loff_t end_offset)
3259 {
3260         struct address_space *mapping = vinode->i_mapping;
3261         struct page *page;
3262         pgoff_t start_index     = start_offset >> PAGE_SHIFT;
3263         pgoff_t end_index       = end_offset >> PAGE_SHIFT;
3264         pgoff_t index           = start_index;
3265         loff_t ret;
3266         int offset;
3267
3268         while (index <= end_index) {
3269                 if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
3270                         lock_page(page);
3271
3272                         offset = page_data_offset(page,
3273                                         page->index == start_index
3274                                         ? start_offset & (PAGE_SIZE - 1)
3275                                         : 0);
3276                         if (offset >= 0) {
3277                                 ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
3278                                             offset,
3279                                             start_offset, end_offset);
3280                                 unlock_page(page);
3281                                 put_page(page);
3282                                 return ret;
3283                         }
3284
3285                         unlock_page(page);
3286                         put_page(page);
3287                 } else {
3288                         break;
3289                 }
3290         }
3291
3292         return end_offset;
3293 }
3294
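/*
 * bch2_seek_data: SEEK_DATA - find the first data extent in the btree at or
 * after @offset, then check whether dirty pagecache data precedes it.
 */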
3295 static loff_t bch2_seek_data(struct file *file, u64 offset)
3296 {
3297         struct bch_inode_info *inode = file_bch_inode(file);
3298         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3299         struct btree_trans trans;
3300         struct btree_iter iter;
3301         struct bkey_s_c k;
3302         subvol_inum inum = inode_inum(inode);
3303         u64 isize, next_data = MAX_LFS_FILESIZE;
3304         u32 snapshot;
3305         int ret;
3306
3307         isize = i_size_read(&inode->v);
3308         if (offset >= isize)
3309                 return -ENXIO;
3310
3311         bch2_trans_init(&trans, c, 0, 0);
3312 retry:
3313         bch2_trans_begin(&trans);
3314
3315         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3316         if (ret)
3317                 goto err;
3318
3319         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3320                            SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
3321                 if (k.k->p.inode != inode->v.i_ino) {
3322                         break;
3323                 } else if (bkey_extent_is_data(k.k)) {
3324                         next_data = max(offset, bkey_start_offset(k.k) << 9);
3325                         break;
3326                 } else if (k.k->p.offset >> 9 > isize)
3327                         break;
3328         }
3329         bch2_trans_iter_exit(&trans, &iter);
3330 err:
3331         if (ret == -EINTR)
3332                 goto retry;
3333
3334         bch2_trans_exit(&trans);
3335         if (ret)
3336                 return ret;
3337
3338         if (next_data > offset)
3339                 next_data = bch2_seek_pagecache_data(&inode->v,
3340                                                      offset, next_data);
3341
3342         if (next_data >= isize)
3343                 return -ENXIO;
3344
3345         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
3346 }
3347
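/*
 * __page_hole_offset/page_hole_offset: find the first offset within a page
 * that doesn't hold data; a page that isn't present in the pagecache at all
 * counts as a hole.
 */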
3348 static int __page_hole_offset(struct page *page, unsigned offset)
3349 {
3350         struct bch_page_state *s = bch2_page_state(page);
3351         unsigned i;
3352
3353         if (!s)
3354                 return 0;
3355
3356         for (i = offset >> 9; i < PAGE_SECTORS; i++)
3357                 if (s->s[i].state < SECTOR_DIRTY)
3358                         return i << 9;
3359
3360         return -1;
3361 }
3362
3363 static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
3364 {
3365         pgoff_t index = offset >> PAGE_SHIFT;
3366         struct page *page;
3367         int pg_offset;
3368         loff_t ret = -1;
3369
3370         page = find_lock_page(mapping, index);
3371         if (!page)
3372                 return offset;
3373
3374         pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
3375         if (pg_offset >= 0)
3376                 ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
3377
3378         unlock_page(page);
3379
3380         return ret;
3381 }
3382
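/*
 * bch2_seek_pagecache_hole: walk the pagecache from start_offset looking for
 * the first hole; returns end_offset if the whole range is backed by data.
 */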
3383 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3384                                        loff_t start_offset,
3385                                        loff_t end_offset)
3386 {
3387         struct address_space *mapping = vinode->i_mapping;
3388         loff_t offset = start_offset, hole;
3389
3390         while (offset < end_offset) {
3391                 hole = page_hole_offset(mapping, offset);
3392                 if (hole >= 0 && hole <= end_offset)
3393                         return max(start_offset, hole);
3394
3395                 offset += PAGE_SIZE;
3396                 offset &= PAGE_MASK;
3397         }
3398
3399         return end_offset;
3400 }
3401
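/*
 * bch2_seek_hole: SEEK_HOLE - walk extents (including empty slots) from
 * @offset; a non-data extent only counts as a hole if the pagecache doesn't
 * have dirty data covering it.
 */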
3402 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3403 {
3404         struct bch_inode_info *inode = file_bch_inode(file);
3405         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3406         struct btree_trans trans;
3407         struct btree_iter iter;
3408         struct bkey_s_c k;
3409         subvol_inum inum = inode_inum(inode);
3410         u64 isize, next_hole = MAX_LFS_FILESIZE;
3411         u32 snapshot;
3412         int ret;
3413
3414         isize = i_size_read(&inode->v);
3415         if (offset >= isize)
3416                 return -ENXIO;
3417
3418         bch2_trans_init(&trans, c, 0, 0);
3419 retry:
3420         bch2_trans_begin(&trans);
3421
3422         ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3423         if (ret)
3424                 goto err;
3425
3426         for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3427                            SPOS(inode->v.i_ino, offset >> 9, snapshot),
3428                            BTREE_ITER_SLOTS, k, ret) {
3429                 if (k.k->p.inode != inode->v.i_ino) {
3430                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3431                                         offset, MAX_LFS_FILESIZE);
3432                         break;
3433                 } else if (!bkey_extent_is_data(k.k)) {
3434                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3435                                         max(offset, bkey_start_offset(k.k) << 9),
3436                                         k.k->p.offset << 9);
3437
3438                         if (next_hole < k.k->p.offset << 9)
3439                                 break;
3440                 } else {
3441                         offset = max(offset, bkey_start_offset(k.k) << 9);
3442                 }
3443         }
3444         bch2_trans_iter_exit(&trans, &iter);
3445 err:
3446         if (ret == -EINTR)
3447                 goto retry;
3448
3449         bch2_trans_exit(&trans);
3450         if (ret)
3451                 return ret;
3452
3453         if (next_hole > isize)
3454                 next_hole = isize;
3455
3456         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3457 }
3458
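/*
 * bch2_llseek: SEEK_SET/SEEK_CUR/SEEK_END are handled generically; SEEK_DATA
 * and SEEK_HOLE go through the btree + pagecache walkers above.
 */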
3459 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3460 {
3461         switch (whence) {
3462         case SEEK_SET:
3463         case SEEK_CUR:
3464         case SEEK_END:
3465                 return generic_file_llseek(file, offset, whence);
3466         case SEEK_DATA:
3467                 return bch2_seek_data(file, offset);
3468         case SEEK_HOLE:
3469                 return bch2_seek_hole(file, offset);
3470         }
3471
3472         return -EINVAL;
3473 }
3474
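/*
 * Filesystem IO init/exit: the biosets here back buffered writeback
 * (writepage_bioset) and O_DIRECT reads and writes (dio_read_bioset,
 * dio_write_bioset).
 */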
3475 void bch2_fs_fsio_exit(struct bch_fs *c)
3476 {
3477         bioset_exit(&c->dio_write_bioset);
3478         bioset_exit(&c->dio_read_bioset);
3479         bioset_exit(&c->writepage_bioset);
3480 }
3481
3482 int bch2_fs_fsio_init(struct bch_fs *c)
3483 {
3484         int ret = 0;
3485
3486         pr_verbose_init(c->opts, "");
3487
3488         if (bioset_init(&c->writepage_bioset,
3489                         4, offsetof(struct bch_writepage_io, op.wbio.bio),
3490                         BIOSET_NEED_BVECS) ||
3491             bioset_init(&c->dio_read_bioset,
3492                         4, offsetof(struct dio_read, rbio.bio),
3493                         BIOSET_NEED_BVECS) ||
3494             bioset_init(&c->dio_write_bioset,
3495                         4, offsetof(struct dio_write, op.wbio.bio),
3496                         BIOSET_NEED_BVECS))
3497                 ret = -ENOMEM;
3498
3499         pr_verbose_init(c->opts, "ret %i", ret);
3500         return ret;
3501 }
3502
3503 #endif /* NO_BCACHEFS_FS */