]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/fs-io.c
Update bcachefs sources to 9f34144308 bcachefs: Refactor dio write code to reinit...
[bcachefs-tools-debian] / libbcachefs / fs-io.c
1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_FS
3
4 #include "bcachefs.h"
5 #include "alloc_foreground.h"
6 #include "bkey_on_stack.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "clock.h"
10 #include "error.h"
11 #include "extents.h"
12 #include "extent_update.h"
13 #include "fs.h"
14 #include "fs-io.h"
15 #include "fsck.h"
16 #include "inode.h"
17 #include "journal.h"
18 #include "io.h"
19 #include "keylist.h"
20 #include "quota.h"
21 #include "reflink.h"
22
23 #include <linux/aio.h>
24 #include <linux/backing-dev.h>
25 #include <linux/falloc.h>
26 #include <linux/migrate.h>
27 #include <linux/mmu_context.h>
28 #include <linux/pagevec.h>
29 #include <linux/sched/signal.h>
30 #include <linux/task_io_accounting_ops.h>
31 #include <linux/uio.h>
32 #include <linux/writeback.h>
33
34 #include <trace/events/bcachefs.h>
35 #include <trace/events/writeback.h>
36
37 struct quota_res {
38         u64                             sectors;
39 };
40
41 struct bch_writepage_io {
42         struct closure                  cl;
43         struct bch_inode_info           *inode;
44
45         /* must be last: */
46         struct bch_write_op             op;
47 };
48
49 struct dio_write {
50         struct completion               done;
51         struct kiocb                    *req;
52         struct mm_struct                *mm;
53         unsigned                        loop:1,
54                                         sync:1,
55                                         free_iov:1;
56         struct quota_res                quota_res;
57         u64                             written;
58
59         struct iov_iter                 iter;
60         struct iovec                    inline_vecs[2];
61
62         /* must be last: */
63         struct bch_write_op             op;
64 };
65
66 struct dio_read {
67         struct closure                  cl;
68         struct kiocb                    *req;
69         long                            ret;
70         struct bch_read_bio             rbio;
71 };
72
73 /* pagecache_block must be held */
74 static int write_invalidate_inode_pages_range(struct address_space *mapping,
75                                               loff_t start, loff_t end)
76 {
77         int ret;
78
79         /*
80          * XXX: the way this is currently implemented, we can spin if a process
81          * is continually redirtying a specific page
82          */
83         do {
84                 if (!mapping->nrpages &&
85                     !mapping->nrexceptional)
86                         return 0;
87
88                 ret = filemap_write_and_wait_range(mapping, start, end);
89                 if (ret)
90                         break;
91
92                 if (!mapping->nrpages)
93                         return 0;
94
95                 ret = invalidate_inode_pages2_range(mapping,
96                                 start >> PAGE_SHIFT,
97                                 end >> PAGE_SHIFT);
98         } while (ret == -EBUSY);
99
100         return ret;
101 }
102
103 /* quotas */
104
105 #ifdef CONFIG_BCACHEFS_QUOTA
106
107 static void bch2_quota_reservation_put(struct bch_fs *c,
108                                        struct bch_inode_info *inode,
109                                        struct quota_res *res)
110 {
111         if (!res->sectors)
112                 return;
113
114         mutex_lock(&inode->ei_quota_lock);
115         BUG_ON(res->sectors > inode->ei_quota_reserved);
116
117         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
118                         -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
119         inode->ei_quota_reserved -= res->sectors;
120         mutex_unlock(&inode->ei_quota_lock);
121
122         res->sectors = 0;
123 }
124
125 static int bch2_quota_reservation_add(struct bch_fs *c,
126                                       struct bch_inode_info *inode,
127                                       struct quota_res *res,
128                                       unsigned sectors,
129                                       bool check_enospc)
130 {
131         int ret;
132
133         mutex_lock(&inode->ei_quota_lock);
134         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
135                               check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
136         if (likely(!ret)) {
137                 inode->ei_quota_reserved += sectors;
138                 res->sectors += sectors;
139         }
140         mutex_unlock(&inode->ei_quota_lock);
141
142         return ret;
143 }
144
145 #else
146
147 static void bch2_quota_reservation_put(struct bch_fs *c,
148                                        struct bch_inode_info *inode,
149                                        struct quota_res *res)
150 {
151 }
152
153 static int bch2_quota_reservation_add(struct bch_fs *c,
154                                       struct bch_inode_info *inode,
155                                       struct quota_res *res,
156                                       unsigned sectors,
157                                       bool check_enospc)
158 {
159         return 0;
160 }
161
162 #endif
163
164 /* i_size updates: */
165
166 struct inode_new_size {
167         loff_t          new_size;
168         u64             now;
169         unsigned        fields;
170 };
171
172 static int inode_set_size(struct bch_inode_info *inode,
173                           struct bch_inode_unpacked *bi,
174                           void *p)
175 {
176         struct inode_new_size *s = p;
177
178         bi->bi_size = s->new_size;
179         if (s->fields & ATTR_ATIME)
180                 bi->bi_atime = s->now;
181         if (s->fields & ATTR_MTIME)
182                 bi->bi_mtime = s->now;
183         if (s->fields & ATTR_CTIME)
184                 bi->bi_ctime = s->now;
185
186         return 0;
187 }
188
189 int __must_check bch2_write_inode_size(struct bch_fs *c,
190                                        struct bch_inode_info *inode,
191                                        loff_t new_size, unsigned fields)
192 {
193         struct inode_new_size s = {
194                 .new_size       = new_size,
195                 .now            = bch2_current_time(c),
196                 .fields         = fields,
197         };
198
199         return bch2_write_inode(c, inode, inode_set_size, &s, fields);
200 }
201
202 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
203                            struct quota_res *quota_res, s64 sectors)
204 {
205         if (!sectors)
206                 return;
207
208         mutex_lock(&inode->ei_quota_lock);
209 #ifdef CONFIG_BCACHEFS_QUOTA
210         if (quota_res && sectors > 0) {
211                 BUG_ON(sectors > quota_res->sectors);
212                 BUG_ON(sectors > inode->ei_quota_reserved);
213
214                 quota_res->sectors -= sectors;
215                 inode->ei_quota_reserved -= sectors;
216         } else {
217                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
218         }
219 #endif
220         inode->v.i_blocks += sectors;
221         mutex_unlock(&inode->ei_quota_lock);
222 }
223
224 /* page state: */
225
226 /* stored in page->private: */
227
228 struct bch_page_sector {
229         /* Uncompressed, fully allocated replicas: */
230         unsigned                nr_replicas:3;
231
232         /* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
233         unsigned                replicas_reserved:3;
234
235         /* i_sectors: */
236         enum {
237                 SECTOR_UNALLOCATED,
238                 SECTOR_RESERVED,
239                 SECTOR_DIRTY,
240                 SECTOR_ALLOCATED,
241         }                       state:2;
242 };
243
244 struct bch_page_state {
245         spinlock_t              lock;
246         atomic_t                write_count;
247         struct bch_page_sector  s[PAGE_SECTORS];
248 };
249
250 static inline struct bch_page_state *__bch2_page_state(struct page *page)
251 {
252         return page_has_private(page)
253                 ? (struct bch_page_state *) page_private(page)
254                 : NULL;
255 }
256
257 static inline struct bch_page_state *bch2_page_state(struct page *page)
258 {
259         EBUG_ON(!PageLocked(page));
260
261         return __bch2_page_state(page);
262 }
263
264 /* for newly allocated pages: */
265 static void __bch2_page_state_release(struct page *page)
266 {
267         struct bch_page_state *s = __bch2_page_state(page);
268
269         if (!s)
270                 return;
271
272         ClearPagePrivate(page);
273         set_page_private(page, 0);
274         put_page(page);
275         kfree(s);
276 }
277
278 static void bch2_page_state_release(struct page *page)
279 {
280         struct bch_page_state *s = bch2_page_state(page);
281
282         if (!s)
283                 return;
284
285         ClearPagePrivate(page);
286         set_page_private(page, 0);
287         put_page(page);
288         kfree(s);
289 }
290
291 /* for newly allocated pages: */
292 static struct bch_page_state *__bch2_page_state_create(struct page *page,
293                                                        gfp_t gfp)
294 {
295         struct bch_page_state *s;
296
297         s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
298         if (!s)
299                 return NULL;
300
301         spin_lock_init(&s->lock);
302         /*
303          * migrate_page_move_mapping() assumes that pages with private data
304          * have their count elevated by 1.
305          */
306         get_page(page);
307         set_page_private(page, (unsigned long) s);
308         SetPagePrivate(page);
309         return s;
310 }
311
312 static struct bch_page_state *bch2_page_state_create(struct page *page,
313                                                      gfp_t gfp)
314 {
315         return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
316 }
317
318 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
319 {
320         /* XXX: this should not be open coded */
321         return inode->ei_inode.bi_data_replicas
322                 ? inode->ei_inode.bi_data_replicas - 1
323                 : c->opts.data_replicas;
324 }
325
326 static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
327                                                   unsigned nr_replicas)
328 {
329         return max(0, (int) nr_replicas -
330                    s->nr_replicas -
331                    s->replicas_reserved);
332 }
333
334 static int bch2_get_page_disk_reservation(struct bch_fs *c,
335                                 struct bch_inode_info *inode,
336                                 struct page *page, bool check_enospc)
337 {
338         struct bch_page_state *s = bch2_page_state_create(page, 0);
339         unsigned nr_replicas = inode_nr_replicas(c, inode);
340         struct disk_reservation disk_res = { 0 };
341         unsigned i, disk_res_sectors = 0;
342         int ret;
343
344         if (!s)
345                 return -ENOMEM;
346
347         for (i = 0; i < ARRAY_SIZE(s->s); i++)
348                 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
349
350         if (!disk_res_sectors)
351                 return 0;
352
353         ret = bch2_disk_reservation_get(c, &disk_res,
354                                         disk_res_sectors, 1,
355                                         !check_enospc
356                                         ? BCH_DISK_RESERVATION_NOFAIL
357                                         : 0);
358         if (unlikely(ret))
359                 return ret;
360
361         for (i = 0; i < ARRAY_SIZE(s->s); i++)
362                 s->s[i].replicas_reserved +=
363                         sectors_to_reserve(&s->s[i], nr_replicas);
364
365         return 0;
366 }
367
368 struct bch2_page_reservation {
369         struct disk_reservation disk;
370         struct quota_res        quota;
371 };
372
373 static void bch2_page_reservation_init(struct bch_fs *c,
374                         struct bch_inode_info *inode,
375                         struct bch2_page_reservation *res)
376 {
377         memset(res, 0, sizeof(*res));
378
379         res->disk.nr_replicas = inode_nr_replicas(c, inode);
380 }
381
382 static void bch2_page_reservation_put(struct bch_fs *c,
383                         struct bch_inode_info *inode,
384                         struct bch2_page_reservation *res)
385 {
386         bch2_disk_reservation_put(c, &res->disk);
387         bch2_quota_reservation_put(c, inode, &res->quota);
388 }
389
390 static int bch2_page_reservation_get(struct bch_fs *c,
391                         struct bch_inode_info *inode, struct page *page,
392                         struct bch2_page_reservation *res,
393                         unsigned offset, unsigned len, bool check_enospc)
394 {
395         struct bch_page_state *s = bch2_page_state_create(page, 0);
396         unsigned i, disk_sectors = 0, quota_sectors = 0;
397         int ret;
398
399         if (!s)
400                 return -ENOMEM;
401
402         for (i = round_down(offset, block_bytes(c)) >> 9;
403              i < round_up(offset + len, block_bytes(c)) >> 9;
404              i++) {
405                 disk_sectors += sectors_to_reserve(&s->s[i],
406                                                 res->disk.nr_replicas);
407                 quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
408         }
409
410         if (disk_sectors) {
411                 ret = bch2_disk_reservation_add(c, &res->disk,
412                                                 disk_sectors,
413                                                 !check_enospc
414                                                 ? BCH_DISK_RESERVATION_NOFAIL
415                                                 : 0);
416                 if (unlikely(ret))
417                         return ret;
418         }
419
420         if (quota_sectors) {
421                 ret = bch2_quota_reservation_add(c, inode, &res->quota,
422                                                  quota_sectors,
423                                                  check_enospc);
424                 if (unlikely(ret)) {
425                         struct disk_reservation tmp = {
426                                 .sectors = disk_sectors
427                         };
428
429                         bch2_disk_reservation_put(c, &tmp);
430                         res->disk.sectors -= disk_sectors;
431                         return ret;
432                 }
433         }
434
435         return 0;
436 }
437
438 static void bch2_clear_page_bits(struct page *page)
439 {
440         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
441         struct bch_fs *c = inode->v.i_sb->s_fs_info;
442         struct bch_page_state *s = bch2_page_state(page);
443         struct disk_reservation disk_res = { 0 };
444         int i, dirty_sectors = 0;
445
446         if (!s)
447                 return;
448
449         EBUG_ON(!PageLocked(page));
450         EBUG_ON(PageWriteback(page));
451
452         for (i = 0; i < ARRAY_SIZE(s->s); i++) {
453                 disk_res.sectors += s->s[i].replicas_reserved;
454                 s->s[i].replicas_reserved = 0;
455
456                 if (s->s[i].state == SECTOR_DIRTY) {
457                         dirty_sectors++;
458                         s->s[i].state = SECTOR_UNALLOCATED;
459                 }
460         }
461
462         bch2_disk_reservation_put(c, &disk_res);
463
464         if (dirty_sectors)
465                 i_sectors_acct(c, inode, NULL, -dirty_sectors);
466
467         bch2_page_state_release(page);
468 }
469
470 static void bch2_set_page_dirty(struct bch_fs *c,
471                         struct bch_inode_info *inode, struct page *page,
472                         struct bch2_page_reservation *res,
473                         unsigned offset, unsigned len)
474 {
475         struct bch_page_state *s = bch2_page_state(page);
476         unsigned i, dirty_sectors = 0;
477
478         WARN_ON((u64) page_offset(page) + offset + len >
479                 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
480
481         spin_lock(&s->lock);
482
483         for (i = round_down(offset, block_bytes(c)) >> 9;
484              i < round_up(offset + len, block_bytes(c)) >> 9;
485              i++) {
486                 unsigned sectors = sectors_to_reserve(&s->s[i],
487                                                 res->disk.nr_replicas);
488
489                 /*
490                  * This can happen if we race with the error path in
491                  * bch2_writepage_io_done():
492                  */
493                 sectors = min_t(unsigned, sectors, res->disk.sectors);
494
495                 s->s[i].replicas_reserved += sectors;
496                 res->disk.sectors -= sectors;
497
498                 if (s->s[i].state == SECTOR_UNALLOCATED)
499                         dirty_sectors++;
500
501                 s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
502         }
503
504         spin_unlock(&s->lock);
505
506         if (dirty_sectors)
507                 i_sectors_acct(c, inode, &res->quota, dirty_sectors);
508
509         if (!PageDirty(page))
510                 __set_page_dirty_nobuffers(page);
511 }
512
513 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
514 {
515         struct file *file = vmf->vma->vm_file;
516         struct bch_inode_info *inode = file_bch_inode(file);
517         int ret;
518
519         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
520         ret = filemap_fault(vmf);
521         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
522
523         return ret;
524 }
525
526 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
527 {
528         struct page *page = vmf->page;
529         struct file *file = vmf->vma->vm_file;
530         struct bch_inode_info *inode = file_bch_inode(file);
531         struct address_space *mapping = file->f_mapping;
532         struct bch_fs *c = inode->v.i_sb->s_fs_info;
533         struct bch2_page_reservation res;
534         unsigned len;
535         loff_t isize;
536         int ret = VM_FAULT_LOCKED;
537
538         bch2_page_reservation_init(c, inode, &res);
539
540         sb_start_pagefault(inode->v.i_sb);
541         file_update_time(file);
542
543         /*
544          * Not strictly necessary, but helps avoid dio writes livelocking in
545          * write_invalidate_inode_pages_range() - can drop this if/when we get
546          * a write_invalidate_inode_pages_range() that works without dropping
547          * page lock before invalidating page
548          */
549         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
550
551         lock_page(page);
552         isize = i_size_read(&inode->v);
553
554         if (page->mapping != mapping || page_offset(page) >= isize) {
555                 unlock_page(page);
556                 ret = VM_FAULT_NOPAGE;
557                 goto out;
558         }
559
560         len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
561
562         if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
563                 unlock_page(page);
564                 ret = VM_FAULT_SIGBUS;
565                 goto out;
566         }
567
568         bch2_set_page_dirty(c, inode, page, &res, 0, len);
569         bch2_page_reservation_put(c, inode, &res);
570
571         wait_for_stable_page(page);
572 out:
573         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
574         sb_end_pagefault(inode->v.i_sb);
575
576         return ret;
577 }
578
579 void bch2_invalidatepage(struct page *page, unsigned int offset,
580                          unsigned int length)
581 {
582         if (offset || length < PAGE_SIZE)
583                 return;
584
585         bch2_clear_page_bits(page);
586 }
587
588 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
589 {
590         if (PageDirty(page))
591                 return 0;
592
593         bch2_clear_page_bits(page);
594         return 1;
595 }
596
597 #ifdef CONFIG_MIGRATION
598 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
599                       struct page *page, enum migrate_mode mode)
600 {
601         int ret;
602
603         EBUG_ON(!PageLocked(page));
604         EBUG_ON(!PageLocked(newpage));
605
606         ret = migrate_page_move_mapping(mapping, newpage, page, 0);
607         if (ret != MIGRATEPAGE_SUCCESS)
608                 return ret;
609
610         if (PagePrivate(page)) {
611                 ClearPagePrivate(page);
612                 get_page(newpage);
613                 set_page_private(newpage, page_private(page));
614                 set_page_private(page, 0);
615                 put_page(page);
616                 SetPagePrivate(newpage);
617         }
618
619         if (mode != MIGRATE_SYNC_NO_COPY)
620                 migrate_page_copy(newpage, page);
621         else
622                 migrate_page_states(newpage, page);
623         return MIGRATEPAGE_SUCCESS;
624 }
625 #endif
626
627 /* readpage(s): */
628
629 static void bch2_readpages_end_io(struct bio *bio)
630 {
631         struct bvec_iter_all iter;
632         struct bio_vec *bv;
633
634         bio_for_each_segment_all(bv, bio, iter) {
635                 struct page *page = bv->bv_page;
636
637                 if (!bio->bi_status) {
638                         SetPageUptodate(page);
639                 } else {
640                         ClearPageUptodate(page);
641                         SetPageError(page);
642                 }
643                 unlock_page(page);
644         }
645
646         bio_put(bio);
647 }
648
649 static inline void page_state_init_for_read(struct page *page)
650 {
651         SetPagePrivate(page);
652         page->private = 0;
653 }
654
655 struct readpages_iter {
656         struct address_space    *mapping;
657         struct page             **pages;
658         unsigned                nr_pages;
659         unsigned                nr_added;
660         unsigned                idx;
661         pgoff_t                 offset;
662 };
663
664 static int readpages_iter_init(struct readpages_iter *iter,
665                                struct address_space *mapping,
666                                struct list_head *pages, unsigned nr_pages)
667 {
668         memset(iter, 0, sizeof(*iter));
669
670         iter->mapping   = mapping;
671         iter->offset    = list_last_entry(pages, struct page, lru)->index;
672
673         iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
674         if (!iter->pages)
675                 return -ENOMEM;
676
677         while (!list_empty(pages)) {
678                 struct page *page = list_last_entry(pages, struct page, lru);
679
680                 __bch2_page_state_create(page, __GFP_NOFAIL);
681
682                 iter->pages[iter->nr_pages++] = page;
683                 list_del(&page->lru);
684         }
685
686         return 0;
687 }
688
689 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
690 {
691         struct page *page;
692         unsigned i;
693         int ret;
694
695         BUG_ON(iter->idx > iter->nr_added);
696         BUG_ON(iter->nr_added > iter->nr_pages);
697
698         if (iter->idx < iter->nr_added)
699                 goto out;
700
701         while (1) {
702                 if (iter->idx == iter->nr_pages)
703                         return NULL;
704
705                 ret = add_to_page_cache_lru_vec(iter->mapping,
706                                 iter->pages     + iter->nr_added,
707                                 iter->nr_pages  - iter->nr_added,
708                                 iter->offset    + iter->nr_added,
709                                 GFP_NOFS);
710                 if (ret > 0)
711                         break;
712
713                 page = iter->pages[iter->nr_added];
714                 iter->idx++;
715                 iter->nr_added++;
716
717                 __bch2_page_state_release(page);
718                 put_page(page);
719         }
720
721         iter->nr_added += ret;
722
723         for (i = iter->idx; i < iter->nr_added; i++)
724                 put_page(iter->pages[i]);
725 out:
726         EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
727
728         return iter->pages[iter->idx];
729 }
730
731 static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
732 {
733         struct bvec_iter iter;
734         struct bio_vec bv;
735         unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
736                 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
737         unsigned state = k.k->type == KEY_TYPE_reservation
738                 ? SECTOR_RESERVED
739                 : SECTOR_ALLOCATED;
740
741         bio_for_each_segment(bv, bio, iter) {
742                 struct bch_page_state *s = bch2_page_state(bv.bv_page);
743                 unsigned i;
744
745                 for (i = bv.bv_offset >> 9;
746                      i < (bv.bv_offset + bv.bv_len) >> 9;
747                      i++) {
748                         s->s[i].nr_replicas = nr_ptrs;
749                         s->s[i].state = state;
750                 }
751         }
752 }
753
754 static bool extent_partial_reads_expensive(struct bkey_s_c k)
755 {
756         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
757         struct bch_extent_crc_unpacked crc;
758         const union bch_extent_entry *i;
759
760         bkey_for_each_crc(k.k, ptrs, crc, i)
761                 if (crc.csum_type || crc.compression_type)
762                         return true;
763         return false;
764 }
765
766 static void readpage_bio_extend(struct readpages_iter *iter,
767                                 struct bio *bio,
768                                 unsigned sectors_this_extent,
769                                 bool get_more)
770 {
771         while (bio_sectors(bio) < sectors_this_extent &&
772                bio->bi_vcnt < bio->bi_max_vecs) {
773                 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
774                 struct page *page = readpage_iter_next(iter);
775                 int ret;
776
777                 if (page) {
778                         if (iter->offset + iter->idx != page_offset)
779                                 break;
780
781                         iter->idx++;
782                 } else {
783                         if (!get_more)
784                                 break;
785
786                         page = xa_load(&iter->mapping->i_pages, page_offset);
787                         if (page && !xa_is_value(page))
788                                 break;
789
790                         page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
791                         if (!page)
792                                 break;
793
794                         if (!__bch2_page_state_create(page, 0)) {
795                                 put_page(page);
796                                 break;
797                         }
798
799                         ret = add_to_page_cache_lru(page, iter->mapping,
800                                                     page_offset, GFP_NOFS);
801                         if (ret) {
802                                 __bch2_page_state_release(page);
803                                 put_page(page);
804                                 break;
805                         }
806
807                         put_page(page);
808                 }
809
810                 BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
811         }
812 }
813
814 static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
815                        struct bch_read_bio *rbio, u64 inum,
816                        struct readpages_iter *readpages_iter)
817 {
818         struct bch_fs *c = trans->c;
819         struct bkey_on_stack sk;
820         int flags = BCH_READ_RETRY_IF_STALE|
821                 BCH_READ_MAY_PROMOTE;
822         int ret = 0;
823
824         rbio->c = c;
825         rbio->start_time = local_clock();
826
827         bkey_on_stack_init(&sk);
828 retry:
829         while (1) {
830                 struct bkey_s_c k;
831                 unsigned bytes, sectors, offset_into_extent;
832
833                 bch2_btree_iter_set_pos(iter,
834                                 POS(inum, rbio->bio.bi_iter.bi_sector));
835
836                 k = bch2_btree_iter_peek_slot(iter);
837                 ret = bkey_err(k);
838                 if (ret)
839                         break;
840
841                 bkey_on_stack_reassemble(&sk, c, k);
842                 k = bkey_i_to_s_c(sk.k);
843
844                 offset_into_extent = iter->pos.offset -
845                         bkey_start_offset(k.k);
846                 sectors = k.k->size - offset_into_extent;
847
848                 ret = bch2_read_indirect_extent(trans,
849                                         &offset_into_extent, &sk);
850                 if (ret)
851                         break;
852
853                 sectors = min(sectors, k.k->size - offset_into_extent);
854
855                 bch2_trans_unlock(trans);
856
857                 if (readpages_iter)
858                         readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
859                                             extent_partial_reads_expensive(k));
860
861                 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
862                 swap(rbio->bio.bi_iter.bi_size, bytes);
863
864                 if (rbio->bio.bi_iter.bi_size == bytes)
865                         flags |= BCH_READ_LAST_FRAGMENT;
866
867                 if (bkey_extent_is_allocation(k.k))
868                         bch2_add_page_sectors(&rbio->bio, k);
869
870                 bch2_read_extent(c, rbio, k, offset_into_extent, flags);
871
872                 if (flags & BCH_READ_LAST_FRAGMENT)
873                         break;
874
875                 swap(rbio->bio.bi_iter.bi_size, bytes);
876                 bio_advance(&rbio->bio, bytes);
877         }
878
879         if (ret == -EINTR)
880                 goto retry;
881
882         if (ret) {
883                 bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
884                 bio_endio(&rbio->bio);
885         }
886
887         bkey_on_stack_exit(&sk, c);
888 }
889
890 int bch2_readpages(struct file *file, struct address_space *mapping,
891                    struct list_head *pages, unsigned nr_pages)
892 {
893         struct bch_inode_info *inode = to_bch_ei(mapping->host);
894         struct bch_fs *c = inode->v.i_sb->s_fs_info;
895         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
896         struct btree_trans trans;
897         struct btree_iter *iter;
898         struct page *page;
899         struct readpages_iter readpages_iter;
900         int ret;
901
902         ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
903         BUG_ON(ret);
904
905         bch2_trans_init(&trans, c, 0, 0);
906
907         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
908                                    BTREE_ITER_SLOTS);
909
910         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
911
912         while ((page = readpage_iter_next(&readpages_iter))) {
913                 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
914                 unsigned n = min_t(unsigned,
915                                    readpages_iter.nr_pages -
916                                    readpages_iter.idx,
917                                    BIO_MAX_PAGES);
918                 struct bch_read_bio *rbio =
919                         rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
920                                   opts);
921
922                 readpages_iter.idx++;
923
924                 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
925                 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
926                 rbio->bio.bi_end_io = bch2_readpages_end_io;
927                 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
928
929                 bchfs_read(&trans, iter, rbio, inode->v.i_ino,
930                            &readpages_iter);
931         }
932
933         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
934
935         bch2_trans_exit(&trans);
936         kfree(readpages_iter.pages);
937
938         return 0;
939 }
940
941 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
942                              u64 inum, struct page *page)
943 {
944         struct btree_trans trans;
945         struct btree_iter *iter;
946
947         bch2_page_state_create(page, __GFP_NOFAIL);
948
949         bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
950         rbio->bio.bi_iter.bi_sector =
951                 (sector_t) page->index << PAGE_SECTOR_SHIFT;
952         BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
953
954         bch2_trans_init(&trans, c, 0, 0);
955         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
956                                    BTREE_ITER_SLOTS);
957
958         bchfs_read(&trans, iter, rbio, inum, NULL);
959
960         bch2_trans_exit(&trans);
961 }
962
963 int bch2_readpage(struct file *file, struct page *page)
964 {
965         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
966         struct bch_fs *c = inode->v.i_sb->s_fs_info;
967         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
968         struct bch_read_bio *rbio;
969
970         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
971         rbio->bio.bi_end_io = bch2_readpages_end_io;
972
973         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
974         return 0;
975 }
976
977 static void bch2_read_single_page_end_io(struct bio *bio)
978 {
979         complete(bio->bi_private);
980 }
981
982 static int bch2_read_single_page(struct page *page,
983                                  struct address_space *mapping)
984 {
985         struct bch_inode_info *inode = to_bch_ei(mapping->host);
986         struct bch_fs *c = inode->v.i_sb->s_fs_info;
987         struct bch_read_bio *rbio;
988         int ret;
989         DECLARE_COMPLETION_ONSTACK(done);
990
991         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
992                          io_opts(c, &inode->ei_inode));
993         rbio->bio.bi_private = &done;
994         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
995
996         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
997         wait_for_completion(&done);
998
999         ret = blk_status_to_errno(rbio->bio.bi_status);
1000         bio_put(&rbio->bio);
1001
1002         if (ret < 0)
1003                 return ret;
1004
1005         SetPageUptodate(page);
1006         return 0;
1007 }
1008
1009 /* writepages: */
1010
1011 struct bch_writepage_state {
1012         struct bch_writepage_io *io;
1013         struct bch_io_opts      opts;
1014 };
1015
1016 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1017                                                                   struct bch_inode_info *inode)
1018 {
1019         return (struct bch_writepage_state) {
1020                 .opts = io_opts(c, &inode->ei_inode)
1021         };
1022 }
1023
1024 static void bch2_writepage_io_free(struct closure *cl)
1025 {
1026         struct bch_writepage_io *io = container_of(cl,
1027                                         struct bch_writepage_io, cl);
1028
1029         bio_put(&io->op.wbio.bio);
1030 }
1031
1032 static void bch2_writepage_io_done(struct closure *cl)
1033 {
1034         struct bch_writepage_io *io = container_of(cl,
1035                                         struct bch_writepage_io, cl);
1036         struct bch_fs *c = io->op.c;
1037         struct bio *bio = &io->op.wbio.bio;
1038         struct bvec_iter_all iter;
1039         struct bio_vec *bvec;
1040         unsigned i;
1041
1042         if (io->op.error) {
1043                 bio_for_each_segment_all(bvec, bio, iter) {
1044                         struct bch_page_state *s;
1045
1046                         SetPageError(bvec->bv_page);
1047                         mapping_set_error(bvec->bv_page->mapping, -EIO);
1048
1049                         s = __bch2_page_state(bvec->bv_page);
1050                         spin_lock(&s->lock);
1051                         for (i = 0; i < PAGE_SECTORS; i++)
1052                                 s->s[i].nr_replicas = 0;
1053                         spin_unlock(&s->lock);
1054                 }
1055         }
1056
1057         if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1058                 bio_for_each_segment_all(bvec, bio, iter) {
1059                         struct bch_page_state *s;
1060
1061                         s = __bch2_page_state(bvec->bv_page);
1062                         spin_lock(&s->lock);
1063                         for (i = 0; i < PAGE_SECTORS; i++)
1064                                 s->s[i].nr_replicas = 0;
1065                         spin_unlock(&s->lock);
1066                 }
1067         }
1068
1069         /*
1070          * racing with fallocate can cause us to add fewer sectors than
1071          * expected - but we shouldn't add more sectors than expected:
1072          */
1073         BUG_ON(io->op.i_sectors_delta > 0);
1074
1075         /*
1076          * (error (due to going RO) halfway through a page can screw that up
1077          * slightly)
1078          * XXX wtf?
1079            BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
1080          */
1081
1082         /*
1083          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1084          * before calling end_page_writeback:
1085          */
1086         i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1087
1088         bio_for_each_segment_all(bvec, bio, iter) {
1089                 struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
1090
1091                 if (atomic_dec_and_test(&s->write_count))
1092                         end_page_writeback(bvec->bv_page);
1093         }
1094
1095         closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1096 }
1097
1098 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1099 {
1100         struct bch_writepage_io *io = w->io;
1101
1102         w->io = NULL;
1103         closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
1104         continue_at(&io->cl, bch2_writepage_io_done, NULL);
1105 }
1106
1107 /*
1108  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1109  * possible, else allocating a new one:
1110  */
1111 static void bch2_writepage_io_alloc(struct bch_fs *c,
1112                                     struct writeback_control *wbc,
1113                                     struct bch_writepage_state *w,
1114                                     struct bch_inode_info *inode,
1115                                     u64 sector,
1116                                     unsigned nr_replicas)
1117 {
1118         struct bch_write_op *op;
1119
1120         w->io = container_of(bio_alloc_bioset(GFP_NOFS,
1121                                               BIO_MAX_PAGES,
1122                                               &c->writepage_bioset),
1123                              struct bch_writepage_io, op.wbio.bio);
1124
1125         closure_init(&w->io->cl, NULL);
1126         w->io->inode            = inode;
1127
1128         op                      = &w->io->op;
1129         bch2_write_op_init(op, c, w->opts);
1130         op->target              = w->opts.foreground_target;
1131         op_journal_seq_set(op, &inode->ei_journal_seq);
1132         op->nr_replicas         = nr_replicas;
1133         op->res.nr_replicas     = nr_replicas;
1134         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1135         op->pos                 = POS(inode->v.i_ino, sector);
1136         op->wbio.bio.bi_iter.bi_sector = sector;
1137         op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
1138 }
1139
1140 static int __bch2_writepage(struct page *page,
1141                             struct writeback_control *wbc,
1142                             void *data)
1143 {
1144         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1145         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1146         struct bch_writepage_state *w = data;
1147         struct bch_page_state *s, orig;
1148         unsigned i, offset, nr_replicas_this_write = U32_MAX;
1149         loff_t i_size = i_size_read(&inode->v);
1150         pgoff_t end_index = i_size >> PAGE_SHIFT;
1151         int ret;
1152
1153         EBUG_ON(!PageUptodate(page));
1154
1155         /* Is the page fully inside i_size? */
1156         if (page->index < end_index)
1157                 goto do_io;
1158
1159         /* Is the page fully outside i_size? (truncate in progress) */
1160         offset = i_size & (PAGE_SIZE - 1);
1161         if (page->index > end_index || !offset) {
1162                 unlock_page(page);
1163                 return 0;
1164         }
1165
1166         /*
1167          * The page straddles i_size.  It must be zeroed out on each and every
1168          * writepage invocation because it may be mmapped.  "A file is mapped
1169          * in multiples of the page size.  For a file that is not a multiple of
1170          * the  page size, the remaining memory is zeroed when mapped, and
1171          * writes to that region are not written out to the file."
1172          */
1173         zero_user_segment(page, offset, PAGE_SIZE);
1174 do_io:
1175         s = bch2_page_state_create(page, __GFP_NOFAIL);
1176
1177         ret = bch2_get_page_disk_reservation(c, inode, page, true);
1178         if (ret) {
1179                 SetPageError(page);
1180                 mapping_set_error(page->mapping, ret);
1181                 unlock_page(page);
1182                 return 0;
1183         }
1184
1185         /* Before unlocking the page, get copy of reservations: */
1186         orig = *s;
1187
1188         for (i = 0; i < PAGE_SECTORS; i++) {
1189                 if (s->s[i].state < SECTOR_DIRTY)
1190                         continue;
1191
1192                 nr_replicas_this_write =
1193                         min_t(unsigned, nr_replicas_this_write,
1194                               s->s[i].nr_replicas +
1195                               s->s[i].replicas_reserved);
1196         }
1197
1198         for (i = 0; i < PAGE_SECTORS; i++) {
1199                 if (s->s[i].state < SECTOR_DIRTY)
1200                         continue;
1201
1202                 s->s[i].nr_replicas = w->opts.compression
1203                         ? 0 : nr_replicas_this_write;
1204
1205                 s->s[i].replicas_reserved = 0;
1206                 s->s[i].state = SECTOR_ALLOCATED;
1207         }
1208
1209         BUG_ON(atomic_read(&s->write_count));
1210         atomic_set(&s->write_count, 1);
1211
1212         BUG_ON(PageWriteback(page));
1213         set_page_writeback(page);
1214
1215         unlock_page(page);
1216
1217         offset = 0;
1218         while (1) {
1219                 unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0;
1220                 u64 sector;
1221
1222                 while (offset < PAGE_SECTORS &&
1223                        orig.s[offset].state < SECTOR_DIRTY)
1224                         offset++;
1225
1226                 if (offset == PAGE_SECTORS)
1227                         break;
1228
1229                 sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;
1230
1231                 while (offset + sectors < PAGE_SECTORS &&
1232                        orig.s[offset + sectors].state >= SECTOR_DIRTY)
1233                         sectors++;
1234
1235                 for (i = offset; i < offset + sectors; i++) {
1236                         reserved_sectors += orig.s[i].replicas_reserved;
1237                         dirty_sectors += orig.s[i].state == SECTOR_DIRTY;
1238                 }
1239
1240                 if (w->io &&
1241                     (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1242                      bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
1243                      w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1244                      (BIO_MAX_PAGES * PAGE_SIZE) ||
1245                      bio_end_sector(&w->io->op.wbio.bio) != sector))
1246                         bch2_writepage_do_io(w);
1247
1248                 if (!w->io)
1249                         bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1250                                                 nr_replicas_this_write);
1251
1252                 atomic_inc(&s->write_count);
1253
1254                 BUG_ON(inode != w->io->inode);
1255                 BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
1256                                      sectors << 9, offset << 9));
1257
1258                 /* Check for writing past i_size: */
1259                 WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1260                         round_up(i_size, block_bytes(c)));
1261
1262                 w->io->op.res.sectors += reserved_sectors;
1263                 w->io->op.i_sectors_delta -= dirty_sectors;
1264                 w->io->op.new_i_size = i_size;
1265
1266                 offset += sectors;
1267         }
1268
1269         if (atomic_dec_and_test(&s->write_count))
1270                 end_page_writeback(page);
1271
1272         return 0;
1273 }
1274
1275 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1276 {
1277         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1278         struct bch_writepage_state w =
1279                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1280         struct blk_plug plug;
1281         int ret;
1282
1283         blk_start_plug(&plug);
1284         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1285         if (w.io)
1286                 bch2_writepage_do_io(&w);
1287         blk_finish_plug(&plug);
1288         return ret;
1289 }
1290
1291 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1292 {
1293         struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1294         struct bch_writepage_state w =
1295                 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1296         int ret;
1297
1298         ret = __bch2_writepage(page, wbc, &w);
1299         if (w.io)
1300                 bch2_writepage_do_io(&w);
1301
1302         return ret;
1303 }
1304
1305 /* buffered writes: */
1306
1307 int bch2_write_begin(struct file *file, struct address_space *mapping,
1308                      loff_t pos, unsigned len, unsigned flags,
1309                      struct page **pagep, void **fsdata)
1310 {
1311         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1312         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1313         struct bch2_page_reservation *res;
1314         pgoff_t index = pos >> PAGE_SHIFT;
1315         unsigned offset = pos & (PAGE_SIZE - 1);
1316         struct page *page;
1317         int ret = -ENOMEM;
1318
1319         res = kmalloc(sizeof(*res), GFP_KERNEL);
1320         if (!res)
1321                 return -ENOMEM;
1322
1323         bch2_page_reservation_init(c, inode, res);
1324         *fsdata = res;
1325
1326         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1327
1328         page = grab_cache_page_write_begin(mapping, index, flags);
1329         if (!page)
1330                 goto err_unlock;
1331
1332         if (PageUptodate(page))
1333                 goto out;
1334
1335         /* If we're writing entire page, don't need to read it in first: */
1336         if (len == PAGE_SIZE)
1337                 goto out;
1338
1339         if (!offset && pos + len >= inode->v.i_size) {
1340                 zero_user_segment(page, len, PAGE_SIZE);
1341                 flush_dcache_page(page);
1342                 goto out;
1343         }
1344
1345         if (index > inode->v.i_size >> PAGE_SHIFT) {
1346                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1347                 flush_dcache_page(page);
1348                 goto out;
1349         }
1350 readpage:
1351         ret = bch2_read_single_page(page, mapping);
1352         if (ret)
1353                 goto err;
1354 out:
1355         ret = bch2_page_reservation_get(c, inode, page, res,
1356                                         offset, len, true);
1357         if (ret) {
1358                 if (!PageUptodate(page)) {
1359                         /*
1360                          * If the page hasn't been read in, we won't know if we
1361                          * actually need a reservation - we don't actually need
1362                          * to read here, we just need to check if the page is
1363                          * fully backed by uncompressed data:
1364                          */
1365                         goto readpage;
1366                 }
1367
1368                 goto err;
1369         }
1370
1371         *pagep = page;
1372         return 0;
1373 err:
1374         unlock_page(page);
1375         put_page(page);
1376         *pagep = NULL;
1377 err_unlock:
1378         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1379         kfree(res);
1380         *fsdata = NULL;
1381         return ret;
1382 }
1383
1384 int bch2_write_end(struct file *file, struct address_space *mapping,
1385                    loff_t pos, unsigned len, unsigned copied,
1386                    struct page *page, void *fsdata)
1387 {
1388         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1389         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1390         struct bch2_page_reservation *res = fsdata;
1391         unsigned offset = pos & (PAGE_SIZE - 1);
1392
1393         lockdep_assert_held(&inode->v.i_rwsem);
1394
1395         if (unlikely(copied < len && !PageUptodate(page))) {
1396                 /*
1397                  * The page needs to be read in, but that would destroy
1398                  * our partial write - simplest thing is to just force
1399                  * userspace to redo the write:
1400                  */
1401                 zero_user(page, 0, PAGE_SIZE);
1402                 flush_dcache_page(page);
1403                 copied = 0;
1404         }
1405
1406         spin_lock(&inode->v.i_lock);
1407         if (pos + copied > inode->v.i_size)
1408                 i_size_write(&inode->v, pos + copied);
1409         spin_unlock(&inode->v.i_lock);
1410
1411         if (copied) {
1412                 if (!PageUptodate(page))
1413                         SetPageUptodate(page);
1414
1415                 bch2_set_page_dirty(c, inode, page, res, offset, copied);
1416
1417                 inode->ei_last_dirtied = (unsigned long) current;
1418         }
1419
1420         unlock_page(page);
1421         put_page(page);
1422         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1423
1424         bch2_page_reservation_put(c, inode, res);
1425         kfree(res);
1426
1427         return copied;
1428 }
1429
1430 #define WRITE_BATCH_PAGES       32
1431
1432 static int __bch2_buffered_write(struct bch_inode_info *inode,
1433                                  struct address_space *mapping,
1434                                  struct iov_iter *iter,
1435                                  loff_t pos, unsigned len)
1436 {
1437         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1438         struct page *pages[WRITE_BATCH_PAGES];
1439         struct bch2_page_reservation res;
1440         unsigned long index = pos >> PAGE_SHIFT;
1441         unsigned offset = pos & (PAGE_SIZE - 1);
1442         unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1443         unsigned i, reserved = 0, set_dirty = 0;
1444         unsigned copied = 0, nr_pages_copied = 0;
1445         int ret = 0;
1446
1447         BUG_ON(!len);
1448         BUG_ON(nr_pages > ARRAY_SIZE(pages));
1449
1450         bch2_page_reservation_init(c, inode, &res);
1451
1452         for (i = 0; i < nr_pages; i++) {
1453                 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1454                 if (!pages[i]) {
1455                         nr_pages = i;
1456                         if (!i) {
1457                                 ret = -ENOMEM;
1458                                 goto out;
1459                         }
1460                         len = min_t(unsigned, len,
1461                                     nr_pages * PAGE_SIZE - offset);
1462                         break;
1463                 }
1464         }
1465
1466         if (offset && !PageUptodate(pages[0])) {
1467                 ret = bch2_read_single_page(pages[0], mapping);
1468                 if (ret)
1469                         goto out;
1470         }
1471
1472         if ((pos + len) & (PAGE_SIZE - 1) &&
1473             !PageUptodate(pages[nr_pages - 1])) {
1474                 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1475                         zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1476                 } else {
1477                         ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1478                         if (ret)
1479                                 goto out;
1480                 }
1481         }
1482
1483         while (reserved < len) {
1484                 struct page *page = pages[(offset + reserved) >> PAGE_SHIFT];
1485                 unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
1486                 unsigned pg_len = min_t(unsigned, len - reserved,
1487                                         PAGE_SIZE - pg_offset);
1488 retry_reservation:
1489                 ret = bch2_page_reservation_get(c, inode, page, &res,
1490                                                 pg_offset, pg_len, true);
1491
1492                 if (ret && !PageUptodate(page)) {
1493                         ret = bch2_read_single_page(page, mapping);
1494                         if (!ret)
1495                                 goto retry_reservation;
1496                 }
1497
1498                 if (ret)
1499                         goto out;
1500
1501                 reserved += pg_len;
1502         }
1503
1504         if (mapping_writably_mapped(mapping))
1505                 for (i = 0; i < nr_pages; i++)
1506                         flush_dcache_page(pages[i]);
1507
1508         while (copied < len) {
1509                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1510                 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1511                 unsigned pg_len = min_t(unsigned, len - copied,
1512                                         PAGE_SIZE - pg_offset);
1513                 unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
1514                                                 iter, pg_offset, pg_len);
1515
1516                 if (!pg_copied)
1517                         break;
1518
1519                 flush_dcache_page(page);
1520                 iov_iter_advance(iter, pg_copied);
1521                 copied += pg_copied;
1522         }
1523
1524         if (!copied)
1525                 goto out;
1526
1527         if (copied < len &&
1528             ((offset + copied) & (PAGE_SIZE - 1))) {
1529                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1530
1531                 if (!PageUptodate(page)) {
1532                         zero_user(page, 0, PAGE_SIZE);
1533                         copied -= (offset + copied) & (PAGE_SIZE - 1);
1534                 }
1535         }
1536
1537         spin_lock(&inode->v.i_lock);
1538         if (pos + copied > inode->v.i_size)
1539                 i_size_write(&inode->v, pos + copied);
1540         spin_unlock(&inode->v.i_lock);
1541
1542         while (set_dirty < copied) {
1543                 struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
1544                 unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
1545                 unsigned pg_len = min_t(unsigned, copied - set_dirty,
1546                                         PAGE_SIZE - pg_offset);
1547
1548                 if (!PageUptodate(page))
1549                         SetPageUptodate(page);
1550
1551                 bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
1552                 unlock_page(page);
1553                 put_page(page);
1554
1555                 set_dirty += pg_len;
1556         }
1557
1558         nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1559         inode->ei_last_dirtied = (unsigned long) current;
1560 out:
1561         for (i = nr_pages_copied; i < nr_pages; i++) {
1562                 unlock_page(pages[i]);
1563                 put_page(pages[i]);
1564         }
1565
1566         bch2_page_reservation_put(c, inode, &res);
1567
1568         return copied ?: ret;
1569 }
1570
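     /*
      * Buffered write path: copy from the iterator into the page cache in
      * batches of up to WRITE_BATCH_PAGES pages via __bch2_buffered_write().
      * User pages are faulted in before each batch so we can't deadlock
      * copying from a page we're also writing to; if nothing could be
      * copied we retry with a single segment to avoid livelocking.
      */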
1571 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1572 {
1573         struct file *file = iocb->ki_filp;
1574         struct address_space *mapping = file->f_mapping;
1575         struct bch_inode_info *inode = file_bch_inode(file);
1576         loff_t pos = iocb->ki_pos;
1577         ssize_t written = 0;
1578         int ret = 0;
1579
1580         bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1581
1582         do {
1583                 unsigned offset = pos & (PAGE_SIZE - 1);
1584                 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1585                               PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1586 again:
1587                 /*
1588                  * Bring in the user page that we will copy from _first_.
1589                  * Otherwise there's a nasty deadlock on copying from the
1590                  * same page as we're writing to, without it being marked
1591                  * up-to-date.
1592                  *
1593                  * Not only is this an optimisation, but it is also required
1594                  * to check that the address is actually valid, when atomic
1595                  * usercopies are used, below.
1596                  */
1597                 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1598                         bytes = min_t(unsigned long, iov_iter_count(iter),
1599                                       PAGE_SIZE - offset);
1600
1601                         if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1602                                 ret = -EFAULT;
1603                                 break;
1604                         }
1605                 }
1606
1607                 if (unlikely(fatal_signal_pending(current))) {
1608                         ret = -EINTR;
1609                         break;
1610                 }
1611
1612                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1613                 if (unlikely(ret < 0))
1614                         break;
1615
1616                 cond_resched();
1617
1618                 if (unlikely(ret == 0)) {
1619                         /*
1620                          * If we were unable to copy any data at all, we must
1621                          * fall back to a single segment length write.
1622                          *
1623                          * If we didn't fall back here, we could livelock
1624                          * because not all segments in the iov can be copied at
1625                          * once without a pagefault.
1626                          */
1627                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
1628                                       iov_iter_single_seg_count(iter));
1629                         goto again;
1630                 }
1631                 pos += ret;
1632                 written += ret;
1633
1634                 balance_dirty_pages_ratelimited(mapping);
1635         } while (iov_iter_count(iter));
1636
1637         bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1638
1639         return written ? written : ret;
1640 }
1641
1642 /* O_DIRECT reads */
1643
1644 static void bch2_dio_read_complete(struct closure *cl)
1645 {
1646         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1647
1648         dio->req->ki_complete(dio->req, dio->ret, 0);
1649         bio_check_pages_dirty(&dio->rbio.bio);  /* transfers ownership */
1650 }
1651
1652 static void bch2_direct_IO_read_endio(struct bio *bio)
1653 {
1654         struct dio_read *dio = bio->bi_private;
1655
1656         if (bio->bi_status)
1657                 dio->ret = blk_status_to_errno(bio->bi_status);
1658
1659         closure_put(&dio->cl);
1660 }
1661
1662 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1663 {
1664         bch2_direct_IO_read_endio(bio);
1665         bio_check_pages_dirty(bio);     /* transfers ownership */
1666 }
1667
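     /*
      * O_DIRECT read: offset and length must be block aligned. The request
      * is clamped to i_size and split into one or more bios (extra bios are
      * allocated from c->bio_read), each submitted via bch2_read().
      * Synchronous kiocbs wait on the closure here; asynchronous ones
      * complete through bch2_dio_read_complete().
      */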
1668 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1669 {
1670         struct file *file = req->ki_filp;
1671         struct bch_inode_info *inode = file_bch_inode(file);
1672         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1673         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1674         struct dio_read *dio;
1675         struct bio *bio;
1676         loff_t offset = req->ki_pos;
1677         bool sync = is_sync_kiocb(req);
1678         size_t shorten;
1679         ssize_t ret;
1680
1681         if ((offset|iter->count) & (block_bytes(c) - 1))
1682                 return -EINVAL;
1683
1684         ret = min_t(loff_t, iter->count,
1685                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1686
1687         if (!ret)
1688                 return ret;
1689
1690         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1691         iter->count -= shorten;
1692
1693         bio = bio_alloc_bioset(GFP_KERNEL,
1694                                iov_iter_npages(iter, BIO_MAX_PAGES),
1695                                &c->dio_read_bioset);
1696
1697         bio->bi_end_io = bch2_direct_IO_read_endio;
1698
1699         dio = container_of(bio, struct dio_read, rbio.bio);
1700         closure_init(&dio->cl, NULL);
1701
1702         /*
1703          * this is a _really_ horrible hack just to avoid an atomic sub at the
1704          * end:
1705          */
1706         if (!sync) {
1707                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1708                 atomic_set(&dio->cl.remaining,
1709                            CLOSURE_REMAINING_INITIALIZER -
1710                            CLOSURE_RUNNING +
1711                            CLOSURE_DESTRUCTOR);
1712         } else {
1713                 atomic_set(&dio->cl.remaining,
1714                            CLOSURE_REMAINING_INITIALIZER + 1);
1715         }
1716
1717         dio->req        = req;
1718         dio->ret        = ret;
1719
1720         goto start;
1721         while (iter->count) {
1722                 bio = bio_alloc_bioset(GFP_KERNEL,
1723                                        iov_iter_npages(iter, BIO_MAX_PAGES),
1724                                        &c->bio_read);
1725                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1726 start:
1727                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1728                 bio->bi_iter.bi_sector  = offset >> 9;
1729                 bio->bi_private         = dio;
1730
1731                 ret = bio_iov_iter_get_pages(bio, iter);
1732                 if (ret < 0) {
1733                         /* XXX: fault inject this path */
1734                         bio->bi_status = BLK_STS_RESOURCE;
1735                         bio_endio(bio);
1736                         break;
1737                 }
1738
1739                 offset += bio->bi_iter.bi_size;
1740                 bio_set_pages_dirty(bio);
1741
1742                 if (iter->count)
1743                         closure_get(&dio->cl);
1744
1745                 bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
1746         }
1747
1748         iter->count += shorten;
1749
1750         if (sync) {
1751                 closure_sync(&dio->cl);
1752                 closure_debug_destroy(&dio->cl);
1753                 ret = dio->ret;
1754                 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1755                 return ret;
1756         } else {
1757                 return -EIOCBQUEUED;
1758         }
1759 }
1760
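     /*
      * Top level read path: O_DIRECT reads flush any dirty pages in the
      * range and then bypass the page cache; buffered reads go through
      * generic_file_read_iter() under the pagecache add lock.
      */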
1761 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1762 {
1763         struct file *file = iocb->ki_filp;
1764         struct bch_inode_info *inode = file_bch_inode(file);
1765         struct address_space *mapping = file->f_mapping;
1766         size_t count = iov_iter_count(iter);
1767         ssize_t ret;
1768
1769         if (!count)
1770                 return 0; /* skip atime */
1771
1772         if (iocb->ki_flags & IOCB_DIRECT) {
1773                 struct blk_plug plug;
1774
1775                 ret = filemap_write_and_wait_range(mapping,
1776                                         iocb->ki_pos,
1777                                         iocb->ki_pos + count - 1);
1778                 if (ret < 0)
1779                         return ret;
1780
1781                 file_accessed(file);
1782
1783                 blk_start_plug(&plug);
1784                 ret = bch2_direct_IO_read(iocb, iter);
1785                 blk_finish_plug(&plug);
1786
1787                 if (ret >= 0)
1788                         iocb->ki_pos += ret;
1789         } else {
1790                 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1791                 ret = generic_file_read_iter(iocb, iter);
1792                 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1793         }
1794
1795         return ret;
1796 }
1797
1798 /* O_DIRECT writes */
1799
1800 static void bch2_dio_write_loop_async(struct bch_write_op *);
1801
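     /*
      * Core of the O_DIRECT write path: pin user pages (at most
      * BIO_MAX_PAGES worth per iteration, with faults on our own mapping
      * disabled to avoid deadlocking against the pagecache_block lock),
      * set up a bch_write_op, take a disk reservation unless the range is
      * already allocated, and submit with bch2_write(). Synchronous writes
      * wait on dio->done; asynchronous completions re-enter this loop from
      * bch2_dio_write_loop_async().
      */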
1802 static long bch2_dio_write_loop(struct dio_write *dio)
1803 {
1804         bool kthread = (current->flags & PF_KTHREAD) != 0;
1805         struct kiocb *req = dio->req;
1806         struct address_space *mapping = req->ki_filp->f_mapping;
1807         struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
1808         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1809         struct bio *bio = &dio->op.wbio.bio;
1810         struct bvec_iter_all iter;
1811         struct bio_vec *bv;
1812         unsigned unaligned;
1813         bool sync = dio->sync;
1814         long ret;
1815
1816         if (dio->loop)
1817                 goto loop;
1818
1819         while (1) {
1820                 size_t extra = dio->iter.count -
1821                         min(BIO_MAX_PAGES * PAGE_SIZE, dio->iter.count);
1822
1823                 if (kthread)
1824                         use_mm(dio->mm);
1825                 BUG_ON(current->faults_disabled_mapping);
1826                 current->faults_disabled_mapping = mapping;
1827
1828                 /*
1829                  * Don't issue more than 2MB at once, the bcachefs io path in
1830                  * io.c can't bounce more than that:
1831                  */
1832
1833                 dio->iter.count -= extra;
1834                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
1835                 dio->iter.count += extra;
1836
1837                 current->faults_disabled_mapping = NULL;
1838                 if (kthread)
1839                         unuse_mm(dio->mm);
1840
1841                 if (unlikely(ret < 0))
1842                         goto err;
1843
1844                 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
1845                 bio->bi_iter.bi_size -= unaligned;
1846                 iov_iter_revert(&dio->iter, unaligned);
1847
1848                 if (!bio->bi_iter.bi_size) {
1849                         /*
1850                          * bio_iov_iter_get_pages was only able to get <
1851                          * blocksize worth of pages:
1852                          */
1853                         bio_for_each_segment_all(bv, bio, iter)
1854                                 put_page(bv->bv_page);
1855                         ret = -EFAULT;
1856                         goto err;
1857                 }
1858
1859                 bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
1860                 dio->op.end_io          = bch2_dio_write_loop_async;
1861                 dio->op.target          = dio->op.opts.foreground_target;
1862                 op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
1863                 dio->op.write_point     = writepoint_hashed((unsigned long) current);
1864                 dio->op.nr_replicas     = dio->op.opts.data_replicas;
1865                 dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
1866
1867                 if ((req->ki_flags & IOCB_DSYNC) &&
1868                     !c->opts.journal_flush_disabled)
1869                         dio->op.flags |= BCH_WRITE_FLUSH;
1870
1871                 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
1872                                                 dio->op.opts.data_replicas, 0);
1873                 if (unlikely(ret) &&
1874                     !bch2_check_range_allocated(c, dio->op.pos,
1875                                 bio_sectors(bio), dio->op.opts.data_replicas))
1876                         goto err;
1877
1878                 task_io_account_write(bio->bi_iter.bi_size);
1879
1880                 if (!dio->sync && !dio->loop && dio->iter.count) {
1881                         struct iovec *iov = dio->inline_vecs;
1882
1883                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
1884                                 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
1885                                               GFP_KERNEL);
1886                                 if (unlikely(!iov)) {
1887                                         dio->sync = sync = true;
1888                                         goto do_io;
1889                                 }
1890
1891                                 dio->free_iov = true;
1892                         }
1893
1894                         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
1895                         dio->iter.iov = iov;
1896                 }
1897 do_io:
1898                 dio->loop = true;
1899                 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
1900
1901                 if (sync)
1902                         wait_for_completion(&dio->done);
1903                 else
1904                         return -EIOCBQUEUED;
1905 loop:
1906                 i_sectors_acct(c, inode, &dio->quota_res,
1907                                dio->op.i_sectors_delta);
1908                 req->ki_pos += (u64) dio->op.written << 9;
1909                 dio->written += dio->op.written;
1910
1911                 spin_lock(&inode->v.i_lock);
1912                 if (req->ki_pos > inode->v.i_size)
1913                         i_size_write(&inode->v, req->ki_pos);
1914                 spin_unlock(&inode->v.i_lock);
1915
1916                 bio_for_each_segment_all(bv, bio, iter)
1917                         put_page(bv->bv_page);
1918                 if (!dio->iter.count || dio->op.error)
1919                         break;
1920
1921                 bio_reset(bio);
1922                 reinit_completion(&dio->done);
1923         }
1924
1925         ret = dio->op.error ?: ((long) dio->written << 9);
1926 err:
1927         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
1928         bch2_quota_reservation_put(c, inode, &dio->quota_res);
1929
1930         if (dio->free_iov)
1931                 kfree(dio->iter.iov);
1932
1933         bio_put(bio);
1934
1935         /* inode->i_dio_count is our ref on inode and thus bch_fs */
1936         inode_dio_end(&inode->v);
1937
1938         if (!sync) {
1939                 req->ki_complete(req, ret, 0);
1940                 ret = -EIOCBQUEUED;
1941         }
1942         return ret;
1943 }
1944
1945 static void bch2_dio_write_loop_async(struct bch_write_op *op)
1946 {
1947         struct dio_write *dio = container_of(op, struct dio_write, op);
1948
1949         if (dio->sync)
1950                 complete(&dio->done);
1951         else
1952                 bch2_dio_write_loop(dio);
1953 }
1954
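     /*
      * O_DIRECT write entry point: run the generic write checks, block new
      * page cache pages for the range, and drop i_rwsem early when we're
      * not extending the file. The affected page cache range is written
      * back and invalidated before handing off to bch2_dio_write_loop().
      */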
1955 static noinline
1956 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
1957 {
1958         struct file *file = req->ki_filp;
1959         struct address_space *mapping = file->f_mapping;
1960         struct bch_inode_info *inode = file_bch_inode(file);
1961         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1962         struct dio_write *dio;
1963         struct bio *bio;
1964         bool locked = true, extending;
1965         ssize_t ret;
1966
1967         prefetch(&c->opts);
1968         prefetch((void *) &c->opts + 64);
1969         prefetch(&inode->ei_inode);
1970         prefetch((void *) &inode->ei_inode + 64);
1971
1972         inode_lock(&inode->v);
1973
1974         ret = generic_write_checks(req, iter);
1975         if (unlikely(ret <= 0))
1976                 goto err;
1977
1978         ret = file_remove_privs(file);
1979         if (unlikely(ret))
1980                 goto err;
1981
1982         ret = file_update_time(file);
1983         if (unlikely(ret))
1984                 goto err;
1985
1986         if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
1987                 ret = -EINVAL;
                     goto err;
             }
1988
1989         inode_dio_begin(&inode->v);
1990         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
1991
1992         extending = req->ki_pos + iter->count > inode->v.i_size;
1993         if (!extending) {
1994                 inode_unlock(&inode->v);
1995                 locked = false;
1996         }
1997
1998         bio = bio_alloc_bioset(GFP_KERNEL,
1999                                iov_iter_npages(iter, BIO_MAX_PAGES),
2000                                &c->dio_write_bioset);
2001         dio = container_of(bio, struct dio_write, op.wbio.bio);
2002         init_completion(&dio->done);
2003         dio->req                = req;
2004         dio->mm                 = current->mm;
2005         dio->loop               = false;
2006         dio->sync               = is_sync_kiocb(req) || extending;
2007         dio->free_iov           = false;
2008         dio->quota_res.sectors  = 0;
2009         dio->written            = 0;
2010         dio->iter               = *iter;
2011
2012         ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2013                                          iter->count >> 9, true);
2014         if (unlikely(ret))
2015                 goto err_put_bio;
2016
2017         ret = write_invalidate_inode_pages_range(mapping,
2018                                         req->ki_pos,
2019                                         req->ki_pos + iter->count - 1);
2020         if (unlikely(ret))
2021                 goto err_put_bio;
2022
2023         ret = bch2_dio_write_loop(dio);
2024 err:
2025         if (locked)
2026                 inode_unlock(&inode->v);
2027         return ret;
2028 err_put_bio:
2029         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2030         bch2_quota_reservation_put(c, inode, &dio->quota_res);
2031         bio_put(bio);
2032         inode_dio_end(&inode->v);
2033         goto err;
2034 }
2035
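     /*
      * Top level write path: O_DIRECT writes are handled by
      * bch2_direct_write(); buffered writes run under i_rwsem and are
      * followed by generic_write_sync() to honour O_SYNC/O_DSYNC.
      */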
2036 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2037 {
2038         struct file *file = iocb->ki_filp;
2039         struct bch_inode_info *inode = file_bch_inode(file);
2040         ssize_t ret;
2041
2042         if (iocb->ki_flags & IOCB_DIRECT)
2043                 return bch2_direct_write(iocb, from);
2044
2045         /* We can write back this queue in page reclaim */
2046         current->backing_dev_info = inode_to_bdi(&inode->v);
2047         inode_lock(&inode->v);
2048
2049         ret = generic_write_checks(iocb, from);
2050         if (ret <= 0)
2051                 goto unlock;
2052
2053         ret = file_remove_privs(file);
2054         if (ret)
2055                 goto unlock;
2056
2057         ret = file_update_time(file);
2058         if (ret)
2059                 goto unlock;
2060
2061         ret = bch2_buffered_write(iocb, from);
2062         if (likely(ret > 0))
2063                 iocb->ki_pos += ret;
2064 unlock:
2065         inode_unlock(&inode->v);
2066         current->backing_dev_info = NULL;
2067
2068         if (ret > 0)
2069                 ret = generic_write_sync(iocb, ret);
2070
2071         return ret;
2072 }
2073
2074 /* fsync: */
2075
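     /*
      * fsync: write back and wait on dirty pages, sync inode metadata if
      * needed, then flush the journal up to this inode's last journal
      * sequence number (unless journal flushing is disabled).
      */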
2076 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2077 {
2078         struct bch_inode_info *inode = file_bch_inode(file);
2079         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2080         int ret, ret2;
2081
2082         ret = file_write_and_wait_range(file, start, end);
2083         if (ret)
2084                 return ret;
2085
2086         if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
2087                 goto out;
2088
2089         ret = sync_inode_metadata(&inode->v, 1);
2090         if (ret)
2091                 return ret;
2092 out:
2093         if (!c->opts.journal_flush_disabled)
2094                 ret = bch2_journal_flush_seq(&c->journal,
2095                                              inode->ei_journal_seq);
2096         ret2 = file_check_and_advance_wb_err(file);
2097
2098         return ret ?: ret2;
2099 }
2100
2101 /* truncate: */
2102
2103 static inline int range_has_data(struct bch_fs *c,
2104                                   struct bpos start,
2105                                   struct bpos end)
2106 {
2107         struct btree_trans trans;
2108         struct btree_iter *iter;
2109         struct bkey_s_c k;
2110         int ret = 0;
2111
2112         bch2_trans_init(&trans, c, 0, 0);
2113
2114         for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
2115                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2116                         break;
2117
2118                 if (bkey_extent_is_data(k.k)) {
2119                         ret = 1;
2120                         break;
2121                 }
2122         }
2123
2124         return bch2_trans_exit(&trans) ?: ret;
2125 }
2126
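     /*
      * Zero out the part of a page that straddles the start or end of the
      * range being truncated or punched: read the page in if necessary,
      * reset the fully covered sectors to SECTOR_UNALLOCATED, zero the byte
      * range, and redirty the page with a disk reservation so that the
      * zeroing can't later fail with -ENOSPC.
      */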
2127 static int __bch2_truncate_page(struct bch_inode_info *inode,
2128                                 pgoff_t index, loff_t start, loff_t end)
2129 {
2130         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2131         struct address_space *mapping = inode->v.i_mapping;
2132         struct bch_page_state *s;
2133         unsigned start_offset = start & (PAGE_SIZE - 1);
2134         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2135         unsigned i;
2136         struct page *page;
2137         int ret = 0;
2138
2139         /* Page boundary? Nothing to do */
2140         if (!((index == start >> PAGE_SHIFT && start_offset) ||
2141               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2142                 return 0;
2143
2144         /* Above i_size? */
2145         if (index << PAGE_SHIFT >= inode->v.i_size)
2146                 return 0;
2147
2148         page = find_lock_page(mapping, index);
2149         if (!page) {
2150                 /*
2151                  * XXX: we're doing two index lookups when we end up reading the
2152                  * page
2153                  */
2154                 ret = range_has_data(c,
2155                                 POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
2156                                 POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
2157                 if (ret <= 0)
2158                         return ret;
2159
2160                 page = find_or_create_page(mapping, index, GFP_KERNEL);
2161                 if (unlikely(!page)) {
2162                         ret = -ENOMEM;
2163                         goto out;
2164                 }
2165         }
2166
2167         s = bch2_page_state_create(page, 0);
2168         if (!s) {
2169                 ret = -ENOMEM;
2170                 goto unlock;
2171         }
2172
2173         if (!PageUptodate(page)) {
2174                 ret = bch2_read_single_page(page, mapping);
2175                 if (ret)
2176                         goto unlock;
2177         }
2178
2179         if (index != start >> PAGE_SHIFT)
2180                 start_offset = 0;
2181         if (index != end >> PAGE_SHIFT)
2182                 end_offset = PAGE_SIZE;
2183
2184         for (i = round_up(start_offset, block_bytes(c)) >> 9;
2185              i < round_down(end_offset, block_bytes(c)) >> 9;
2186              i++) {
2187                 s->s[i].nr_replicas     = 0;
2188                 s->s[i].state           = SECTOR_UNALLOCATED;
2189         }
2190
2191         zero_user_segment(page, start_offset, end_offset);
2192
2193         /*
2194          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2195          *
2196          * XXX: because we aren't currently tracking whether the page has actual
2197          * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2198          */
2199         ret = bch2_get_page_disk_reservation(c, inode, page, false);
2200         BUG_ON(ret);
2201
2202         __set_page_dirty_nobuffers(page);
2203 unlock:
2204         unlock_page(page);
2205         put_page(page);
2206 out:
2207         return ret;
2208 }
2209
2210 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2211 {
2212         return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2213                                     from, round_up(from, PAGE_SIZE));
2214 }
2215
2216 static int bch2_extend(struct bch_inode_info *inode,
2217                        struct bch_inode_unpacked *inode_u,
2218                        struct iattr *iattr)
2219 {
2220         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2221         struct address_space *mapping = inode->v.i_mapping;
2222         int ret;
2223
2224         /*
2225          * sync appends:
2226          *
2227          * this has to be done _before_ extending i_size:
2228          */
2229         ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2230         if (ret)
2231                 return ret;
2232
2233         truncate_setsize(&inode->v, iattr->ia_size);
2234         setattr_copy(&inode->v, iattr);
2235
2236         mutex_lock(&inode->ei_update_lock);
2237         ret = bch2_write_inode_size(c, inode, inode->v.i_size,
2238                                     ATTR_MTIME|ATTR_CTIME);
2239         mutex_unlock(&inode->ei_update_lock);
2240
2241         return ret;
2242 }
2243
2244 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2245                                    struct bch_inode_unpacked *bi,
2246                                    void *p)
2247 {
2248         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2249
2250         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2251         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
2252         return 0;
2253 }
2254
2255 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2256                                   struct bch_inode_unpacked *bi, void *p)
2257 {
2258         u64 *new_i_size = p;
2259
2260         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2261         bi->bi_size = *new_i_size;
2262         return 0;
2263 }
2264
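     /*
      * Truncate: extending is handled by bch2_extend(). When shrinking, we
      * zero the partial page at the new EOF, write the new size to the
      * inode with BCH_INODE_I_SIZE_DIRTY set, drop the page cache past the
      * new size, fpunch everything past it on disk, and finally clear the
      * dirty flag and update mtime/ctime.
      */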
2265 int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
2266 {
2267         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2268         struct address_space *mapping = inode->v.i_mapping;
2269         struct bch_inode_unpacked inode_u;
2270         struct btree_trans trans;
2271         struct btree_iter *iter;
2272         u64 new_i_size = iattr->ia_size;
2273         s64 i_sectors_delta = 0;
2274         int ret = 0;
2275
2276         inode_dio_wait(&inode->v);
2277         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2278
2279         /*
2280          * fetch current on disk i_size: inode is locked, i_size can only
2281          * increase underneath us:
2282          */
2283         bch2_trans_init(&trans, c, 0, 0);
2284         iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
2285         ret = PTR_ERR_OR_ZERO(iter);
2286         bch2_trans_exit(&trans);
2287
2288         if (ret)
2289                 goto err;
2290
2291         /*
2292          * check this before next assertion; on filesystem error our normal
2293          * invariants are a bit broken (truncate has to truncate the page cache
2294          * before the inode).
2295          */
2296         ret = bch2_journal_error(&c->journal);
2297         if (ret)
2298                 goto err;
2299
2300         BUG_ON(inode->v.i_size < inode_u.bi_size);
2301
2302         if (iattr->ia_size > inode->v.i_size) {
2303                 ret = bch2_extend(inode, &inode_u, iattr);
2304                 goto err;
2305         }
2306
2307         ret = bch2_truncate_page(inode, iattr->ia_size);
2308         if (unlikely(ret))
2309                 goto err;
2310
2311         /*
2312          * When extending, we're going to write the new i_size to disk
2313          * immediately so we need to flush anything above the current on disk
2314          * i_size first:
2315          *
2316          * Also, when extending we need to flush the page that i_size currently
2317          * straddles - if it's mapped to userspace, we need to ensure that
2318          * userspace has to redirty it and call .mkwrite -> set_page_dirty
2319          * again to allocate the part of the page that was extended.
2320          */
2321         if (iattr->ia_size > inode_u.bi_size)
2322                 ret = filemap_write_and_wait_range(mapping,
2323                                 inode_u.bi_size,
2324                                 iattr->ia_size - 1);
2325         else if (iattr->ia_size & (PAGE_SIZE - 1))
2326                 ret = filemap_write_and_wait_range(mapping,
2327                                 round_down(iattr->ia_size, PAGE_SIZE),
2328                                 iattr->ia_size - 1);
2329         if (ret)
2330                 goto err;
2331
2332         mutex_lock(&inode->ei_update_lock);
2333         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2334                                &new_i_size, 0);
2335         mutex_unlock(&inode->ei_update_lock);
2336
2337         if (unlikely(ret))
2338                 goto err;
2339
2340         truncate_setsize(&inode->v, iattr->ia_size);
2341
2342         ret = bch2_fpunch(c, inode->v.i_ino,
2343                         round_up(iattr->ia_size, block_bytes(c)) >> 9,
2344                         U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
2345         i_sectors_acct(c, inode, NULL, i_sectors_delta);
2346
2347         if (unlikely(ret))
2348                 goto err;
2349
2350         setattr_copy(&inode->v, iattr);
2351
2352         mutex_lock(&inode->ei_update_lock);
2353         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
2354                                ATTR_MTIME|ATTR_CTIME);
2355         mutex_unlock(&inode->ei_update_lock);
2356 err:
2357         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2358         return ret;
2359 }
2360
2361 /* fallocate: */
2362
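     /*
      * FALLOC_FL_PUNCH_HOLE: zero the partial pages at either end of the
      * hole, drop the page cache for the range, and delete the fully
      * covered blocks with bch2_fpunch().
      */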
2363 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2364 {
2365         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2366         u64 discard_start = round_up(offset, block_bytes(c)) >> 9;
2367         u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9;
2368         int ret = 0;
2369
2370         inode_lock(&inode->v);
2371         inode_dio_wait(&inode->v);
2372         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2373
2374         ret = __bch2_truncate_page(inode,
2375                                    offset >> PAGE_SHIFT,
2376                                    offset, offset + len);
2377         if (unlikely(ret))
2378                 goto err;
2379
2380         if (offset >> PAGE_SHIFT !=
2381             (offset + len) >> PAGE_SHIFT) {
2382                 ret = __bch2_truncate_page(inode,
2383                                            (offset + len) >> PAGE_SHIFT,
2384                                            offset, offset + len);
2385                 if (unlikely(ret))
2386                         goto err;
2387         }
2388
2389         truncate_pagecache_range(&inode->v, offset, offset + len - 1);
2390
2391         if (discard_start < discard_end) {
2392                 s64 i_sectors_delta = 0;
2393
2394                 ret = bch2_fpunch(c, inode->v.i_ino,
2395                                   discard_start, discard_end,
2396                                   &inode->ei_journal_seq,
2397                                   &i_sectors_delta);
2398                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2399         }
2400 err:
2401         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2402         inode_unlock(&inode->v);
2403
2404         return ret;
2405 }
2406
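     /*
      * FALLOC_FL_COLLAPSE_RANGE (!insert) and FALLOC_FL_INSERT_RANGE
      * (insert): after invalidating the page cache from @offset onwards,
      * collapse first punches out [offset, offset + len) and then shifts
      * every following extent down by @len; insert bumps i_size first and
      * walks extents backwards, shifting everything at or above @offset up
      * by @len. Each extent is moved by deleting it at the old position and
      * reinserting it at the new one within a single transaction commit.
      */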
2407 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
2408                                    loff_t offset, loff_t len,
2409                                    bool insert)
2410 {
2411         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2412         struct address_space *mapping = inode->v.i_mapping;
2413         struct bkey_on_stack copy;
2414         struct btree_trans trans;
2415         struct btree_iter *src, *dst;
2416         loff_t shift, new_size;
2417         u64 src_start;
2418         int ret;
2419
2420         if ((offset | len) & (block_bytes(c) - 1))
2421                 return -EINVAL;
2422
2423         bkey_on_stack_init(&copy);
2424         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
2425
2426         /*
2427          * We need i_mutex to keep the page cache consistent with the extents
2428          * btree, and the btree consistent with i_size - we don't need outside
2429          * locking for the extents btree itself, because we're using linked
2430          * iterators
2431          */
2432         inode_lock(&inode->v);
2433         inode_dio_wait(&inode->v);
2434         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2435
2436         if (insert) {
2437                 ret = -EFBIG;
2438                 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
2439                         goto err;
2440
2441                 ret = -EINVAL;
2442                 if (offset >= inode->v.i_size)
2443                         goto err;
2444
2445                 src_start       = U64_MAX;
2446                 shift           = len;
2447         } else {
2448                 ret = -EINVAL;
2449                 if (offset + len >= inode->v.i_size)
2450                         goto err;
2451
2452                 src_start       = offset + len;
2453                 shift           = -len;
2454         }
2455
2456         new_size = inode->v.i_size + shift;
2457
2458         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2459         if (ret)
2460                 goto err;
2461
2462         if (insert) {
2463                 i_size_write(&inode->v, new_size);
2464                 mutex_lock(&inode->ei_update_lock);
2465                 ret = bch2_write_inode_size(c, inode, new_size,
2466                                             ATTR_MTIME|ATTR_CTIME);
2467                 mutex_unlock(&inode->ei_update_lock);
2468         } else {
2469                 s64 i_sectors_delta = 0;
2470
2471                 ret = bch2_fpunch(c, inode->v.i_ino,
2472                                   offset >> 9, (offset + len) >> 9,
2473                                   &inode->ei_journal_seq,
2474                                   &i_sectors_delta);
2475                 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2476
2477                 if (ret)
2478                         goto err;
2479         }
2480
2481         src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2482                         POS(inode->v.i_ino, src_start >> 9),
2483                         BTREE_ITER_INTENT);
2484         BUG_ON(IS_ERR_OR_NULL(src));
2485
2486         dst = bch2_trans_copy_iter(&trans, src);
2487         BUG_ON(IS_ERR_OR_NULL(dst));
2488
2489         while (1) {
2490                 struct disk_reservation disk_res =
2491                         bch2_disk_reservation_init(c, 0);
2492                 struct bkey_i delete;
2493                 struct bkey_s_c k;
2494                 struct bpos next_pos;
2495                 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
2496                 struct bpos atomic_end;
2497                 unsigned trigger_flags = 0;
2498
2499                 k = insert
2500                         ? bch2_btree_iter_peek_prev(src)
2501                         : bch2_btree_iter_peek(src);
2502                 if ((ret = bkey_err(k)))
2503                         goto bkey_err;
2504
2505                 if (!k.k || k.k->p.inode != inode->v.i_ino)
2506                         break;
2507
2508                 BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));
2509
2510                 if (insert &&
2511                     bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
2512                         break;
2513 reassemble:
2514                 bkey_on_stack_reassemble(&copy, c, k);
2515
2516                 if (insert &&
2517                     bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
2518                         bch2_cut_front(move_pos, copy.k);
2519
2520                 copy.k->k.p.offset += shift >> 9;
2521                 bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));
2522
2523                 ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
2524                 if (ret)
2525                         goto bkey_err;
2526
2527                 if (bkey_cmp(atomic_end, copy.k->k.p)) {
2528                         if (insert) {
2529                                 move_pos = atomic_end;
2530                                 move_pos.offset -= shift >> 9;
2531                                 goto reassemble;
2532                         } else {
2533                                 bch2_cut_back(atomic_end, copy.k);
2534                         }
2535                 }
2536
2537                 bkey_init(&delete.k);
2538                 delete.k.p = copy.k->k.p;
2539                 delete.k.size = copy.k->k.size;
2540                 delete.k.p.offset -= shift >> 9;
2541
2542                 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
2543
2544                 if (copy.k->k.size == k.k->size) {
2545                         /*
2546                          * If we're moving the entire extent, we can skip
2547                          * running triggers:
2548                          */
2549                         trigger_flags |= BTREE_TRIGGER_NORUN;
2550                 } else {
2551                         /* We might end up splitting compressed extents: */
2552                         unsigned nr_ptrs =
2553                                 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
2554
2555                         ret = bch2_disk_reservation_get(c, &disk_res,
2556                                         copy.k->k.size, nr_ptrs,
2557                                         BCH_DISK_RESERVATION_NOFAIL);
2558                         BUG_ON(ret);
2559                 }
2560
2561                 bch2_btree_iter_set_pos(src, bkey_start_pos(&delete.k));
2562
2563                 ret =   bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
2564                         bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
2565                         bch2_trans_commit(&trans, &disk_res,
2566                                           &inode->ei_journal_seq,
2567                                           BTREE_INSERT_NOFAIL);
2568                 bch2_disk_reservation_put(c, &disk_res);
2569 bkey_err:
2570                 if (!ret)
2571                         bch2_btree_iter_set_pos(src, next_pos);
2572
2573                 if (ret == -EINTR)
2574                         ret = 0;
2575                 if (ret)
2576                         goto err;
2577
2578                 bch2_trans_cond_resched(&trans);
2579         }
2580         bch2_trans_unlock(&trans);
2581
2582         if (!insert) {
2583                 i_size_write(&inode->v, new_size);
2584                 mutex_lock(&inode->ei_update_lock);
2585                 ret = bch2_write_inode_size(c, inode, new_size,
2586                                             ATTR_MTIME|ATTR_CTIME);
2587                 mutex_unlock(&inode->ei_update_lock);
2588         }
2589 err:
2590         bch2_trans_exit(&trans);
2591         bkey_on_stack_exit(&copy, c);
2592         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2593         inode_unlock(&inode->v);
2594         return ret;
2595 }
2596
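     /*
      * fallocate/zero range: walk the range one extent slot at a time and
      * replace unallocated slots (and, for ZERO_RANGE, existing data) with
      * reservation keys, taking quota and disk reservations as needed. If
      * the range reaches EOF the on disk i_size is updated afterwards; see
      * the comment below on FALLOC_FL_KEEP_SIZE.
      */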
2597 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
2598                             loff_t offset, loff_t len)
2599 {
2600         struct address_space *mapping = inode->v.i_mapping;
2601         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2602         struct btree_trans trans;
2603         struct btree_iter *iter;
2604         struct bpos end_pos;
2605         loff_t end              = offset + len;
2606         loff_t block_start      = round_down(offset,    block_bytes(c));
2607         loff_t block_end        = round_up(end,         block_bytes(c));
2608         unsigned sectors;
2609         unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2610         int ret;
2611
2612         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2613
2614         inode_lock(&inode->v);
2615         inode_dio_wait(&inode->v);
2616         bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2617
2618         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2619                 ret = inode_newsize_ok(&inode->v, end);
2620                 if (ret)
2621                         goto err;
2622         }
2623
2624         if (mode & FALLOC_FL_ZERO_RANGE) {
2625                 ret = __bch2_truncate_page(inode,
2626                                            offset >> PAGE_SHIFT,
2627                                            offset, end);
2628
2629                 if (!ret &&
2630                     offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2631                         ret = __bch2_truncate_page(inode,
2632                                                    end >> PAGE_SHIFT,
2633                                                    offset, end);
2634
2635                 if (unlikely(ret))
2636                         goto err;
2637
2638                 truncate_pagecache_range(&inode->v, offset, end - 1);
2639         }
2640
2641         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2642                         POS(inode->v.i_ino, block_start >> 9),
2643                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2644         end_pos = POS(inode->v.i_ino, block_end >> 9);
2645
2646         while (bkey_cmp(iter->pos, end_pos) < 0) {
2647                 s64 i_sectors_delta = 0;
2648                 struct disk_reservation disk_res = { 0 };
2649                 struct quota_res quota_res = { 0 };
2650                 struct bkey_i_reservation reservation;
2651                 struct bkey_s_c k;
2652
2653                 bch2_trans_begin(&trans);
2654
2655                 k = bch2_btree_iter_peek_slot(iter);
2656                 if ((ret = bkey_err(k)))
2657                         goto bkey_err;
2658
2659                 /* already reserved */
2660                 if (k.k->type == KEY_TYPE_reservation &&
2661                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2662                         bch2_btree_iter_next_slot(iter);
2663                         continue;
2664                 }
2665
2666                 if (bkey_extent_is_data(k.k) &&
2667                     !(mode & FALLOC_FL_ZERO_RANGE)) {
2668                         bch2_btree_iter_next_slot(iter);
2669                         continue;
2670                 }
2671
2672                 bkey_reservation_init(&reservation.k_i);
2673                 reservation.k.type      = KEY_TYPE_reservation;
2674                 reservation.k.p         = k.k->p;
2675                 reservation.k.size      = k.k->size;
2676
2677                 bch2_cut_front(iter->pos,       &reservation.k_i);
2678                 bch2_cut_back(end_pos,          &reservation.k_i);
2679
2680                 sectors = reservation.k.size;
2681                 reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
2682
2683                 if (!bkey_extent_is_allocation(k.k)) {
2684                         ret = bch2_quota_reservation_add(c, inode,
2685                                         &quota_res,
2686                                         sectors, true);
2687                         if (unlikely(ret))
2688                                 goto bkey_err;
2689                 }
2690
2691                 if (reservation.v.nr_replicas < replicas ||
2692                     bch2_bkey_sectors_compressed(k)) {
2693                         ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2694                                                         replicas, 0);
2695                         if (unlikely(ret))
2696                                 goto bkey_err;
2697
2698                         reservation.v.nr_replicas = disk_res.nr_replicas;
2699                 }
2700
2701                 ret = bch2_extent_update(&trans, iter, &reservation.k_i,
2702                                 &disk_res, &inode->ei_journal_seq,
2703                                 0, &i_sectors_delta);
2704                 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
2705 bkey_err:
2706                 bch2_quota_reservation_put(c, inode, &quota_res);
2707                 bch2_disk_reservation_put(c, &disk_res);
2708                 if (ret == -EINTR)
2709                         ret = 0;
2710                 if (ret)
2711                         goto err;
2712         }
2713
2714         /*
2715          * Do we need to extend the file?
2716          *
2717          * If we zeroed up to the end of the file, we dropped whatever writes
2718          * were going to write out the current i_size, so we have to extend
2719          * manually even if FL_KEEP_SIZE was set:
2720          */
2721         if (end >= inode->v.i_size &&
2722             (!(mode & FALLOC_FL_KEEP_SIZE) ||
2723              (mode & FALLOC_FL_ZERO_RANGE))) {
2724                 struct btree_iter *inode_iter;
2725                 struct bch_inode_unpacked inode_u;
2726
2727                 do {
2728                         bch2_trans_begin(&trans);
2729                         inode_iter = bch2_inode_peek(&trans, &inode_u,
2730                                                      inode->v.i_ino, 0);
2731                         ret = PTR_ERR_OR_ZERO(inode_iter);
2732                 } while (ret == -EINTR);
2733
2734                 bch2_trans_unlock(&trans);
2735
2736                 if (ret)
2737                         goto err;
2738
2739                 /*
2740                  * Sync existing appends before extending i_size,
2741                  * as in bch2_extend():
2742                  */
2743                 ret = filemap_write_and_wait_range(mapping,
2744                                         inode_u.bi_size, S64_MAX);
2745                 if (ret)
2746                         goto err;
2747
2748                 if (mode & FALLOC_FL_KEEP_SIZE)
2749                         end = inode->v.i_size;
2750                 else
2751                         i_size_write(&inode->v, end);
2752
2753                 mutex_lock(&inode->ei_update_lock);
2754                 ret = bch2_write_inode_size(c, inode, end, 0);
2755                 mutex_unlock(&inode->ei_update_lock);
2756         }
2757 err:
2758         bch2_trans_exit(&trans);
2759         bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2760         inode_unlock(&inode->v);
2761         return ret;
2762 }
2763
2764 long bch2_fallocate_dispatch(struct file *file, int mode,
2765                              loff_t offset, loff_t len)
2766 {
2767         struct bch_inode_info *inode = file_bch_inode(file);
2768         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2769         long ret;
2770
2771         if (!percpu_ref_tryget(&c->writes))
2772                 return -EROFS;
2773
2774         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2775                 ret = bchfs_fallocate(inode, mode, offset, len);
2776         else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2777                 ret = bchfs_fpunch(inode, offset, len);
2778         else if (mode == FALLOC_FL_INSERT_RANGE)
2779                 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
2780         else if (mode == FALLOC_FL_COLLAPSE_RANGE)
2781                 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
2782         else
2783                 ret = -EOPNOTSUPP;
2784
2785         percpu_ref_put(&c->writes);
2786
2787         return ret;
2788 }
2789
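     /*
      * After reflinking, the source extents are shared: clear the cached
      * per-sector nr_replicas counts for the source range so that future
      * writes to those pages take fresh disk reservations instead of
      * assuming space is already allocated.
      */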
2790 static void mark_range_unallocated(struct bch_inode_info *inode,
2791                                    loff_t start, loff_t end)
2792 {
2793         pgoff_t index = start >> PAGE_SHIFT;
2794         pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
2795         struct pagevec pvec;
2796
2797         pagevec_init(&pvec);
2798
2799         do {
2800                 unsigned nr_pages, i, j;
2801
2802                 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
2803                                                 &index, end_index);
2804                 if (nr_pages == 0)
2805                         break;
2806
2807                 for (i = 0; i < nr_pages; i++) {
2808                         struct page *page = pvec.pages[i];
2809                         struct bch_page_state *s;
2810
2811                         lock_page(page);
2812                         s = bch2_page_state(page);
2813
2814                         if (s) {
2815                                 spin_lock(&s->lock);
2816                                 for (j = 0; j < PAGE_SECTORS; j++)
2817                                         s->s[j].nr_replicas = 0;
2818                                 spin_unlock(&s->lock);
2819                         }
2820
2821                         unlock_page(page);
2822                 }
2823                 pagevec_release(&pvec);
2824         } while (index <= end_index);
2825 }
2826
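     /*
      * reflink entry point: source and destination offsets must be block
      * aligned and the ranges must not overlap; REMAP_FILE_DEDUP is not
      * supported. The destination range is written back and invalidated,
      * the source pages are marked unallocated, and the extents are shared
      * with bch2_remap_range(), extending the destination i_size if needed.
      */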
2827 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
2828                              struct file *file_dst, loff_t pos_dst,
2829                              loff_t len, unsigned remap_flags)
2830 {
2831         struct bch_inode_info *src = file_bch_inode(file_src);
2832         struct bch_inode_info *dst = file_bch_inode(file_dst);
2833         struct bch_fs *c = src->v.i_sb->s_fs_info;
2834         s64 i_sectors_delta = 0;
2835         u64 aligned_len;
2836         loff_t ret = 0;
2837
2838         if (!c->opts.reflink)
2839                 return -EOPNOTSUPP;
2840
2841         if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
2842                 return -EINVAL;
2843
2844         if (remap_flags & REMAP_FILE_DEDUP)
2845                 return -EOPNOTSUPP;
2846
2847         if ((pos_src & (block_bytes(c) - 1)) ||
2848             (pos_dst & (block_bytes(c) - 1)))
2849                 return -EINVAL;
2850
2851         if (src == dst &&
2852             abs(pos_src - pos_dst) < len)
2853                 return -EINVAL;
2854
2855         bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
2856
2857         file_update_time(file_dst);
2858
2859         inode_dio_wait(&src->v);
2860         inode_dio_wait(&dst->v);
2861
2862         ret = generic_remap_file_range_prep(file_src, pos_src,
2863                                             file_dst, pos_dst,
2864                                             &len, remap_flags);
2865         if (ret < 0 || len == 0)
2866                 goto err;
2867
2868         aligned_len = round_up((u64) len, block_bytes(c));
2869
2870         ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
2871                                 pos_dst, pos_dst + len - 1);
2872         if (ret)
2873                 goto err;
2874
2875         mark_range_unallocated(src, pos_src, pos_src + aligned_len);
2876
2877         ret = bch2_remap_range(c,
2878                                POS(dst->v.i_ino, pos_dst >> 9),
2879                                POS(src->v.i_ino, pos_src >> 9),
2880                                aligned_len >> 9,
2881                                &dst->ei_journal_seq,
2882                                pos_dst + len, &i_sectors_delta);
2883         if (ret < 0)
2884                 goto err;
2885
2886         /*
2887          * due to alignment, we might have remapped slightly more than requested
2888          */
2889         ret = min((u64) ret << 9, (u64) len);
2890
2891         /* XXX get a quota reservation */
2892         i_sectors_acct(c, dst, NULL, i_sectors_delta);
2893
2894         spin_lock(&dst->v.i_lock);
2895         if (pos_dst + ret > dst->v.i_size)
2896                 i_size_write(&dst->v, pos_dst + ret);
2897         spin_unlock(&dst->v.i_lock);
2898 err:
2899         bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
2900
2901         return ret;
2902 }
2903
2904 /* fseek: */
2905
2906 static int page_data_offset(struct page *page, unsigned offset)
2907 {
2908         struct bch_page_state *s = bch2_page_state(page);
2909         unsigned i;
2910
2911         if (s)
2912                 for (i = offset >> 9; i < PAGE_SECTORS; i++)
2913                         if (s->s[i].state >= SECTOR_DIRTY)
2914                                 return i << 9;
2915
2916         return -1;
2917 }
2918
2919 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
2920                                        loff_t start_offset,
2921                                        loff_t end_offset)
2922 {
2923         struct address_space *mapping = vinode->i_mapping;
2924         struct page *page;
2925         pgoff_t start_index     = start_offset >> PAGE_SHIFT;
2926         pgoff_t end_index       = end_offset >> PAGE_SHIFT;
2927         pgoff_t index           = start_index;
2928         loff_t ret;
2929         int offset;
2930
2931         while (index <= end_index) {
2932                 if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
2933                         lock_page(page);
2934
2935                         offset = page_data_offset(page,
2936                                         page->index == start_index
2937                                         ? start_offset & (PAGE_SIZE - 1)
2938                                         : 0);
2939                         if (offset >= 0) {
2940                                 ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
2941                                             offset,
2942                                             start_offset, end_offset);
2943                                 unlock_page(page);
2944                                 put_page(page);
2945                                 return ret;
2946                         }
2947
2948                         unlock_page(page);
2949                         put_page(page);
2950                 } else {
2951                         break;
2952                 }
2953         }
2954
2955         return end_offset;
2956 }
2957
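     /*
      * SEEK_DATA: find the next data extent in the btree, then check the
      * page cache for dirty data in the gap before it, and return whichever
      * comes first.
      */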
2958 static loff_t bch2_seek_data(struct file *file, u64 offset)
2959 {
2960         struct bch_inode_info *inode = file_bch_inode(file);
2961         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2962         struct btree_trans trans;
2963         struct btree_iter *iter;
2964         struct bkey_s_c k;
2965         u64 isize, next_data = MAX_LFS_FILESIZE;
2966         int ret;
2967
2968         isize = i_size_read(&inode->v);
2969         if (offset >= isize)
2970                 return -ENXIO;
2971
2972         bch2_trans_init(&trans, c, 0, 0);
2973
2974         for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
2975                            POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
2976                 if (k.k->p.inode != inode->v.i_ino) {
2977                         break;
2978                 } else if (bkey_extent_is_data(k.k)) {
2979                         next_data = max(offset, bkey_start_offset(k.k) << 9);
2980                         break;
2981                 } else if (k.k->p.offset >> 9 > isize)
2982                         break;
2983         }
2984
2985         ret = bch2_trans_exit(&trans) ?: ret;
2986         if (ret)
2987                 return ret;
2988
2989         if (next_data > offset)
2990                 next_data = bch2_seek_pagecache_data(&inode->v,
2991                                                      offset, next_data);
2992
2993         if (next_data >= isize)
2994                 return -ENXIO;
2995
2996         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
2997 }
2998
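/*
 * __page_hole_offset() is the counterpart of page_data_offset(): it returns
 * the byte offset within the page of the first sector at or after @offset
 * whose state is below SECTOR_DIRTY, 0 if the page has no bch_page_state at
 * all (the whole page counts as a hole), or -1 if every remaining sector
 * holds data.  page_hole_offset() additionally treats a page that is absent
 * from the page cache as a hole and returns @offset unchanged in that case.
 */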
2999 static int __page_hole_offset(struct page *page, unsigned offset)
3000 {
3001         struct bch_page_state *s = bch2_page_state(page);
3002         unsigned i;
3003
3004         if (!s)
3005                 return 0;
3006
3007         for (i = offset >> 9; i < PAGE_SECTORS; i++)
3008                 if (s->s[i].state < SECTOR_DIRTY)
3009                         return i << 9;
3010
3011         return -1;
3012 }
3013
3014 static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
3015 {
3016         pgoff_t index = offset >> PAGE_SHIFT;
3017         struct page *page;
3018         int pg_offset;
3019         loff_t ret = -1;
3020
3021         page = find_lock_entry(mapping, index);
3022         if (!page || xa_is_value(page))
3023                 return offset;
3024
3025         pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
3026         if (pg_offset >= 0)
3027                 ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
3028
3029         unlock_page(page);
             put_page(page);         /* drop the ref taken by find_lock_entry() */
3030
3031         return ret;
3032 }
3033
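/*
 * Walk the page cache one page at a time from start_offset, returning the
 * first offset whose backing sector is below SECTOR_DIRTY (or whose page is
 * absent from the cache); if everything up to end_offset is fully populated,
 * end_offset itself is returned.
 */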
3034 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3035                                        loff_t start_offset,
3036                                        loff_t end_offset)
3037 {
3038         struct address_space *mapping = vinode->i_mapping;
3039         loff_t offset = start_offset, hole;
3040
3041         while (offset < end_offset) {
3042                 hole = page_hole_offset(mapping, offset);
3043                 if (hole >= 0 && hole <= end_offset)
3044                         return max(start_offset, hole);
3045
3046                 offset += PAGE_SIZE;
3047                 offset &= PAGE_MASK;
3048         }
3049
3050         return end_offset;
3051 }
3052
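/*
 * SEEK_HOLE: the extents btree is iterated with BTREE_ITER_SLOTS so that
 * gaps between extents show up as non-data keys.  A candidate hole only
 * counts if the matching page cache range is also free of dirty data, which
 * is what the bch2_seek_pagecache_hole() calls verify; once the iterator
 * runs past the last key for this inode, the remainder of the file is
 * scanned the same way and the result is clamped to EOF.
 */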
3053 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3054 {
3055         struct bch_inode_info *inode = file_bch_inode(file);
3056         struct bch_fs *c = inode->v.i_sb->s_fs_info;
3057         struct btree_trans trans;
3058         struct btree_iter *iter;
3059         struct bkey_s_c k;
3060         u64 isize, next_hole = MAX_LFS_FILESIZE;
3061         int ret;
3062
3063         isize = i_size_read(&inode->v);
3064         if (offset >= isize)
3065                 return -ENXIO;
3066
3067         bch2_trans_init(&trans, c, 0, 0);
3068
3069         for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
3070                            POS(inode->v.i_ino, offset >> 9),
3071                            BTREE_ITER_SLOTS, k, ret) {
3072                 if (k.k->p.inode != inode->v.i_ino) {
3073                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3074                                         offset, MAX_LFS_FILESIZE);
3075                         break;
3076                 } else if (!bkey_extent_is_data(k.k)) {
3077                         next_hole = bch2_seek_pagecache_hole(&inode->v,
3078                                         max(offset, bkey_start_offset(k.k) << 9),
3079                                         k.k->p.offset << 9);
3080
3081                         if (next_hole < k.k->p.offset << 9)
3082                                 break;
3083                 } else {
3084                         offset = max(offset, bkey_start_offset(k.k) << 9);
3085                 }
3086         }
3087
3088         ret = bch2_trans_exit(&trans) ?: ret;
3089         if (ret)
3090                 return ret;
3091
3092         if (next_hole > isize)
3093                 next_hole = isize;
3094
3095         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3096 }
3097
3098 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3099 {
3100         switch (whence) {
3101         case SEEK_SET:
3102         case SEEK_CUR:
3103         case SEEK_END:
3104                 return generic_file_llseek(file, offset, whence);
3105         case SEEK_DATA:
3106                 return bch2_seek_data(file, offset);
3107         case SEEK_HOLE:
3108                 return bch2_seek_hole(file, offset);
3109         }
3110
3111         return -EINVAL;
3112 }
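
/*
 * Illustrative only (not part of the original source): the SEEK_DATA and
 * SEEK_HOLE paths above are driven from userspace via plain lseek(2), e.g.
 * to walk the allocated regions of a sparse file open as @fd:
 *
 *	off_t data = 0, hole;
 *
 *	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		printf("data: [%lld, %lld)\n", (long long) data, (long long) hole);
 *		data = hole;
 *	}
 *
 * lseek() fails with errno == ENXIO once the starting offset is at or past
 * EOF, matching the -ENXIO returns in bch2_seek_data()/bch2_seek_hole().
 */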
3113
3114 void bch2_fs_fsio_exit(struct bch_fs *c)
3115 {
3116         bioset_exit(&c->dio_write_bioset);
3117         bioset_exit(&c->dio_read_bioset);
3118         bioset_exit(&c->writepage_bioset);
3119 }
3120
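/*
 * The front_pad passed to bioset_init() below is the offset of the embedded
 * bio within bch_writepage_io, dio_read and dio_write respectively, so a bio
 * allocated from one of these sets lives inside the corresponding
 * per-request structure and container_of() on the bio recovers it; the small
 * mempool size (4) just provides a reserve so I/O can make forward progress
 * under memory pressure.
 */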
3121 int bch2_fs_fsio_init(struct bch_fs *c)
3122 {
3123         int ret = 0;
3124
3125         pr_verbose_init(c->opts, "");
3126
3127         if (bioset_init(&c->writepage_bioset,
3128                         4, offsetof(struct bch_writepage_io, op.wbio.bio),
3129                         BIOSET_NEED_BVECS) ||
3130             bioset_init(&c->dio_read_bioset,
3131                         4, offsetof(struct dio_read, rbio.bio),
3132                         BIOSET_NEED_BVECS) ||
3133             bioset_init(&c->dio_write_bioset,
3134                         4, offsetof(struct dio_write, op.wbio.bio),
3135                         BIOSET_NEED_BVECS))
3136                 ret = -ENOMEM;
3137
3138         pr_verbose_init(c->opts, "ret %i", ret);
3139         return ret;
3140 }
3141
3142 #endif /* NO_BCACHEFS_FS */