1 #ifndef NO_BCACHEFS_FS
2
3 #include "bcachefs.h"
4 #include "alloc_foreground.h"
5 #include "btree_update.h"
6 #include "buckets.h"
7 #include "clock.h"
8 #include "error.h"
9 #include "extents.h"
10 #include "fs.h"
11 #include "fs-io.h"
12 #include "fsck.h"
13 #include "inode.h"
14 #include "journal.h"
15 #include "io.h"
16 #include "keylist.h"
17 #include "quota.h"
18
19 #include <linux/aio.h>
20 #include <linux/backing-dev.h>
21 #include <linux/falloc.h>
22 #include <linux/migrate.h>
23 #include <linux/mmu_context.h>
24 #include <linux/pagevec.h>
25 #include <linux/sched/signal.h>
26 #include <linux/task_io_accounting_ops.h>
27 #include <linux/uio.h>
28 #include <linux/writeback.h>
29
30 #include <trace/events/bcachefs.h>
31 #include <trace/events/writeback.h>
32
33 struct quota_res {
34         u64                             sectors;
35 };
36
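/*
 * Filesystem-side wrapper around a bch_write_op: tracks the inode being
 * written to, how many sectors the index update added (for i_blocks), and the
 * i_size the write should extend the file to.
 */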
37 struct bchfs_write_op {
38         struct bch_inode_info           *inode;
39         s64                             sectors_added;
40         bool                            is_dio;
41         bool                            unalloc;
42         u64                             new_i_size;
43
44         /* must be last: */
45         struct bch_write_op             op;
46 };
47
48 struct bch_writepage_io {
49         struct closure                  cl;
50         u64                             new_sectors;
51
52         /* must be last: */
53         struct bchfs_write_op           op;
54 };
55
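/*
 * State for an in-flight O_DIRECT write: the originating kiocb, the
 * submitter's mm, the iov_iter still to be written, and the quota reservation
 * taken up front; inline_vecs lets small iovecs avoid a separate allocation.
 */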
56 struct dio_write {
57         struct closure                  cl;
58         struct kiocb                    *req;
59         struct mm_struct                *mm;
60         unsigned                        loop:1,
61                                         sync:1,
62                                         free_iov:1;
63         struct quota_res                quota_res;
64
65         struct iov_iter                 iter;
66         struct iovec                    inline_vecs[2];
67
68         /* must be last: */
69         struct bchfs_write_op           iop;
70 };
71
72 struct dio_read {
73         struct closure                  cl;
74         struct kiocb                    *req;
75         long                            ret;
76         struct bch_read_bio             rbio;
77 };
78
79 /* pagecache_block must be held */
80 static int write_invalidate_inode_pages_range(struct address_space *mapping,
81                                               loff_t start, loff_t end)
82 {
83         int ret;
84
85         /*
86          * XXX: the way this is currently implemented, we can spin if a process
87          * is continually redirtying a specific page
88          */
89         do {
90                 if (!mapping->nrpages &&
91                     !mapping->nrexceptional)
92                         return 0;
93
94                 ret = filemap_write_and_wait_range(mapping, start, end);
95                 if (ret)
96                         break;
97
98                 if (!mapping->nrpages)
99                         return 0;
100
101                 ret = invalidate_inode_pages2_range(mapping,
102                                 start >> PAGE_SHIFT,
103                                 end >> PAGE_SHIFT);
104         } while (ret == -EBUSY);
105
106         return ret;
107 }
108
109 /* quotas */
110
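/*
 * Quota reservations: space is accounted as a preallocation when reserved,
 * and either released again or converted to real usage (in i_sectors_acct())
 * when the write completes or is abandoned.
 */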
111 #ifdef CONFIG_BCACHEFS_QUOTA
112
113 static void bch2_quota_reservation_put(struct bch_fs *c,
114                                        struct bch_inode_info *inode,
115                                        struct quota_res *res)
116 {
117         if (!res->sectors)
118                 return;
119
120         mutex_lock(&inode->ei_quota_lock);
121         BUG_ON(res->sectors > inode->ei_quota_reserved);
122
123         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
124                         -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
125         inode->ei_quota_reserved -= res->sectors;
126         mutex_unlock(&inode->ei_quota_lock);
127
128         res->sectors = 0;
129 }
130
131 static int bch2_quota_reservation_add(struct bch_fs *c,
132                                       struct bch_inode_info *inode,
133                                       struct quota_res *res,
134                                       unsigned sectors,
135                                       bool check_enospc)
136 {
137         int ret;
138
139         mutex_lock(&inode->ei_quota_lock);
140         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
141                               check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
142         if (likely(!ret)) {
143                 inode->ei_quota_reserved += sectors;
144                 res->sectors += sectors;
145         }
146         mutex_unlock(&inode->ei_quota_lock);
147
148         return ret;
149 }
150
151 #else
152
153 static void bch2_quota_reservation_put(struct bch_fs *c,
154                                        struct bch_inode_info *inode,
155                                        struct quota_res *res)
156 {
157 }
158
159 static int bch2_quota_reservation_add(struct bch_fs *c,
160                                       struct bch_inode_info *inode,
161                                       struct quota_res *res,
162                                       unsigned sectors,
163                                       bool check_enospc)
164 {
165         return 0;
166 }
167
168 #endif
169
170 /* i_size updates: */
171
172 struct inode_new_size {
173         loff_t          new_size;
174         u64             now;
175         unsigned        fields;
176 };
177
178 static int inode_set_size(struct bch_inode_info *inode,
179                           struct bch_inode_unpacked *bi,
180                           void *p)
181 {
182         struct inode_new_size *s = p;
183
184         bi->bi_size = s->new_size;
185         if (s->fields & ATTR_ATIME)
186                 bi->bi_atime = s->now;
187         if (s->fields & ATTR_MTIME)
188                 bi->bi_mtime = s->now;
189         if (s->fields & ATTR_CTIME)
190                 bi->bi_ctime = s->now;
191
192         return 0;
193 }
194
195 static int __must_check bch2_write_inode_size(struct bch_fs *c,
196                                               struct bch_inode_info *inode,
197                                               loff_t new_size, unsigned fields)
198 {
199         struct inode_new_size s = {
200                 .new_size       = new_size,
201                 .now            = bch2_current_time(c),
202                 .fields         = fields,
203         };
204
205         return bch2_write_inode(c, inode, inode_set_size, &s, fields);
206 }
207
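/*
 * Update the VFS i_blocks count: consume from a quota reservation when one is
 * supplied (i.e. the space was reserved earlier), otherwise account the change
 * directly against the inode's quota.
 */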
208 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
209                            struct quota_res *quota_res, s64 sectors)
210 {
211         if (!sectors)
212                 return;
213
214         mutex_lock(&inode->ei_quota_lock);
215 #ifdef CONFIG_BCACHEFS_QUOTA
216         if (quota_res && sectors > 0) {
217                 BUG_ON(sectors > quota_res->sectors);
218                 BUG_ON(sectors > inode->ei_quota_reserved);
219
220                 quota_res->sectors -= sectors;
221                 inode->ei_quota_reserved -= sectors;
222         } else {
223                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
224         }
225 #endif
226         inode->v.i_blocks += sectors;
227         mutex_unlock(&inode->ei_quota_lock);
228 }
229
230 /* normal i_size/i_sectors update machinery: */
231
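/*
 * Walk the existing extents overlapped by @new and compute the net change in
 * allocated sectors (for i_sectors), also noting whether the insert would
 * allocate sectors that aren't allocated yet (for -ENOSPC checking).
 */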
232 static int sum_sector_overwrites(struct btree_trans *trans,
233                                  struct btree_iter *extent_iter,
234                                  struct bkey_i *new, bool *allocating,
235                                  s64 *delta)
236 {
237         struct btree_iter *iter;
238         struct bkey_s_c old;
239
240         *delta = 0;
241
242         iter = bch2_trans_copy_iter(trans, extent_iter);
243         if (IS_ERR(iter))
244                 return PTR_ERR(iter);
245
246         old = bch2_btree_iter_peek_slot(iter);
247
248         while (1) {
249                 /*
250                  * should not be possible to get an error here, since we're
251                  * carefully not advancing past @new and thus whatever leaf node
252                  * @_iter currently points to:
253                  */
254                 BUG_ON(bkey_err(old));
255
256                 if (allocating &&
257                     !*allocating &&
258                     bch2_bkey_nr_ptrs_allocated(old) <
259                     bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new)))
260                         *allocating = true;
261
262                 *delta += (min(new->k.p.offset,
263                               old.k->p.offset) -
264                           max(bkey_start_offset(&new->k),
265                               bkey_start_offset(old.k))) *
266                         (bkey_extent_is_allocation(&new->k) -
267                          bkey_extent_is_allocation(old.k));
268
269                 if (bkey_cmp(old.k->p, new->k.p) >= 0)
270                         break;
271
272                 old = bch2_btree_iter_next_slot(iter);
273         }
274
275         bch2_trans_iter_free(trans, iter);
276         return 0;
277 }
278
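/*
 * Insert one extent and, in the same transaction commit, update the inode's
 * i_sectors and (if the write extends the file) its on-disk i_size; when
 * @direct is set the in-memory i_size and i_blocks are updated here as well.
 */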
279 static int bch2_extent_update(struct btree_trans *trans,
280                               struct bch_inode_info *inode,
281                               struct disk_reservation *disk_res,
282                               struct quota_res *quota_res,
283                               struct btree_iter *extent_iter,
284                               struct bkey_i *k,
285                               u64 new_i_size,
286                               bool may_allocate,
287                               bool direct,
288                               s64 *total_delta)
289 {
290         struct bch_fs *c = trans->c;
291         struct btree_iter *inode_iter = NULL;
292         struct bch_inode_unpacked inode_u;
293         struct bkey_inode_buf inode_p;
294         bool allocating = false;
295         bool extended = false;
296         bool inode_locked = false;
297         s64 i_sectors_delta;
298         int ret;
299
300         bch2_trans_begin_updates(trans);
301
302         ret = bch2_btree_iter_traverse(extent_iter);
303         if (ret)
304                 return ret;
305
306         bch2_extent_trim_atomic(k, extent_iter);
307
308         ret = sum_sector_overwrites(trans, extent_iter,
309                                     k, &allocating,
310                                     &i_sectors_delta);
311         if (ret)
312                 return ret;
313
314         if (!may_allocate && allocating)
315                 return -ENOSPC;
316
317         bch2_trans_update(trans, BTREE_INSERT_ENTRY(extent_iter, k));
318
319         new_i_size = min(k->k.p.offset << 9, new_i_size);
320
321         /* XXX: inode->i_size locking */
322         if (i_sectors_delta ||
323             new_i_size > inode->ei_inode.bi_size) {
324                 if (c->opts.new_inode_updates) {
325                         bch2_btree_trans_unlock(trans);
326                         mutex_lock(&inode->ei_update_lock);
327
328                         if (!bch2_btree_trans_relock(trans)) {
329                                 mutex_unlock(&inode->ei_update_lock);
330                                 return -EINTR;
331                         }
332
333                         inode_locked = true;
334
335                         if (!inode->ei_inode_update)
336                                 inode->ei_inode_update =
337                                         bch2_deferred_update_alloc(c,
338                                                                 BTREE_ID_INODES, 64);
339
340                         inode_u = inode->ei_inode;
341                         inode_u.bi_sectors += i_sectors_delta;
342
343                         /* XXX: this is slightly suspect */
344                         if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
345                             new_i_size > inode_u.bi_size) {
346                                 inode_u.bi_size = new_i_size;
347                                 extended = true;
348                         }
349
350                         bch2_inode_pack(&inode_p, &inode_u);
351                         bch2_trans_update(trans,
352                                 BTREE_INSERT_DEFERRED(inode->ei_inode_update,
353                                                       &inode_p.inode.k_i));
354                 } else {
355                         inode_iter = bch2_trans_get_iter(trans,
356                                 BTREE_ID_INODES,
357                                 POS(k->k.p.inode, 0),
358                                 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
359                         if (IS_ERR(inode_iter))
360                                 return PTR_ERR(inode_iter);
361
362                         ret = bch2_btree_iter_traverse(inode_iter);
363                         if (ret)
364                                 goto err;
365
366                         inode_u = inode->ei_inode;
367                         inode_u.bi_sectors += i_sectors_delta;
368
369                         /* XXX: this is slightly suspect */
370                         if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
371                             new_i_size > inode_u.bi_size) {
372                                 inode_u.bi_size = new_i_size;
373                                 extended = true;
374                         }
375
376                         bch2_inode_pack(&inode_p, &inode_u);
377                         bch2_trans_update(trans,
378                                 BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
379                 }
380         }
381
382         ret = bch2_trans_commit(trans, disk_res,
383                                 &inode->ei_journal_seq,
384                                 BTREE_INSERT_NOFAIL|
385                                 BTREE_INSERT_ATOMIC|
386                                 BTREE_INSERT_NOUNLOCK|
387                                 BTREE_INSERT_USE_RESERVE);
388         if (ret)
389                 goto err;
390
391         inode->ei_inode.bi_sectors += i_sectors_delta;
392
393         EBUG_ON(i_sectors_delta &&
394                 inode->ei_inode.bi_sectors != inode_u.bi_sectors);
395
396         if (extended) {
397                 inode->ei_inode.bi_size = new_i_size;
398
399                 if (direct) {
400                         spin_lock(&inode->v.i_lock);
401                         if (new_i_size > inode->v.i_size)
402                                 i_size_write(&inode->v, new_i_size);
403                         spin_unlock(&inode->v.i_lock);
404                 }
405         }
406
407         if (direct)
408                 i_sectors_acct(c, inode, quota_res, i_sectors_delta);
409
410         if (total_delta)
411                 *total_delta += i_sectors_delta;
412 err:
413         if (!IS_ERR_OR_NULL(inode_iter))
414                 bch2_trans_iter_put(trans, inode_iter);
415         if (inode_locked)
416                 mutex_unlock(&inode->ei_update_lock);
417
418         return ret;
419 }
420
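/*
 * Index update hook for filesystem writes: inserts each key from the write's
 * keylist via bch2_extent_update(), so the extent, i_size and i_sectors
 * updates commit atomically.
 */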
421 static int bchfs_write_index_update(struct bch_write_op *wop)
422 {
423         struct bch_fs *c = wop->c;
424         struct bchfs_write_op *op = container_of(wop,
425                                 struct bchfs_write_op, op);
426         struct quota_res *quota_res = op->is_dio
427                 ? &container_of(op, struct dio_write, iop)->quota_res
428                 : NULL;
429         struct bch_inode_info *inode = op->inode;
430         struct keylist *keys = &op->op.insert_keys;
431         struct bkey_i *k = bch2_keylist_front(keys);
432         struct btree_trans trans;
433         struct btree_iter *iter;
434         int ret;
435
436         BUG_ON(k->k.p.inode != inode->v.i_ino);
437
438         bch2_trans_init(&trans, c);
439         bch2_trans_preload_iters(&trans);
440
441         iter = bch2_trans_get_iter(&trans,
442                                 BTREE_ID_EXTENTS,
443                                 bkey_start_pos(&k->k),
444                                 BTREE_ITER_INTENT);
445
446         do {
447                 BKEY_PADDED(k) tmp;
448
449                 bkey_copy(&tmp.k, bch2_keylist_front(keys));
450
451                 ret = bch2_extent_update(&trans, inode,
452                                 &wop->res, quota_res,
453                                 iter, &tmp.k,
454                                 op->new_i_size,
455                                 !op->unalloc,
456                                 op->is_dio,
457                                 &op->sectors_added);
458                 if (ret == -EINTR)
459                         continue;
460                 if (ret)
461                         break;
462
463                 if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
464                         bch2_cut_front(iter->pos, bch2_keylist_front(keys));
465                 else
466                         bch2_keylist_pop_front(keys);
467         } while (!bch2_keylist_empty(keys));
468
469         bch2_trans_exit(&trans);
470
471         return ret;
472 }
473
474 static inline void bch2_fswrite_op_init(struct bchfs_write_op *op,
475                                         struct bch_fs *c,
476                                         struct bch_inode_info *inode,
477                                         struct bch_io_opts opts,
478                                         bool is_dio)
479 {
480         op->inode               = inode;
481         op->sectors_added       = 0;
482         op->is_dio              = is_dio;
483         op->unalloc             = false;
484         op->new_i_size          = U64_MAX;
485
486         bch2_write_op_init(&op->op, c, opts);
487         op->op.target           = opts.foreground_target;
488         op->op.index_update_fn  = bchfs_write_index_update;
489         op_journal_seq_set(&op->op, &inode->ei_journal_seq);
490 }
491
492 static inline struct bch_io_opts io_opts(struct bch_fs *c, struct bch_inode_info *inode)
493 {
494         struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
495
496         bch2_io_opts_apply(&opts, bch2_inode_opts_get(&inode->ei_inode));
497         return opts;
498 }
499
500 /* page state: */
501
502 /* stored in page->private: */
503
504 /*
505  * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
506  * almost protect it with the page lock, except that bch2_writepage_io_done has
507  * to update the sector counts (and does so from interrupt/bottom half context).
508  */
509 struct bch_page_state {
510 union { struct {
511         /* existing data: */
512         unsigned                sectors:PAGE_SECTOR_SHIFT + 1;
513
514         /* Uncompressed, fully allocated replicas: */
515         unsigned                nr_replicas:4;
516
517         /* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
518         unsigned                replicas_reserved:4;
519
520         /* Owns PAGE_SECTORS sized quota reservation: */
521         unsigned                quota_reserved:1;
522
523         /*
524          * Number of sectors on disk - for i_blocks
525          * Uncompressed size, not compressed size:
526          */
527         unsigned                dirty_sectors:PAGE_SECTOR_SHIFT + 1;
528 };
529         /* for cmpxchg: */
530         unsigned long           v;
531 };
532 };
533
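/*
 * Lockless read-modify-write of a bch_page_state: re-evaluate @_expr and retry
 * the cmpxchg until it succeeds, returning the old state.
 */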
534 #define page_state_cmpxchg(_ptr, _new, _expr)                           \
535 ({                                                                      \
536         unsigned long _v = READ_ONCE((_ptr)->v);                        \
537         struct bch_page_state _old;                                     \
538                                                                         \
539         do {                                                            \
540                 _old.v = _new.v = _v;                                   \
541                 _expr;                                                  \
542                                                                         \
543                 EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
544         } while (_old.v != _new.v &&                                    \
545                  (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v); \
546                                                                         \
547         _old;                                                           \
548 })
549
550 static inline struct bch_page_state *page_state(struct page *page)
551 {
552         struct bch_page_state *s = (void *) &page->private;
553
554         BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));
555
556         if (!PagePrivate(page))
557                 SetPagePrivate(page);
558
559         return s;
560 }
561
562 static inline unsigned page_res_sectors(struct bch_page_state s)
563 {
565         return s.replicas_reserved * PAGE_SECTORS;
566 }
567
568 static void __bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
569                                         struct bch_page_state s)
570 {
571         struct disk_reservation res = { .sectors = page_res_sectors(s) };
572         struct quota_res quota_res = { .sectors = s.quota_reserved ? PAGE_SECTORS : 0 };
573
574         bch2_quota_reservation_put(c, inode, &quota_res);
575         bch2_disk_reservation_put(c, &res);
576 }
577
578 static void bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
579                                       struct page *page)
580 {
581         struct bch_page_state s;
582
583         EBUG_ON(!PageLocked(page));
584
585         s = page_state_cmpxchg(page_state(page), s, {
586                 s.replicas_reserved     = 0;
587                 s.quota_reserved        = 0;
588         });
589
590         __bch2_put_page_reservation(c, inode, s);
591 }
592
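/*
 * Make sure @page holds the disk and quota reservations needed for it to be
 * dirtied: reserve up to the inode's replication level worth of disk space,
 * plus PAGE_SECTORS of quota if the page isn't already fully backed.
 */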
593 static int bch2_get_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
594                                      struct page *page, bool check_enospc)
595 {
596         struct bch_page_state *s = page_state(page), new;
597
598         /* XXX: this should not be open coded */
599         unsigned nr_replicas = inode->ei_inode.bi_data_replicas
600                 ? inode->ei_inode.bi_data_replicas - 1
601                 : c->opts.data_replicas;
602         struct disk_reservation disk_res;
603         struct quota_res quota_res = { 0 };
604         int ret = 0;
605
606         EBUG_ON(!PageLocked(page));
607
608         if (s->replicas_reserved < nr_replicas) {
609                 ret = bch2_disk_reservation_get(c, &disk_res, PAGE_SECTORS,
610                                 nr_replicas - s->replicas_reserved,
611                                 !check_enospc ? BCH_DISK_RESERVATION_NOFAIL : 0);
612                 if (unlikely(ret))
613                         return ret;
614
615                 page_state_cmpxchg(s, new, ({
616                         BUG_ON(new.replicas_reserved +
617                                disk_res.nr_replicas != nr_replicas);
618                         new.replicas_reserved += disk_res.nr_replicas;
619                 }));
620         }
621
622         if (!s->quota_reserved &&
623             s->sectors + s->dirty_sectors < PAGE_SECTORS) {
624                 ret = bch2_quota_reservation_add(c, inode, &quota_res,
625                                                  PAGE_SECTORS,
626                                                  check_enospc);
627                 if (unlikely(ret))
628                         return ret;
629
630                 page_state_cmpxchg(s, new, ({
631                         BUG_ON(new.quota_reserved);
632                         new.quota_reserved = 1;
633                 }));
634         }
635
636         return ret;
637 }
638
639 static void bch2_clear_page_bits(struct page *page)
640 {
641         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
642         struct bch_fs *c = inode->v.i_sb->s_fs_info;
643         struct bch_page_state s;
644
645         EBUG_ON(!PageLocked(page));
646
647         if (!PagePrivate(page))
648                 return;
649
650         s.v = xchg(&page_state(page)->v, 0);
651         ClearPagePrivate(page);
652
653         if (s.dirty_sectors)
654                 i_sectors_acct(c, inode, NULL, -s.dirty_sectors);
655
656         __bch2_put_page_reservation(c, inode, s);
657 }
658
659 int bch2_set_page_dirty(struct page *page)
660 {
661         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
662         struct bch_fs *c = inode->v.i_sb->s_fs_info;
663         struct quota_res quota_res = { 0 };
664         struct bch_page_state old, new;
665
666         old = page_state_cmpxchg(page_state(page), new,
667                 new.dirty_sectors = PAGE_SECTORS - new.sectors;
668                 new.quota_reserved = 0;
669         );
670
671         quota_res.sectors += old.quota_reserved * PAGE_SECTORS;
672
673         if (old.dirty_sectors != new.dirty_sectors)
674                 i_sectors_acct(c, inode, &quota_res,
675                                new.dirty_sectors - old.dirty_sectors);
676         bch2_quota_reservation_put(c, inode, &quota_res);
677
678         return __set_page_dirty_nobuffers(page);
679 }
680
681 int bch2_page_mkwrite(struct vm_fault *vmf)
682 {
683         struct page *page = vmf->page;
684         struct file *file = vmf->vma->vm_file;
685         struct bch_inode_info *inode = file_bch_inode(file);
686         struct address_space *mapping = inode->v.i_mapping;
687         struct bch_fs *c = inode->v.i_sb->s_fs_info;
688         int ret = VM_FAULT_LOCKED;
689
690         sb_start_pagefault(inode->v.i_sb);
691         file_update_time(file);
692
693         /*
694          * Not strictly necessary, but helps avoid dio writes livelocking in
695          * write_invalidate_inode_pages_range() - can drop this if/when we get
696          * a write_invalidate_inode_pages_range() that works without dropping
697          * page lock before invalidating page
698          */
699         if (current->pagecache_lock != &mapping->add_lock)
700                 pagecache_add_get(&mapping->add_lock);
701
702         lock_page(page);
703         if (page->mapping != mapping ||
704             page_offset(page) > i_size_read(&inode->v)) {
705                 unlock_page(page);
706                 ret = VM_FAULT_NOPAGE;
707                 goto out;
708         }
709
710         if (bch2_get_page_reservation(c, inode, page, true)) {
711                 unlock_page(page);
712                 ret = VM_FAULT_SIGBUS;
713                 goto out;
714         }
715
716         if (!PageDirty(page))
717                 set_page_dirty(page);
718         wait_for_stable_page(page);
719 out:
720         if (current->pagecache_lock != &mapping->add_lock)
721                 pagecache_add_put(&mapping->add_lock);
722         sb_end_pagefault(inode->v.i_sb);
723         return ret;
724 }
725
726 void bch2_invalidatepage(struct page *page, unsigned int offset,
727                          unsigned int length)
728 {
729         EBUG_ON(!PageLocked(page));
730         EBUG_ON(PageWriteback(page));
731
732         if (offset || length < PAGE_SIZE)
733                 return;
734
735         bch2_clear_page_bits(page);
736 }
737
738 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
739 {
740         /* XXX: this can't take locks that are held while we allocate memory */
741         EBUG_ON(!PageLocked(page));
742         EBUG_ON(PageWriteback(page));
743
744         if (PageDirty(page))
745                 return 0;
746
747         bch2_clear_page_bits(page);
748         return 1;
749 }
750
751 #ifdef CONFIG_MIGRATION
752 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
753                       struct page *page, enum migrate_mode mode)
754 {
755         int ret;
756
757         EBUG_ON(!PageLocked(page));
758         EBUG_ON(!PageLocked(newpage));
759
760         ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
761         if (ret != MIGRATEPAGE_SUCCESS)
762                 return ret;
763
764         if (PagePrivate(page)) {
765                 *page_state(newpage) = *page_state(page);
766                 ClearPagePrivate(page);
767         }
768
769         migrate_page_copy(newpage, page);
770         return MIGRATEPAGE_SUCCESS;
771 }
772 #endif
773
774 /* readpages/writepages: */
775
776 static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
777 {
778         sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
779
780         return bio->bi_vcnt < bio->bi_max_vecs &&
781                 bio_end_sector(bio) == offset;
782 }
783
784 static int bio_add_page_contig(struct bio *bio, struct page *page)
785 {
786         sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
787
788         EBUG_ON(!bio->bi_max_vecs);
789
790         if (!bio->bi_vcnt)
791                 bio->bi_iter.bi_sector = offset;
792         else if (!bio_can_add_page_contig(bio, page))
793                 return -1;
794
795         __bio_add_page(bio, page, PAGE_SIZE, 0);
796         return 0;
797 }
798
799 /* readpage(s): */
800
801 static void bch2_readpages_end_io(struct bio *bio)
802 {
803         struct bio_vec *bv;
804         int i;
805
806         bio_for_each_segment_all(bv, bio, i) {
807                 struct page *page = bv->bv_page;
808
809                 if (!bio->bi_status) {
810                         SetPageUptodate(page);
811                 } else {
812                         ClearPageUptodate(page);
813                         SetPageError(page);
814                 }
815                 unlock_page(page);
816         }
817
818         bio_put(bio);
819 }
820
821 static inline void page_state_init_for_read(struct page *page)
822 {
823         SetPagePrivate(page);
824         page->private = 0;
825 }
826
827 struct readpages_iter {
828         struct address_space    *mapping;
829         struct page             **pages;
830         unsigned                nr_pages;
831         unsigned                nr_added;
832         unsigned                idx;
833         pgoff_t                 offset;
834 };
835
836 static int readpages_iter_init(struct readpages_iter *iter,
837                                struct address_space *mapping,
838                                struct list_head *pages, unsigned nr_pages)
839 {
840         memset(iter, 0, sizeof(*iter));
841
842         iter->mapping   = mapping;
843         iter->offset    = list_last_entry(pages, struct page, lru)->index;
844
845         iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
846         if (!iter->pages)
847                 return -ENOMEM;
848
849         while (!list_empty(pages)) {
850                 struct page *page = list_last_entry(pages, struct page, lru);
851
852                 prefetchw(&page->flags);
853                 iter->pages[iter->nr_pages++] = page;
854                 list_del(&page->lru);
855         }
856
857         return 0;
858 }
859
860 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
861 {
862         struct page *page;
863         unsigned i;
864         int ret;
865
866         BUG_ON(iter->idx > iter->nr_added);
867         BUG_ON(iter->nr_added > iter->nr_pages);
868
869         if (iter->idx < iter->nr_added)
870                 goto out;
871
872         while (1) {
873                 if (iter->idx == iter->nr_pages)
874                         return NULL;
875
876                 ret = add_to_page_cache_lru_vec(iter->mapping,
877                                 iter->pages     + iter->nr_added,
878                                 iter->nr_pages  - iter->nr_added,
879                                 iter->offset    + iter->nr_added,
880                                 GFP_NOFS);
881                 if (ret > 0)
882                         break;
883
884                 page = iter->pages[iter->nr_added];
885                 iter->idx++;
886                 iter->nr_added++;
887
888                 put_page(page);
889         }
890
891         iter->nr_added += ret;
892
893         for (i = iter->idx; i < iter->nr_added; i++)
894                 put_page(iter->pages[i]);
895 out:
896         EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
897
898         page_state_init_for_read(iter->pages[iter->idx]);
899         return iter->pages[iter->idx];
900 }
901
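/*
 * Mark the pages covered by @bio as backed by @k: record how many sectors of
 * each page exist on disk, and how many fully allocated replicas back the
 * page (used later when deciding whether a reservation is needed).
 */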
902 static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
903 {
904         struct bvec_iter iter;
905         struct bio_vec bv;
906         unsigned nr_ptrs = bch2_bkey_nr_ptrs_allocated(k);
907
908         bio_for_each_segment(bv, bio, iter) {
909                 /* brand new pages, don't need to be locked: */
910
911                 struct bch_page_state *s = page_state(bv.bv_page);
912
913                 /* sectors in @k from the start of this page: */
914                 unsigned k_sectors = k.k->size - (iter.bi_sector - bkey_start_offset(k.k));
915
916                 unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);
917
918                 s->nr_replicas = page_sectors == PAGE_SECTORS
919                         ? nr_ptrs : 0;
920
921                 BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
922                 s->sectors += page_sectors;
923         }
924 }
925
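/*
 * Opportunistically add more pages to a read bio, up to sector @offset: pages
 * queued by readahead are used first; if @get_more is set (e.g. for
 * checksummed or compressed extents) additional pages are allocated and added
 * to the page cache.
 */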
926 static void readpage_bio_extend(struct readpages_iter *iter,
927                                 struct bio *bio, u64 offset,
928                                 bool get_more)
929 {
930         while (bio_end_sector(bio) < offset &&
931                bio->bi_vcnt < bio->bi_max_vecs) {
932                 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
933                 struct page *page = readpage_iter_next(iter);
934                 int ret;
935
936                 if (page) {
937                         if (iter->offset + iter->idx != page_offset)
938                                 break;
939
940                         iter->idx++;
941                 } else {
942                         if (!get_more)
943                                 break;
944
945                         page = xa_load(&iter->mapping->i_pages, page_offset);
946                         if (page && !xa_is_value(page))
947                                 break;
948
949                         page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
950                         if (!page)
951                                 break;
952
953                         page_state_init_for_read(page);
954
955                         ret = add_to_page_cache_lru(page, iter->mapping,
956                                                     page_offset, GFP_NOFS);
957                         if (ret) {
958                                 ClearPagePrivate(page);
959                                 put_page(page);
960                                 break;
961                         }
962
963                         put_page(page);
964                 }
965
966                 __bio_add_page(bio, page, PAGE_SIZE, 0);
967         }
968 }
969
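/*
 * Core read loop shared by readpage and readpages: walk extents starting at
 * the bio's current sector, issuing one bch2_read_extent() per extent until
 * the whole bio has been submitted (BCH_READ_LAST_FRAGMENT).
 */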
970 static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
971                        struct bch_read_bio *rbio, u64 inum,
972                        struct readpages_iter *readpages_iter)
973 {
974         struct bch_fs *c = trans->c;
975         struct bio *bio = &rbio->bio;
976         int flags = BCH_READ_RETRY_IF_STALE|
977                 BCH_READ_MAY_PROMOTE;
978
979         rbio->c = c;
980         rbio->start_time = local_clock();
981
982         while (1) {
983                 BKEY_PADDED(k) tmp;
984                 struct bkey_s_c k;
985                 unsigned bytes;
986
987                 bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
988
989                 k = bch2_btree_iter_peek_slot(iter);
990                 BUG_ON(!k.k);
991
992                 if (IS_ERR(k.k)) {
993                         int ret = btree_iter_err(iter);
994                         BUG_ON(!ret);
995                         bcache_io_error(c, bio, "btree IO error %i", ret);
996                         bio_endio(bio);
997                         return;
998                 }
999
1000                 bkey_reassemble(&tmp.k, k);
1001                 bch2_btree_trans_unlock(trans);
1002                 k = bkey_i_to_s_c(&tmp.k);
1003
1004                 if (readpages_iter) {
1005                         bool want_full_extent = false;
1006
1007                         if (bkey_extent_is_data(k.k)) {
1008                                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1009                                 const union bch_extent_entry *i;
1010                                 struct extent_ptr_decoded p;
1011
1012                                 extent_for_each_ptr_decode(e, p, i)
1013                                         want_full_extent |= ((p.crc.csum_type != 0) |
1014                                                              (p.crc.compression_type != 0));
1015                         }
1016
1017                         readpage_bio_extend(readpages_iter,
1018                                             bio, k.k->p.offset,
1019                                             want_full_extent);
1020                 }
1021
1022                 bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
1023                          bio->bi_iter.bi_sector) << 9;
1024                 swap(bio->bi_iter.bi_size, bytes);
1025
1026                 if (bytes == bio->bi_iter.bi_size)
1027                         flags |= BCH_READ_LAST_FRAGMENT;
1028
1029                 if (bkey_extent_is_allocation(k.k))
1030                         bch2_add_page_sectors(bio, k);
1031
1032                 bch2_read_extent(c, rbio, k, flags);
1033
1034                 if (flags & BCH_READ_LAST_FRAGMENT)
1035                         return;
1036
1037                 swap(bio->bi_iter.bi_size, bytes);
1038                 bio_advance(bio, bytes);
1039         }
1040 }
1041
1042 int bch2_readpages(struct file *file, struct address_space *mapping,
1043                    struct list_head *pages, unsigned nr_pages)
1044 {
1045         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1046         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1047         struct bch_io_opts opts = io_opts(c, inode);
1048         struct btree_trans trans;
1049         struct btree_iter *iter;
1050         struct page *page;
1051         struct readpages_iter readpages_iter;
1052         int ret;
1053
1054         ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
1055         BUG_ON(ret);
1056
1057         bch2_trans_init(&trans, c);
1058
1059         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
1060                                    BTREE_ITER_SLOTS);
1061
1062         if (current->pagecache_lock != &mapping->add_lock)
1063                 pagecache_add_get(&mapping->add_lock);
1064
1065         while ((page = readpage_iter_next(&readpages_iter))) {
1066                 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
1067                 unsigned n = min_t(unsigned,
1068                                    readpages_iter.nr_pages -
1069                                    readpages_iter.idx,
1070                                    BIO_MAX_PAGES);
1071                 struct bch_read_bio *rbio =
1072                         rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1073                                   opts);
1074
1075                 readpages_iter.idx++;
1076
1077                 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
1078                 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
1079                 rbio->bio.bi_end_io = bch2_readpages_end_io;
1080                 __bio_add_page(&rbio->bio, page, PAGE_SIZE, 0);
1081
1082                 bchfs_read(&trans, iter, rbio, inode->v.i_ino,
1083                            &readpages_iter);
1084         }
1085
1086         if (current->pagecache_lock != &mapping->add_lock)
1087                 pagecache_add_put(&mapping->add_lock);
1088
1089         bch2_trans_exit(&trans);
1090         kfree(readpages_iter.pages);
1091
1092         return 0;
1093 }
1094
1095 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1096                              u64 inum, struct page *page)
1097 {
1098         struct btree_trans trans;
1099         struct btree_iter *iter;
1100
1101         page_state_init_for_read(page);
1102
1103         bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1104         bio_add_page_contig(&rbio->bio, page);
1105
1106         bch2_trans_init(&trans, c);
1107         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
1108                                    BTREE_ITER_SLOTS);
1109
1110         bchfs_read(&trans, iter, rbio, inum, NULL);
1111
1112         bch2_trans_exit(&trans);
1113 }
1114
1115 int bch2_readpage(struct file *file, struct page *page)
1116 {
1117         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1118         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1119         struct bch_io_opts opts = io_opts(c, inode);
1120         struct bch_read_bio *rbio;
1121
1122         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1123         rbio->bio.bi_end_io = bch2_readpages_end_io;
1124
1125         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1126         return 0;
1127 }
1128
1129 static void bch2_read_single_page_end_io(struct bio *bio)
1130 {
1131         complete(bio->bi_private);
1132 }
1133
1134 static int bch2_read_single_page(struct page *page,
1135                                  struct address_space *mapping)
1136 {
1137         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1138         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1139         struct bch_read_bio *rbio;
1140         int ret;
1141         DECLARE_COMPLETION_ONSTACK(done);
1142
1143         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1144                          io_opts(c, inode));
1145         rbio->bio.bi_private = &done;
1146         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1147
1148         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1149         wait_for_completion(&done);
1150
1151         ret = blk_status_to_errno(rbio->bio.bi_status);
1152         bio_put(&rbio->bio);
1153
1154         if (ret < 0)
1155                 return ret;
1156
1157         SetPageUptodate(page);
1158         return 0;
1159 }
1160
1161 /* writepages: */
1162
1163 struct bch_writepage_state {
1164         struct bch_writepage_io *io;
1165         struct bch_io_opts      opts;
1166 };
1167
1168 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1169                                                                   struct bch_inode_info *inode)
1170 {
1171         return (struct bch_writepage_state) { .opts = io_opts(c, inode) };
1172 }
1173
1174 static void bch2_writepage_io_free(struct closure *cl)
1175 {
1176         struct bch_writepage_io *io = container_of(cl,
1177                                         struct bch_writepage_io, cl);
1178
1179         bio_put(&io->op.op.wbio.bio);
1180 }
1181
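/*
 * Writeback completion: reconcile i_blocks with what the index update actually
 * added, flag pages and the mapping on error, then end writeback on each page.
 */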
1182 static void bch2_writepage_io_done(struct closure *cl)
1183 {
1184         struct bch_writepage_io *io = container_of(cl,
1185                                         struct bch_writepage_io, cl);
1186         struct bch_fs *c = io->op.op.c;
1187         struct bio *bio = &io->op.op.wbio.bio;
1188         struct bio_vec *bvec;
1189         unsigned i;
1190
1191         if (io->op.op.error) {
1192                 bio_for_each_segment_all(bvec, bio, i)
1193                         SetPageError(bvec->bv_page);
1194                 set_bit(AS_EIO, &io->op.inode->v.i_mapping->flags);
1195         }
1196
1197         /*
1198          * racing with fallocate can cause us to add fewer sectors than
1199          * expected - but we shouldn't add more sectors than expected:
1200          */
1201         BUG_ON(io->op.sectors_added > (s64) io->new_sectors);
1202
1203         /*
1204          * (error (due to going RO) halfway through a page can screw that up
1205          * slightly)
1206          * XXX wtf?
1207            BUG_ON(io->op.sectors_added - io->new_sectors >= (s64) PAGE_SECTORS);
1208          */
1209
1210         /*
1211          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1212          * before calling end_page_writeback:
1213          */
1214         if (io->op.sectors_added != io->new_sectors)
1215                 i_sectors_acct(c, io->op.inode, NULL,
1216                                io->op.sectors_added - (s64) io->new_sectors);
1217
1218         bio_for_each_segment_all(bvec, bio, i)
1219                 end_page_writeback(bvec->bv_page);
1220
1221         closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1222 }
1223
1224 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1225 {
1226         struct bch_writepage_io *io = w->io;
1227
1228         w->io = NULL;
1229         closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
1230         continue_at(&io->cl, bch2_writepage_io_done, NULL);
1231 }
1232
1233 /*
1234  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1235  * possible, else allocating a new one:
1236  */
1237 static void bch2_writepage_io_alloc(struct bch_fs *c,
1238                                     struct bch_writepage_state *w,
1239                                     struct bch_inode_info *inode,
1240                                     struct page *page,
1241                                     unsigned nr_replicas)
1242 {
1243         struct bch_write_op *op;
1244         u64 offset = (u64) page->index << PAGE_SECTOR_SHIFT;
1245
1246         w->io = container_of(bio_alloc_bioset(GFP_NOFS,
1247                                               BIO_MAX_PAGES,
1248                                               &c->writepage_bioset),
1249                              struct bch_writepage_io, op.op.wbio.bio);
1250
1251         closure_init(&w->io->cl, NULL);
1252         w->io->new_sectors      = 0;
1253         bch2_fswrite_op_init(&w->io->op, c, inode, w->opts, false);
1254         op                      = &w->io->op.op;
1255         op->nr_replicas         = nr_replicas;
1256         op->res.nr_replicas     = nr_replicas;
1257         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1258         op->pos                 = POS(inode->v.i_ino, offset);
1259         op->wbio.bio.bi_iter.bi_sector = offset;
1260 }
1261
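/*
 * Write out a single dirty page: zero the part of a page straddling i_size,
 * transfer the page's reservation to the writepage io, and append the page to
 * the current bch_writepage_io (or start a new one).
 */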
1262 static int __bch2_writepage(struct page *page,
1263                             struct writeback_control *wbc,
1264                             void *data)
1265 {
1266         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1267         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1268         struct bch_writepage_state *w = data;
1269         struct bch_page_state new, old;
1270         unsigned offset, nr_replicas_this_write;
1271         loff_t i_size = i_size_read(&inode->v);
1272         pgoff_t end_index = i_size >> PAGE_SHIFT;
1273
1274         EBUG_ON(!PageUptodate(page));
1275
1276         /* Is the page fully inside i_size? */
1277         if (page->index < end_index)
1278                 goto do_io;
1279
1280         /* Is the page fully outside i_size? (truncate in progress) */
1281         offset = i_size & (PAGE_SIZE - 1);
1282         if (page->index > end_index || !offset) {
1283                 unlock_page(page);
1284                 return 0;
1285         }
1286
1287         /*
1288          * The page straddles i_size.  It must be zeroed out on each and every
1289          * writepage invocation because it may be mmapped.  "A file is mapped
1290          * in multiples of the page size.  For a file that is not a multiple of
1291          * the  page size, the remaining memory is zeroed when mapped, and
1292          * writes to that region are not written out to the file."
1293          */
1294         zero_user_segment(page, offset, PAGE_SIZE);
1295 do_io:
1296         EBUG_ON(!PageLocked(page));
1297
1298         /* Before unlocking the page, transfer reservation to w->io: */
1299         old = page_state_cmpxchg(page_state(page), new, {
1300                 /*
1301                  * If we didn't get a reservation, we can only write out the
1302                  * number of (fully allocated) replicas that currently exist,
1303                  * and only if the entire page has been written:
1304                  */
1305                 nr_replicas_this_write =
1306                         max_t(unsigned,
1307                               new.replicas_reserved,
1308                               (new.sectors == PAGE_SECTORS
1309                                ? new.nr_replicas : 0));
1310
1311                 BUG_ON(!nr_replicas_this_write);
1312
1313                 new.nr_replicas = w->opts.compression
1314                         ? 0
1315                         : nr_replicas_this_write;
1316
1317                 new.replicas_reserved = 0;
1318
1319                 new.sectors += new.dirty_sectors;
1320                 BUG_ON(new.sectors != PAGE_SECTORS);
1321                 new.dirty_sectors = 0;
1322         });
1323
1324         BUG_ON(PageWriteback(page));
1325         set_page_writeback(page);
1326         unlock_page(page);
1327
1328         if (w->io &&
1329             (w->io->op.op.res.nr_replicas != nr_replicas_this_write ||
1330              !bio_can_add_page_contig(&w->io->op.op.wbio.bio, page)))
1331                 bch2_writepage_do_io(w);
1332
1333         if (!w->io)
1334                 bch2_writepage_io_alloc(c, w, inode, page,
1335                                         nr_replicas_this_write);
1336
1337         w->io->new_sectors += new.sectors - old.sectors;
1338
1339         BUG_ON(inode != w->io->op.inode);
1340         BUG_ON(bio_add_page_contig(&w->io->op.op.wbio.bio, page));
1341
1342         w->io->op.op.res.sectors += old.replicas_reserved * PAGE_SECTORS;
1343         w->io->op.new_i_size = i_size;
1344
1345         if (wbc->sync_mode == WB_SYNC_ALL)
1346                 w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;
1347
1348         return 0;
1349 }
1350
1351 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1352 {
1353         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1354         struct bch_writepage_state w =
1355                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1356         struct blk_plug plug;
1357         int ret;
1358
1359         blk_start_plug(&plug);
1360         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1361         if (w.io)
1362                 bch2_writepage_do_io(&w);
1363         blk_finish_plug(&plug);
1364         return ret;
1365 }
1366
1367 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1368 {
1369         struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1370         struct bch_writepage_state w =
1371                 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1372         int ret;
1373
1374         ret = __bch2_writepage(page, wbc, &w);
1375         if (w.io)
1376                 bch2_writepage_do_io(&w);
1377
1378         return ret;
1379 }
1380
1381 /* buffered writes: */
1382
1383 int bch2_write_begin(struct file *file, struct address_space *mapping,
1384                      loff_t pos, unsigned len, unsigned flags,
1385                      struct page **pagep, void **fsdata)
1386 {
1387         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1388         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1389         pgoff_t index = pos >> PAGE_SHIFT;
1390         unsigned offset = pos & (PAGE_SIZE - 1);
1391         struct page *page;
1392         int ret = -ENOMEM;
1393
1394         BUG_ON(inode_unhashed(&inode->v));
1395
1396         /* Not strictly necessary - same reason as mkwrite(): */
1397         pagecache_add_get(&mapping->add_lock);
1398
1399         page = grab_cache_page_write_begin(mapping, index, flags);
1400         if (!page)
1401                 goto err_unlock;
1402
1403         if (PageUptodate(page))
1404                 goto out;
1405
1406         /* If we're writing entire page, don't need to read it in first: */
1407         if (len == PAGE_SIZE)
1408                 goto out;
1409
1410         if (!offset && pos + len >= inode->v.i_size) {
1411                 zero_user_segment(page, len, PAGE_SIZE);
1412                 flush_dcache_page(page);
1413                 goto out;
1414         }
1415
1416         if (index > inode->v.i_size >> PAGE_SHIFT) {
1417                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1418                 flush_dcache_page(page);
1419                 goto out;
1420         }
1421 readpage:
1422         ret = bch2_read_single_page(page, mapping);
1423         if (ret)
1424                 goto err;
1425 out:
1426         ret = bch2_get_page_reservation(c, inode, page, true);
1427         if (ret) {
1428                 if (!PageUptodate(page)) {
1429                         /*
1430                          * If the page hasn't been read in, we won't know if we
1431                          * actually need a reservation - we don't actually need
1432                          * to read here, we just need to check if the page is
1433                          * fully backed by uncompressed data:
1434                          */
1435                         goto readpage;
1436                 }
1437
1438                 goto err;
1439         }
1440
1441         *pagep = page;
1442         return 0;
1443 err:
1444         unlock_page(page);
1445         put_page(page);
1446         *pagep = NULL;
1447 err_unlock:
1448         pagecache_add_put(&mapping->add_lock);
1449         return ret;
1450 }
1451
1452 int bch2_write_end(struct file *file, struct address_space *mapping,
1453                    loff_t pos, unsigned len, unsigned copied,
1454                    struct page *page, void *fsdata)
1455 {
1456         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1457         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1458
1459         lockdep_assert_held(&inode->v.i_rwsem);
1460
1461         if (unlikely(copied < len && !PageUptodate(page))) {
1462                 /*
1463                  * The page needs to be read in, but that would destroy
1464                  * our partial write - simplest thing is to just force
1465                  * userspace to redo the write:
1466                  */
1467                 zero_user(page, 0, PAGE_SIZE);
1468                 flush_dcache_page(page);
1469                 copied = 0;
1470         }
1471
1472         spin_lock(&inode->v.i_lock);
1473         if (pos + copied > inode->v.i_size)
1474                 i_size_write(&inode->v, pos + copied);
1475         spin_unlock(&inode->v.i_lock);
1476
1477         if (copied) {
1478                 if (!PageUptodate(page))
1479                         SetPageUptodate(page);
1480                 if (!PageDirty(page))
1481                         set_page_dirty(page);
1482
1483                 inode->ei_last_dirtied = (unsigned long) current;
1484         } else {
1485                 bch2_put_page_reservation(c, inode, page);
1486         }
1487
1488         unlock_page(page);
1489         put_page(page);
1490         pagecache_add_put(&mapping->add_lock);
1491
1492         return copied;
1493 }
1494
1495 #define WRITE_BATCH_PAGES       32
1496
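/*
 * Write a batch of up to WRITE_BATCH_PAGES pages in one pass: grab the pages,
 * read in any partially-overwritten edge pages, take per-page reservations,
 * copy from the iov_iter, then mark what was copied uptodate and dirty.
 */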
1497 static int __bch2_buffered_write(struct bch_inode_info *inode,
1498                                  struct address_space *mapping,
1499                                  struct iov_iter *iter,
1500                                  loff_t pos, unsigned len)
1501 {
1502         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1503         struct page *pages[WRITE_BATCH_PAGES];
1504         unsigned long index = pos >> PAGE_SHIFT;
1505         unsigned offset = pos & (PAGE_SIZE - 1);
1506         unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1507         unsigned i, copied = 0, nr_pages_copied = 0;
1508         int ret = 0;
1509
1510         BUG_ON(!len);
1511         BUG_ON(nr_pages > ARRAY_SIZE(pages));
1512
1513         for (i = 0; i < nr_pages; i++) {
1514                 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1515                 if (!pages[i]) {
1516                         nr_pages = i;
1517                         ret = -ENOMEM;
1518                         goto out;
1519                 }
1520         }
1521
1522         if (offset && !PageUptodate(pages[0])) {
1523                 ret = bch2_read_single_page(pages[0], mapping);
1524                 if (ret)
1525                         goto out;
1526         }
1527
1528         if ((pos + len) & (PAGE_SIZE - 1) &&
1529             !PageUptodate(pages[nr_pages - 1])) {
1530                 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1531                         zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1532                 } else {
1533                         ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1534                         if (ret)
1535                                 goto out;
1536                 }
1537         }
1538
1539         for (i = 0; i < nr_pages; i++) {
1540                 ret = bch2_get_page_reservation(c, inode, pages[i], true);
1541
1542                 if (ret && !PageUptodate(pages[i])) {
1543                         ret = bch2_read_single_page(pages[i], mapping);
1544                         if (ret)
1545                                 goto out;
1546
1547                         ret = bch2_get_page_reservation(c, inode, pages[i], true);
1548                 }
1549
1550                 if (ret)
1551                         goto out;
1552         }
1553
1554         if (mapping_writably_mapped(mapping))
1555                 for (i = 0; i < nr_pages; i++)
1556                         flush_dcache_page(pages[i]);
1557
1558         while (copied < len) {
1559                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1560                 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1561                 unsigned pg_bytes = min_t(unsigned, len - copied,
1562                                           PAGE_SIZE - pg_offset);
1563                 unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
1564                                                 iter, pg_offset, pg_bytes);
1565
1566                 if (!pg_copied)
1567                         break;
1568
1569                 flush_dcache_page(page);
1570                 iov_iter_advance(iter, pg_copied);
1571                 copied += pg_copied;
1572         }
1573
1574         if (!copied)
1575                 goto out;
1576
1577         nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1578         inode->ei_last_dirtied = (unsigned long) current;
1579
1580         spin_lock(&inode->v.i_lock);
1581         if (pos + copied > inode->v.i_size)
1582                 i_size_write(&inode->v, pos + copied);
1583         spin_unlock(&inode->v.i_lock);
1584
1585         if (copied < len &&
1586             ((offset + copied) & (PAGE_SIZE - 1))) {
1587                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1588
1589                 if (!PageUptodate(page)) {
1590                         zero_user(page, 0, PAGE_SIZE);
1591                         copied -= (offset + copied) & (PAGE_SIZE - 1);
1592                 }
1593         }
1594 out:
1595         for (i = 0; i < nr_pages_copied; i++) {
1596                 if (!PageUptodate(pages[i]))
1597                         SetPageUptodate(pages[i]);
1598                 if (!PageDirty(pages[i]))
1599                         set_page_dirty(pages[i]);
1600                 unlock_page(pages[i]);
1601                 put_page(pages[i]);
1602         }
1603
1604         for (i = nr_pages_copied; i < nr_pages; i++) {
1605                 if (!PageDirty(pages[i]))
1606                         bch2_put_page_reservation(c, inode, pages[i]);
1607                 unlock_page(pages[i]);
1608                 put_page(pages[i]);
1609         }
1610
1611         return copied ?: ret;
1612 }
1613
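/*
 * Buffered write loop: faults in the userspace source pages, then feeds the
 * iov_iter to __bch2_buffered_write() in chunks of up to WRITE_BATCH_PAGES
 * pages, falling back to single segment copies when an atomic usercopy makes
 * no progress (see below).
 */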
1614 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1615 {
1616         struct file *file = iocb->ki_filp;
1617         struct address_space *mapping = file->f_mapping;
1618         struct bch_inode_info *inode = file_bch_inode(file);
1619         loff_t pos = iocb->ki_pos;
1620         ssize_t written = 0;
1621         int ret = 0;
1622
1623         pagecache_add_get(&mapping->add_lock);
1624
1625         do {
1626                 unsigned offset = pos & (PAGE_SIZE - 1);
1627                 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1628                               PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1629 again:
1630                 /*
1631                  * Bring in the user page that we will copy from _first_.
1632                  * Otherwise there's a nasty deadlock on copying from the
1633                  * same page as we're writing to, without it being marked
1634                  * up-to-date.
1635                  *
1636                  * Not only is this an optimisation, but it is also required
1637                  * to check that the address is actually valid, when atomic
1638                  * usercopies are used, below.
1639                  */
1640                 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1641                         bytes = min_t(unsigned long, iov_iter_count(iter),
1642                                       PAGE_SIZE - offset);
1643
1644                         if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1645                                 ret = -EFAULT;
1646                                 break;
1647                         }
1648                 }
1649
1650                 if (unlikely(fatal_signal_pending(current))) {
1651                         ret = -EINTR;
1652                         break;
1653                 }
1654
1655                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1656                 if (unlikely(ret < 0))
1657                         break;
1658
1659                 cond_resched();
1660
1661                 if (unlikely(ret == 0)) {
1662                         /*
1663                          * If we were unable to copy any data at all, we must
1664                          * fall back to a single segment length write.
1665                          *
1666                  * If we didn't fall back here, we could livelock
1667                          * because not all segments in the iov can be copied at
1668                          * once without a pagefault.
1669                          */
1670                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
1671                                       iov_iter_single_seg_count(iter));
1672                         goto again;
1673                 }
1674                 pos += ret;
1675                 written += ret;
1676
1677                 balance_dirty_pages_ratelimited(mapping);
1678         } while (iov_iter_count(iter));
1679
1680         pagecache_add_put(&mapping->add_lock);
1681
1682         return written ? written : ret;
1683 }
1684
1685 /* O_DIRECT reads */
1686
1687 static void bch2_dio_read_complete(struct closure *cl)
1688 {
1689         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1690
1691         dio->req->ki_complete(dio->req, dio->ret, 0);
1692         bio_check_pages_dirty(&dio->rbio.bio);  /* transfers ownership */
1693 }
1694
1695 static void bch2_direct_IO_read_endio(struct bio *bio)
1696 {
1697         struct dio_read *dio = bio->bi_private;
1698
1699         if (bio->bi_status)
1700                 dio->ret = blk_status_to_errno(bio->bi_status);
1701
1702         closure_put(&dio->cl);
1703 }
1704
1705 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1706 {
1707         bch2_direct_IO_read_endio(bio);
1708         bio_check_pages_dirty(bio);     /* transfers ownership */
1709 }
1710
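/*
 * O_DIRECT reads: the request is shortened to i_size and split into one or
 * more bios; the first bio is embedded in a struct dio_read (from
 * c->dio_read_bioset) that carries the completion closure. Synchronous kiocbs
 * wait on the closure here; asynchronous ones complete via
 * bch2_dio_read_complete() when the last bio finishes.
 */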
1711 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1712 {
1713         struct file *file = req->ki_filp;
1714         struct bch_inode_info *inode = file_bch_inode(file);
1715         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1716         struct bch_io_opts opts = io_opts(c, inode);
1717         struct dio_read *dio;
1718         struct bio *bio;
1719         loff_t offset = req->ki_pos;
1720         bool sync = is_sync_kiocb(req);
1721         size_t shorten;
1722         ssize_t ret;
1723
1724         if ((offset|iter->count) & (block_bytes(c) - 1))
1725                 return -EINVAL;
1726
1727         ret = min_t(loff_t, iter->count,
1728                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1729
1730         if (!ret)
1731                 return ret;
1732
1733         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1734         iter->count -= shorten;
1735
1736         bio = bio_alloc_bioset(GFP_KERNEL,
1737                                iov_iter_npages(iter, BIO_MAX_PAGES),
1738                                &c->dio_read_bioset);
1739
1740         bio->bi_end_io = bch2_direct_IO_read_endio;
1741
1742         dio = container_of(bio, struct dio_read, rbio.bio);
1743         closure_init(&dio->cl, NULL);
1744
1745         /*
1746          * this is a _really_ horrible hack just to avoid an atomic sub at the
1747          * end:
1748          */
1749         if (!sync) {
1750                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1751                 atomic_set(&dio->cl.remaining,
1752                            CLOSURE_REMAINING_INITIALIZER -
1753                            CLOSURE_RUNNING +
1754                            CLOSURE_DESTRUCTOR);
1755         } else {
1756                 atomic_set(&dio->cl.remaining,
1757                            CLOSURE_REMAINING_INITIALIZER + 1);
1758         }
1759
1760         dio->req        = req;
1761         dio->ret        = ret;
1762
1763         goto start;
1764         while (iter->count) {
1765                 bio = bio_alloc_bioset(GFP_KERNEL,
1766                                        iov_iter_npages(iter, BIO_MAX_PAGES),
1767                                        &c->bio_read);
1768                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1769 start:
1770                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1771                 bio->bi_iter.bi_sector  = offset >> 9;
1772                 bio->bi_private         = dio;
1773
1774                 ret = bio_iov_iter_get_pages(bio, iter);
1775                 if (ret < 0) {
1776                         /* XXX: fault inject this path */
1777                         bio->bi_status = BLK_STS_RESOURCE;
1778                         bio_endio(bio);
1779                         break;
1780                 }
1781
1782                 offset += bio->bi_iter.bi_size;
1783                 bio_set_pages_dirty(bio);
1784
1785                 if (iter->count)
1786                         closure_get(&dio->cl);
1787
1788                 bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
1789         }
1790
1791         iter->count += shorten;
1792
1793         if (sync) {
1794                 closure_sync(&dio->cl);
1795                 closure_debug_destroy(&dio->cl);
1796                 ret = dio->ret;
1797                 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1798                 return ret;
1799         } else {
1800                 return -EIOCBQUEUED;
1801         }
1802 }
1803
1804 /* O_DIRECT writes */
1805
1806 static void bch2_dio_write_loop_async(struct closure *);
1807
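/*
 * Main O_DIRECT write loop: write back and invalidate the pagecache over the
 * range being written, pin the user pages with bio_iov_iter_get_pages(), and
 * issue bch2_write() - repeating until the iov_iter is drained or we hit an
 * error. For asynchronous writes we return -EIOCBQUEUED after submitting the
 * first write and re-enter the loop from bch2_dio_write_loop_async() when each
 * write completes; the caller's iovec is copied so it can outlive the
 * submitting syscall.
 */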
1808 static long bch2_dio_write_loop(struct dio_write *dio)
1809 {
1810         bool kthread = (current->flags & PF_KTHREAD) != 0;
1811         struct kiocb *req = dio->req;
1812         struct address_space *mapping = req->ki_filp->f_mapping;
1813         struct bch_inode_info *inode = dio->iop.inode;
1814         struct bio *bio = &dio->iop.op.wbio.bio;
1815         struct bio_vec *bv;
1816         loff_t offset;
1817         bool sync;
1818         long ret;
1819         int i;
1820
1821         if (dio->loop)
1822                 goto loop;
1823
1824         inode_dio_begin(&inode->v);
1825         __pagecache_block_get(&mapping->add_lock);
1826
1827         /* Write and invalidate pagecache range that we're writing to: */
1828         offset = req->ki_pos + (dio->iop.op.written << 9);
1829         ret = write_invalidate_inode_pages_range(mapping,
1830                                         offset,
1831                                         offset + iov_iter_count(&dio->iter) - 1);
1832         if (unlikely(ret))
1833                 goto err;
1834
1835         while (1) {
1836                 offset = req->ki_pos + (dio->iop.op.written << 9);
1837
1838                 BUG_ON(current->pagecache_lock);
1839                 current->pagecache_lock = &mapping->add_lock;
1840                 if (kthread)
1841                         use_mm(dio->mm);
1842
1843                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
1844
1845                 if (kthread)
1846                         unuse_mm(dio->mm);
1847                 current->pagecache_lock = NULL;
1848
1849                 if (unlikely(ret < 0))
1850                         goto err;
1851
1852                 /* gup might have faulted pages back in: */
1853                 ret = write_invalidate_inode_pages_range(mapping,
1854                                 offset,
1855                                 offset + bio->bi_iter.bi_size - 1);
1856                 if (unlikely(ret))
1857                         goto err;
1858
1859                 dio->iop.op.pos = POS(inode->v.i_ino, offset >> 9);
1860
1861                 task_io_account_write(bio->bi_iter.bi_size);
1862
1863                 closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
1864
1865                 if (!dio->sync && !dio->loop && dio->iter.count) {
1866                         struct iovec *iov = dio->inline_vecs;
1867
1868                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
1869                                 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
1870                                               GFP_KERNEL);
1871                                 if (unlikely(!iov)) {
1872                                         dio->iop.op.error = -ENOMEM;
1873                                         goto err_wait_io;
1874                                 }
1875
1876                                 dio->free_iov = true;
1877                         }
1878
1879                         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
1880                         dio->iter.iov = iov;
1881                 }
1882 err_wait_io:
1883                 dio->loop = true;
1884
1885                 if (!dio->sync) {
1886                         continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
1887                         return -EIOCBQUEUED;
1888                 }
1889
1890                 closure_sync(&dio->cl);
1891 loop:
1892                 bio_for_each_segment_all(bv, bio, i)
1893                         put_page(bv->bv_page);
1894                 if (!dio->iter.count || dio->iop.op.error)
1895                         break;
1896                 bio_reset(bio);
1897         }
1898
1899         ret = dio->iop.op.error ?: ((long) dio->iop.op.written << 9);
1900 err:
1901         __pagecache_block_put(&mapping->add_lock);
1902         bch2_disk_reservation_put(dio->iop.op.c, &dio->iop.op.res);
1903         bch2_quota_reservation_put(dio->iop.op.c, inode, &dio->quota_res);
1904
1905         if (dio->free_iov)
1906                 kfree(dio->iter.iov);
1907
1908         closure_debug_destroy(&dio->cl);
1909
1910         sync = dio->sync;
1911         bio_put(bio);
1912
1913         /* inode->i_dio_count is our ref on inode and thus bch_fs */
1914         inode_dio_end(&inode->v);
1915
1916         if (!sync) {
1917                 req->ki_complete(req, ret, 0);
1918                 ret = -EIOCBQUEUED;
1919         }
1920         return ret;
1921 }
1922
1923 static void bch2_dio_write_loop_async(struct closure *cl)
1924 {
1925         struct dio_write *dio = container_of(cl, struct dio_write, cl);
1926
1927         bch2_dio_write_loop(dio);
1928 }
1929
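/*
 * Set up a struct dio_write for an O_DIRECT write: take quota and disk
 * reservations up front. If the disk reservation fails but the range being
 * overwritten is already fully allocated with enough replicas, the write can
 * still proceed without a reservation (dio->iop.unalloc).
 */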
1930 static int bch2_direct_IO_write(struct kiocb *req,
1931                                 struct iov_iter *iter,
1932                                 bool swap)
1933 {
1934         struct file *file = req->ki_filp;
1935         struct bch_inode_info *inode = file_bch_inode(file);
1936         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1937         struct dio_write *dio;
1938         struct bio *bio;
1939         ssize_t ret;
1940
1941         lockdep_assert_held(&inode->v.i_rwsem);
1942
1943         if (unlikely(!iter->count))
1944                 return 0;
1945
1946         if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
1947                 return -EINVAL;
1948
1949         bio = bio_alloc_bioset(GFP_KERNEL,
1950                                iov_iter_npages(iter, BIO_MAX_PAGES),
1951                                &c->dio_write_bioset);
1952         dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
1953         closure_init(&dio->cl, NULL);
1954         dio->req                = req;
1955         dio->mm                 = current->mm;
1956         dio->loop               = false;
1957         dio->sync               = is_sync_kiocb(req) ||
1958                 req->ki_pos + iter->count > inode->v.i_size;
1959         dio->free_iov           = false;
1960         dio->quota_res.sectors  = 0;
1961         dio->iter               = *iter;
1962         bch2_fswrite_op_init(&dio->iop, c, inode, io_opts(c, inode), true);
1963         dio->iop.op.write_point = writepoint_hashed((unsigned long) current);
1964         dio->iop.op.flags |= BCH_WRITE_NOPUT_RESERVATION;
1965
1966         if ((req->ki_flags & IOCB_DSYNC) &&
1967             !c->opts.journal_flush_disabled)
1968                 dio->iop.op.flags |= BCH_WRITE_FLUSH;
1969
1970         ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
1971                                          iter->count >> 9, true);
1972         if (unlikely(ret))
1973                 goto err;
1974
1975         dio->iop.op.nr_replicas = dio->iop.op.opts.data_replicas;
1976
1977         ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9,
1978                                         dio->iop.op.opts.data_replicas, 0);
1979         if (unlikely(ret)) {
1980                 if (!bch2_check_range_allocated(c, POS(inode->v.i_ino,
1981                                                        req->ki_pos >> 9),
1982                                                 iter->count >> 9,
1983                                                 dio->iop.op.opts.data_replicas))
1984                         goto err;
1985
1986                 dio->iop.unalloc = true;
1987         }
1988
1989         return bch2_dio_write_loop(dio);
1990 err:
1991         bch2_disk_reservation_put(c, &dio->iop.op.res);
1992         bch2_quota_reservation_put(c, inode, &dio->quota_res);
1993         closure_debug_destroy(&dio->cl);
1994         bio_put(bio);
1995         return ret;
1996 }
1997
1998 ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
1999 {
2000         struct blk_plug plug;
2001         ssize_t ret;
2002
2003         blk_start_plug(&plug);
2004         ret = iov_iter_rw(iter) == WRITE
2005                 ? bch2_direct_IO_write(req, iter, false)
2006                 : bch2_direct_IO_read(req, iter);
2007         blk_finish_plug(&plug);
2008
2009         return ret;
2010 }
2011
2012 static ssize_t
2013 bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
2014 {
2015         return bch2_direct_IO_write(iocb, iter, true);
2016 }
2017
2018 static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2019 {
2020         struct file *file = iocb->ki_filp;
2021         struct bch_inode_info *inode = file_bch_inode(file);
2022         ssize_t ret;
2023
2024         /* We can write back this queue in page reclaim */
2025         current->backing_dev_info = inode_to_bdi(&inode->v);
2026         ret = file_remove_privs(file);
2027         if (ret)
2028                 goto out;
2029
2030         ret = file_update_time(file);
2031         if (ret)
2032                 goto out;
2033
2034         ret = iocb->ki_flags & IOCB_DIRECT
2035                 ? bch2_direct_write(iocb, from)
2036                 : bch2_buffered_write(iocb, from);
2037
2038         if (likely(ret > 0))
2039                 iocb->ki_pos += ret;
2040 out:
2041         current->backing_dev_info = NULL;
2042         return ret;
2043 }
2044
2045 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2046 {
2047         struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
2048         bool direct = iocb->ki_flags & IOCB_DIRECT;
2049         ssize_t ret;
2050
2051         inode_lock(&inode->v);
2052         ret = generic_write_checks(iocb, from);
2053         if (ret > 0)
2054                 ret = __bch2_write_iter(iocb, from);
2055         inode_unlock(&inode->v);
2056
2057         if (ret > 0 && !direct)
2058                 ret = generic_write_sync(iocb, ret);
2059
2060         return ret;
2061 }
2062
2063 /* fsync: */
2064
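/*
 * fsync: write back and wait on dirty pages, sync inode metadata if required,
 * then flush the journal up to the last sequence number a write to this inode
 * was journalled at.
 */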
2065 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2066 {
2067         struct bch_inode_info *inode = file_bch_inode(file);
2068         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2069         int ret, ret2;
2070
2071         ret = file_write_and_wait_range(file, start, end);
2072         if (ret)
2073                 return ret;
2074
2075         if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
2076                 goto out;
2077
2078         ret = sync_inode_metadata(&inode->v, 1);
2079         if (ret)
2080                 return ret;
2081 out:
2082         if (c->opts.journal_flush_disabled)
2083                 return 0;
2084
2085         ret = bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);
2086         ret2 = file_check_and_advance_wb_err(file);
2087
2088         return ret ?: ret2;
2089 }
2090
2091 /* truncate: */
2092
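/*
 * Delete extents from start_offset to end_offset (both in sectors): repeatedly
 * insert the largest whiteout (deleted) key that covers the remaining range
 * via bch2_extent_update(), retrying on -EINTR (transaction restart).
 */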
2093 static int __bch2_fpunch(struct bch_fs *c, struct bch_inode_info *inode,
2094                          u64 start_offset, u64 end_offset, u64 *journal_seq)
2095 {
2096         struct bpos start       = POS(inode->v.i_ino, start_offset);
2097         struct bpos end         = POS(inode->v.i_ino, end_offset);
2098         unsigned max_sectors    = KEY_SIZE_MAX & (~0 << c->block_bits);
2099         struct btree_trans trans;
2100         struct btree_iter *iter;
2101         struct bkey_s_c k;
2102         int ret = 0;
2103
2104         bch2_trans_init(&trans, c);
2105         bch2_trans_preload_iters(&trans);
2106
2107         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, start,
2108                                    BTREE_ITER_INTENT);
2109
2110         while ((k = bch2_btree_iter_peek(iter)).k &&
2111                !(ret = bkey_err(k)) &&
2112                bkey_cmp(iter->pos, end) < 0) {
2113                 struct disk_reservation disk_res =
2114                         bch2_disk_reservation_init(c, 0);
2115                 struct bkey_i delete;
2116
2117                 bkey_init(&delete.k);
2118                 delete.k.p = iter->pos;
2119
2120                 /* create the biggest key we can */
2121                 bch2_key_resize(&delete.k, max_sectors);
2122                 bch2_cut_back(end, &delete.k);
2123
2124                 ret = bch2_extent_update(&trans, inode,
2125                                 &disk_res, NULL, iter, &delete,
2126                                 0, true, true, NULL);
2127                 bch2_disk_reservation_put(c, &disk_res);
2128
2129                 if (ret == -EINTR)
2130                         ret = 0;
2131                 if (ret)
2132                         break;
2133
2134                 bch2_trans_cond_resched(&trans);
2135         }
2136
2137         bch2_trans_exit(&trans);
2138
2139         return ret;
2140 }
2141
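/*
 * Returns nonzero if any data extents exist between start and end - used to
 * decide whether a page that isn't in the page cache needs to be read in
 * before being partially zeroed.
 */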
2142 static inline int range_has_data(struct bch_fs *c,
2143                                   struct bpos start,
2144                                   struct bpos end)
2145 {
2146         struct btree_trans trans;
2147         struct btree_iter *iter;
2148         struct bkey_s_c k;
2149         int ret = 0;
2150
2151         bch2_trans_init(&trans, c);
2152
2153         for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k) {
2154                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2155                         break;
2156
2157                 if (bkey_extent_is_data(k.k)) {
2158                         ret = 1;
2159                         break;
2160                 }
2161         }
2162
2163         return bch2_trans_exit(&trans) ?: ret;
2164 }
2165
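/*
 * Zero out the part of page 'index' that falls within [start, end) - the range
 * being truncated away or punched out. If the page isn't cached and the range
 * has no data on disk there's nothing to do; otherwise read the page in, zero
 * the affected part and dirty it so the zeroes get written back.
 */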
2166 static int __bch2_truncate_page(struct bch_inode_info *inode,
2167                                 pgoff_t index, loff_t start, loff_t end)
2168 {
2169         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2170         struct address_space *mapping = inode->v.i_mapping;
2171         unsigned start_offset = start & (PAGE_SIZE - 1);
2172         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2173         struct page *page;
2174         int ret = 0;
2175
2176         /* Page boundary? Nothing to do */
2177         if (!((index == start >> PAGE_SHIFT && start_offset) ||
2178               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2179                 return 0;
2180
2181         /* Above i_size? */
2182         if (index << PAGE_SHIFT >= inode->v.i_size)
2183                 return 0;
2184
2185         page = find_lock_page(mapping, index);
2186         if (!page) {
2187                 /*
2188                  * XXX: we're doing two index lookups when we end up reading the
2189                  * page
2190                  */
2191                 ret = range_has_data(c,
2192                                 POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
2193                                 POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
2194                 if (ret <= 0)
2195                         return ret;
2196
2197                 page = find_or_create_page(mapping, index, GFP_KERNEL);
2198                 if (unlikely(!page)) {
2199                         ret = -ENOMEM;
2200                         goto out;
2201                 }
2202         }
2203
2204         if (!PageUptodate(page)) {
2205                 ret = bch2_read_single_page(page, mapping);
2206                 if (ret)
2207                         goto unlock;
2208         }
2209
2210         /*
2211          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2212          *
2213          * XXX: because we aren't currently tracking whether the page has actual
2214          * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2215          */
2216         ret = bch2_get_page_reservation(c, inode, page, false);
2217         BUG_ON(ret);
2218
2219         if (index == start >> PAGE_SHIFT &&
2220             index == end >> PAGE_SHIFT)
2221                 zero_user_segment(page, start_offset, end_offset);
2222         else if (index == start >> PAGE_SHIFT)
2223                 zero_user_segment(page, start_offset, PAGE_SIZE);
2224         else if (index == end >> PAGE_SHIFT)
2225                 zero_user_segment(page, 0, end_offset);
2226
2227         if (!PageDirty(page))
2228                 set_page_dirty(page);
2229 unlock:
2230         unlock_page(page);
2231         put_page(page);
2232 out:
2233         return ret;
2234 }
2235
2236 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2237 {
2238         return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2239                                     from, from + PAGE_SIZE);
2240 }
2241
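/*
 * Extending truncate: write back anything past the old on-disk i_size, then
 * update i_size both in memory and on disk - no extents need to change.
 */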
2242 static int bch2_extend(struct bch_inode_info *inode, struct iattr *iattr)
2243 {
2244         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2245         struct address_space *mapping = inode->v.i_mapping;
2246         int ret;
2247
2248         ret = filemap_write_and_wait_range(mapping,
2249                         inode->ei_inode.bi_size, S64_MAX);
2250         if (ret)
2251                 return ret;
2252
2253         truncate_setsize(&inode->v, iattr->ia_size);
2254         setattr_copy(&inode->v, iattr);
2255
2256         mutex_lock(&inode->ei_update_lock);
2257         ret = bch2_write_inode_size(c, inode, inode->v.i_size,
2258                                     ATTR_MTIME|ATTR_CTIME);
2259         mutex_unlock(&inode->ei_update_lock);
2260
2261         return ret;
2262 }
2263
2264 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2265                                    struct bch_inode_unpacked *bi,
2266                                    void *p)
2267 {
2268         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2269
2270         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2271         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
2272         return 0;
2273 }
2274
2275 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2276                                   struct bch_inode_unpacked *bi, void *p)
2277 {
2278         u64 *new_i_size = p;
2279
2280         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2281         bi->bi_size = *new_i_size;
2282         return 0;
2283 }
2284
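/*
 * Truncate: growing the file is handled by bch2_extend(). When shrinking, we
 * zero the new partial EOF page, write back any pages straddling the new EOF,
 * set BCH_INODE_I_SIZE_DIRTY with the new size (so an interrupted truncate can
 * be recovered), drop the pagecache past the new size, punch the extents, and
 * finally clear the flag and update mtime/ctime.
 */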
2285 int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
2286 {
2287         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2288         struct address_space *mapping = inode->v.i_mapping;
2289         u64 new_i_size = iattr->ia_size;
2290         bool shrink;
2291         int ret = 0;
2292
2293         inode_dio_wait(&inode->v);
2294         pagecache_block_get(&mapping->add_lock);
2295
2296         BUG_ON(inode->v.i_size < inode->ei_inode.bi_size);
2297
2298         shrink = iattr->ia_size <= inode->v.i_size;
2299
2300         if (!shrink) {
2301                 ret = bch2_extend(inode, iattr);
2302                 goto err;
2303         }
2304
2305         ret = bch2_truncate_page(inode, iattr->ia_size);
2306         if (unlikely(ret))
2307                 goto err;
2308
2309         if (iattr->ia_size > inode->ei_inode.bi_size)
2310                 ret = filemap_write_and_wait_range(mapping,
2311                                 inode->ei_inode.bi_size,
2312                                 iattr->ia_size - 1);
2313         else if (iattr->ia_size & (PAGE_SIZE - 1))
2314                 ret = filemap_write_and_wait_range(mapping,
2315                                 round_down(iattr->ia_size, PAGE_SIZE),
2316                                 iattr->ia_size - 1);
2317         if (ret)
2318                 goto err;
2319
2320         mutex_lock(&inode->ei_update_lock);
2321         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2322                                &new_i_size, 0);
2323         mutex_unlock(&inode->ei_update_lock);
2324
2325         if (unlikely(ret))
2326                 goto err;
2327
2328         truncate_setsize(&inode->v, iattr->ia_size);
2329
2330         /*
2331          * XXX: need a comment explaining why PAGE_SIZE and not block_bytes()
2332          * here:
2333          */
2334         ret = __bch2_fpunch(c, inode,
2335                         round_up(iattr->ia_size, PAGE_SIZE) >> 9,
2336                         U64_MAX, &inode->ei_journal_seq);
2337         if (unlikely(ret))
2338                 goto err;
2339
2340         setattr_copy(&inode->v, iattr);
2341
2342         mutex_lock(&inode->ei_update_lock);
2343         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
2344                                ATTR_MTIME|ATTR_CTIME);
2345         mutex_unlock(&inode->ei_update_lock);
2346 err:
2347         pagecache_block_put(&mapping->add_lock);
2348         return ret;
2349 }
2350
2351 /* fallocate: */
2352
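/*
 * FALLOC_FL_PUNCH_HOLE: zero out the partial pages at either end of the hole,
 * drop the pagecache over it, then delete the extents that are fully contained
 * in the (page aligned) middle of the range.
 */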
2353 static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2354 {
2355         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2356         struct address_space *mapping = inode->v.i_mapping;
2357         u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
2358         u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
2359         int ret = 0;
2360
2361         inode_lock(&inode->v);
2362         inode_dio_wait(&inode->v);
2363         pagecache_block_get(&mapping->add_lock);
2364
2365         ret = __bch2_truncate_page(inode,
2366                                    offset >> PAGE_SHIFT,
2367                                    offset, offset + len);
2368         if (unlikely(ret))
2369                 goto err;
2370
2371         if (offset >> PAGE_SHIFT !=
2372             (offset + len) >> PAGE_SHIFT) {
2373                 ret = __bch2_truncate_page(inode,
2374                                            (offset + len) >> PAGE_SHIFT,
2375                                            offset, offset + len);
2376                 if (unlikely(ret))
2377                         goto err;
2378         }
2379
2380         truncate_pagecache_range(&inode->v, offset, offset + len - 1);
2381
2382         if (discard_start < discard_end)
2383                 ret = __bch2_fpunch(c, inode, discard_start, discard_end,
2384                                     &inode->ei_journal_seq);
2385 err:
2386         pagecache_block_put(&mapping->add_lock);
2387         inode_unlock(&inode->v);
2388
2389         return ret;
2390 }
2391
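/*
 * FALLOC_FL_COLLAPSE_RANGE: using a pair of btree iterators, copy every extent
 * from beyond the collapsed range to its new position len bytes lower, then
 * punch everything past the new size and shrink i_size.
 */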
2392 static long bch2_fcollapse(struct bch_inode_info *inode,
2393                            loff_t offset, loff_t len)
2394 {
2395         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2396         struct address_space *mapping = inode->v.i_mapping;
2397         struct btree_trans trans;
2398         struct btree_iter *src, *dst;
2399         BKEY_PADDED(k) copy;
2400         struct bkey_s_c k;
2401         loff_t new_size;
2402         int ret;
2403
2404         if ((offset | len) & (block_bytes(c) - 1))
2405                 return -EINVAL;
2406
2407         bch2_trans_init(&trans, c);
2408         bch2_trans_preload_iters(&trans);
2409
2410         /*
2411          * We need i_mutex to keep the page cache consistent with the extents
2412          * btree, and the btree consistent with i_size - we don't need outside
2413          * locking for the extents btree itself, because we're using linked
2414          * iterators
2415          */
2416         inode_lock(&inode->v);
2417         inode_dio_wait(&inode->v);
2418         pagecache_block_get(&mapping->add_lock);
2419
2420         ret = -EINVAL;
2421         if (offset + len >= inode->v.i_size)
2422                 goto err;
2423
2424         if (inode->v.i_size < len)
2425                 goto err;
2426
2427         new_size = inode->v.i_size - len;
2428
2429         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2430         if (ret)
2431                 goto err;
2432
2433         dst = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2434                         POS(inode->v.i_ino, offset >> 9),
2435                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2436         BUG_ON(IS_ERR_OR_NULL(dst));
2437
2438         src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2439                         POS_MIN, BTREE_ITER_SLOTS);
2440         BUG_ON(IS_ERR_OR_NULL(src));
2441
2442         while (bkey_cmp(dst->pos,
2443                         POS(inode->v.i_ino,
2444                             round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
2445                 struct disk_reservation disk_res;
2446
2447                 ret = bch2_btree_iter_traverse(dst);
2448                 if (ret)
2449                         goto bkey_err;
2450
2451                 bch2_btree_iter_set_pos(src,
2452                         POS(dst->pos.inode, dst->pos.offset + (len >> 9)));
2453
2454                 k = bch2_btree_iter_peek_slot(src);
2455                 if ((ret = bkey_err(k)))
2456                         goto bkey_err;
2457
2458                 bkey_reassemble(&copy.k, k);
2459
2460                 bch2_cut_front(src->pos, &copy.k);
2461                 copy.k.k.p.offset -= len >> 9;
2462
2463                 bch2_extent_trim_atomic(&copy.k, dst);
2464
2465                 BUG_ON(bkey_cmp(dst->pos, bkey_start_pos(&copy.k.k)));
2466
2467                 ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
2468                                 bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k)),
2469                                 BCH_DISK_RESERVATION_NOFAIL);
2470                 BUG_ON(ret);
2471
2472                 ret = bch2_extent_update(&trans, inode,
2473                                 &disk_res, NULL,
2474                                 dst, &copy.k,
2475                                 0, true, true, NULL);
2476                 bch2_disk_reservation_put(c, &disk_res);
2477 bkey_err:
2478                 if (ret == -EINTR)
2479                         ret = 0;
2480                 if (ret)
2481                         goto err;
2482                 /*
2483                  * XXX: if we error here we've left data with multiple
2484                  * pointers... which isn't a _super_ serious problem...
2485                  */
2486
2487                 bch2_trans_cond_resched(&trans);
2488         }
2489         bch2_trans_unlock(&trans);
2490
2491         ret = __bch2_fpunch(c, inode,
2492                         round_up(new_size, block_bytes(c)) >> 9,
2493                         U64_MAX, &inode->ei_journal_seq);
2494         if (ret)
2495                 goto err;
2496
2497         i_size_write(&inode->v, new_size);
2498         mutex_lock(&inode->ei_update_lock);
2499         ret = bch2_write_inode_size(c, inode, new_size,
2500                                     ATTR_MTIME|ATTR_CTIME);
2501         mutex_unlock(&inode->ei_update_lock);
2502 err:
2503         bch2_trans_exit(&trans);
2504         pagecache_block_put(&mapping->add_lock);
2505         inode_unlock(&inode->v);
2506         return ret;
2507 }
2508
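/*
 * fallocate (and FALLOC_FL_ZERO_RANGE): walk the range one extent/hole at a
 * time and replace unallocated space - or, for zero range, existing data -
 * with reservation keys, taking quota and disk reservations as we go; then
 * update i_size if we extended the file and KEEP_SIZE wasn't set.
 */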
2509 static long bch2_fallocate(struct bch_inode_info *inode, int mode,
2510                            loff_t offset, loff_t len)
2511 {
2512         struct address_space *mapping = inode->v.i_mapping;
2513         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2514         struct btree_trans trans;
2515         struct btree_iter *iter;
2516         struct bpos end_pos;
2517         loff_t block_start, block_end;
2518         loff_t end = offset + len;
2519         unsigned sectors;
2520         unsigned replicas = io_opts(c, inode).data_replicas;
2521         int ret;
2522
2523         bch2_trans_init(&trans, c);
2524         bch2_trans_preload_iters(&trans);
2525
2526         inode_lock(&inode->v);
2527         inode_dio_wait(&inode->v);
2528         pagecache_block_get(&mapping->add_lock);
2529
2530         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2531                 ret = inode_newsize_ok(&inode->v, end);
2532                 if (ret)
2533                         goto err;
2534         }
2535
2536         if (mode & FALLOC_FL_ZERO_RANGE) {
2537                 ret = __bch2_truncate_page(inode,
2538                                            offset >> PAGE_SHIFT,
2539                                            offset, end);
2540
2541                 if (!ret &&
2542                     offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2543                         ret = __bch2_truncate_page(inode,
2544                                                    end >> PAGE_SHIFT,
2545                                                    offset, end);
2546
2547                 if (unlikely(ret))
2548                         goto err;
2549
2550                 truncate_pagecache_range(&inode->v, offset, end - 1);
2551
2552                 block_start     = round_up(offset, PAGE_SIZE);
2553                 block_end       = round_down(end, PAGE_SIZE);
2554         } else {
2555                 block_start     = round_down(offset, PAGE_SIZE);
2556                 block_end       = round_up(end, PAGE_SIZE);
2557         }
2558
2559         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2560                         POS(inode->v.i_ino, block_start >> 9),
2561                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2562         end_pos = POS(inode->v.i_ino, block_end >> 9);
2563
2564         while (bkey_cmp(iter->pos, end_pos) < 0) {
2565                 struct disk_reservation disk_res = { 0 };
2566                 struct quota_res quota_res = { 0 };
2567                 struct bkey_i_reservation reservation;
2568                 struct bkey_s_c k;
2569
2570                 k = bch2_btree_iter_peek_slot(iter);
2571                 if ((ret = bkey_err(k)))
2572                         goto bkey_err;
2573
2574                 /* already reserved */
2575                 if (k.k->type == KEY_TYPE_reservation &&
2576                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2577                         bch2_btree_iter_next_slot(iter);
2578                         continue;
2579                 }
2580
2581                 if (bkey_extent_is_data(k.k) &&
2582                     !(mode & FALLOC_FL_ZERO_RANGE)) {
2583                         bch2_btree_iter_next_slot(iter);
2584                         continue;
2585                 }
2586
2587                 bkey_reservation_init(&reservation.k_i);
2588                 reservation.k.type      = KEY_TYPE_reservation;
2589                 reservation.k.p         = k.k->p;
2590                 reservation.k.size      = k.k->size;
2591
2592                 bch2_cut_front(iter->pos, &reservation.k_i);
2593                 bch2_cut_back(end_pos, &reservation.k);
2594
2595                 sectors = reservation.k.size;
2596                 reservation.v.nr_replicas = bch2_bkey_nr_dirty_ptrs(k);
2597
2598                 if (!bkey_extent_is_allocation(k.k)) {
2599                         ret = bch2_quota_reservation_add(c, inode,
2600                                         &quota_res,
2601                                         sectors, true);
2602                         if (unlikely(ret))
2603                                 goto bkey_err;
2604                 }
2605
2606                 if (reservation.v.nr_replicas < replicas ||
2607                     bch2_extent_is_compressed(k)) {
2608                         ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2609                                                         replicas, 0);
2610                         if (unlikely(ret))
2611                                 goto bkey_err;
2612
2613                         reservation.v.nr_replicas = disk_res.nr_replicas;
2614                 }
2615
2616                 ret = bch2_extent_update(&trans, inode,
2617                                 &disk_res, &quota_res,
2618                                 iter, &reservation.k_i,
2619                                 0, true, true, NULL);
2620 bkey_err:
2621                 bch2_quota_reservation_put(c, inode, &quota_res);
2622                 bch2_disk_reservation_put(c, &disk_res);
2623                 if (ret == -EINTR)
2624                         ret = 0;
2625                 if (ret)
2626                         goto err;
2627         }
2628         bch2_trans_unlock(&trans);
2629
2630         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2631             end > inode->v.i_size) {
2632                 i_size_write(&inode->v, end);
2633
2634                 mutex_lock(&inode->ei_update_lock);
2635                 ret = bch2_write_inode_size(c, inode, inode->v.i_size, 0);
2636                 mutex_unlock(&inode->ei_update_lock);
2637         }
2638
2639         /* blech */
2640         if ((mode & FALLOC_FL_KEEP_SIZE) &&
2641             (mode & FALLOC_FL_ZERO_RANGE) &&
2642             inode->ei_inode.bi_size != inode->v.i_size) {
2643                 /* sync appends.. */
2644                 ret = filemap_write_and_wait_range(mapping,
2645                                         inode->ei_inode.bi_size, S64_MAX);
2646                 if (ret)
2647                         goto err;
2648
2649                 if (inode->ei_inode.bi_size != inode->v.i_size) {
2650                         mutex_lock(&inode->ei_update_lock);
2651                         ret = bch2_write_inode_size(c, inode,
2652                                                     inode->v.i_size, 0);
2653                         mutex_unlock(&inode->ei_update_lock);
2654                 }
2655         }
2656 err:
2657         bch2_trans_exit(&trans);
2658         pagecache_block_put(&mapping->add_lock);
2659         inode_unlock(&inode->v);
2660         return ret;
2661 }
2662
2663 long bch2_fallocate_dispatch(struct file *file, int mode,
2664                              loff_t offset, loff_t len)
2665 {
2666         struct bch_inode_info *inode = file_bch_inode(file);
2667
2668         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2669                 return bch2_fallocate(inode, mode, offset, len);
2670
2671         if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2672                 return bch2_fpunch(inode, offset, len);
2673
2674         if (mode == FALLOC_FL_COLLAPSE_RANGE)
2675                 return bch2_fcollapse(inode, offset, len);
2676
2677         return -EOPNOTSUPP;
2678 }
2679
2680 /* fseek: */
2681
2682 static bool page_is_data(struct page *page)
2683 {
2684         EBUG_ON(!PageLocked(page));
2685
2686         /* XXX: should only have to check PageDirty */
2687         return PagePrivate(page) &&
2688                 (page_state(page)->sectors ||
2689                  page_state(page)->dirty_sectors);
2690 }
2691
2692 static loff_t bch2_next_pagecache_data(struct inode *vinode,
2693                                        loff_t start_offset,
2694                                        loff_t end_offset)
2695 {
2696         struct address_space *mapping = vinode->i_mapping;
2697         struct page *page;
2698         pgoff_t index;
2699
2700         for (index = start_offset >> PAGE_SHIFT;
2701              index < end_offset >> PAGE_SHIFT;
2702              index++) {
2703                 if (find_get_pages(mapping, &index, 1, &page)) {
2704                         lock_page(page);
2705
2706                         if (page_is_data(page))
2707                                 end_offset =
2708                                         min(end_offset,
2709                                         max(start_offset,
2710                                             ((loff_t) index) << PAGE_SHIFT));
2711                         unlock_page(page);
2712                         put_page(page);
2713                 } else {
2714                         break;
2715                 }
2716         }
2717
2718         return end_offset;
2719 }
2720
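/*
 * SEEK_DATA: data may exist either in the extents btree or only in dirty
 * pagecache pages, so after finding the next extent we also scan the pagecache
 * between the start offset and that extent.
 */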
2721 static loff_t bch2_seek_data(struct file *file, u64 offset)
2722 {
2723         struct bch_inode_info *inode = file_bch_inode(file);
2724         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2725         struct btree_trans trans;
2726         struct btree_iter *iter;
2727         struct bkey_s_c k;
2728         u64 isize, next_data = MAX_LFS_FILESIZE;
2729         int ret;
2730
2731         isize = i_size_read(&inode->v);
2732         if (offset >= isize)
2733                 return -ENXIO;
2734
2735         bch2_trans_init(&trans, c);
2736
2737         for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
2738                            POS(inode->v.i_ino, offset >> 9), 0, k) {
2739                 if (k.k->p.inode != inode->v.i_ino) {
2740                         break;
2741                 } else if (bkey_extent_is_data(k.k)) {
2742                         next_data = max(offset, bkey_start_offset(k.k) << 9);
2743                         break;
2744                 } else if (k.k->p.offset >> 9 > isize)
2745                         break;
2746         }
2747
2748         ret = bch2_trans_exit(&trans);
2749         if (ret)
2750                 return ret;
2751
2752         if (next_data > offset)
2753                 next_data = bch2_next_pagecache_data(&inode->v,
2754                                                      offset, next_data);
2755
2756         if (next_data > isize)
2757                 return -ENXIO;
2758
2759         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
2760 }
2761
2762 static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
2763 {
2764         struct page *page;
2765         bool ret;
2766
2767         page = find_lock_entry(mapping, index);
2768         if (!page || xa_is_value(page))
2769                 return false;
2770
2771         ret = page_is_data(page);
2772         unlock_page(page);
2773
2774         return ret;
2775 }
2776
2777 static loff_t bch2_next_pagecache_hole(struct inode *vinode,
2778                                        loff_t start_offset,
2779                                        loff_t end_offset)
2780 {
2781         struct address_space *mapping = vinode->i_mapping;
2782         pgoff_t index;
2783
2784         for (index = start_offset >> PAGE_SHIFT;
2785              index < end_offset >> PAGE_SHIFT;
2786              index++)
2787                 if (!page_slot_is_data(mapping, index))
2788                         end_offset = max(start_offset,
2789                                          ((loff_t) index) << PAGE_SHIFT);
2790
2791         return end_offset;
2792 }
2793
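/*
 * SEEK_HOLE: the converse of SEEK_DATA - a range only counts as a hole if it's
 * a hole in both the extents btree and the pagecache.
 */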
2794 static loff_t bch2_seek_hole(struct file *file, u64 offset)
2795 {
2796         struct bch_inode_info *inode = file_bch_inode(file);
2797         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2798         struct btree_trans trans;
2799         struct btree_iter *iter;
2800         struct bkey_s_c k;
2801         u64 isize, next_hole = MAX_LFS_FILESIZE;
2802         int ret;
2803
2804         isize = i_size_read(&inode->v);
2805         if (offset >= isize)
2806                 return -ENXIO;
2807
2808         bch2_trans_init(&trans, c);
2809
2810         for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
2811                            POS(inode->v.i_ino, offset >> 9),
2812                            BTREE_ITER_SLOTS, k) {
2813                 if (k.k->p.inode != inode->v.i_ino) {
2814                         next_hole = bch2_next_pagecache_hole(&inode->v,
2815                                         offset, MAX_LFS_FILESIZE);
2816                         break;
2817                 } else if (!bkey_extent_is_data(k.k)) {
2818                         next_hole = bch2_next_pagecache_hole(&inode->v,
2819                                         max(offset, bkey_start_offset(k.k) << 9),
2820                                         k.k->p.offset << 9);
2821
2822                         if (next_hole < k.k->p.offset << 9)
2823                                 break;
2824                 } else {
2825                         offset = max(offset, bkey_start_offset(k.k) << 9);
2826                 }
2827         }
2828
2829         ret = bch2_trans_exit(&trans);
2830         if (ret)
2831                 return ret;
2832
2833         if (next_hole > isize)
2834                 next_hole = isize;
2835
2836         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
2837 }
2838
2839 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
2840 {
2841         switch (whence) {
2842         case SEEK_SET:
2843         case SEEK_CUR:
2844         case SEEK_END:
2845                 return generic_file_llseek(file, offset, whence);
2846         case SEEK_DATA:
2847                 return bch2_seek_data(file, offset);
2848         case SEEK_HOLE:
2849                 return bch2_seek_hole(file, offset);
2850         }
2851
2852         return -EINVAL;
2853 }
2854
2855 void bch2_fs_fsio_exit(struct bch_fs *c)
2856 {
2857         bioset_exit(&c->dio_write_bioset);
2858         bioset_exit(&c->dio_read_bioset);
2859         bioset_exit(&c->writepage_bioset);
2860 }
2861
2862 int bch2_fs_fsio_init(struct bch_fs *c)
2863 {
2864         int ret = 0;
2865
2866         pr_verbose_init(c->opts, "");
2867
2868         if (bioset_init(&c->writepage_bioset,
2869                         4, offsetof(struct bch_writepage_io, op.op.wbio.bio),
2870                         BIOSET_NEED_BVECS) ||
2871             bioset_init(&c->dio_read_bioset,
2872                         4, offsetof(struct dio_read, rbio.bio),
2873                         BIOSET_NEED_BVECS) ||
2874             bioset_init(&c->dio_write_bioset,
2875                         4, offsetof(struct dio_write, iop.op.wbio.bio),
2876                         BIOSET_NEED_BVECS))
2877                 ret = -ENOMEM;
2878
2879         pr_verbose_init(c->opts, "ret %i", ret);
2880         return ret;
2881 }
2882
2883 #endif /* NO_BCACHEFS_FS */