]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/fs-io.c
Update bcachefs sources to 0906b1fb49 bcachefs: fixes for 32 bit/big endian machines
[bcachefs-tools-debian] / libbcachefs / fs-io.c
1 #ifndef NO_BCACHEFS_FS
2
3 #include "bcachefs.h"
4 #include "btree_update.h"
5 #include "buckets.h"
6 #include "clock.h"
7 #include "error.h"
8 #include "fs.h"
9 #include "fs-io.h"
10 #include "fsck.h"
11 #include "inode.h"
12 #include "journal.h"
13 #include "io.h"
14 #include "keylist.h"
15 #include "quota.h"
16
17 #include <linux/aio.h>
18 #include <linux/backing-dev.h>
19 #include <linux/falloc.h>
20 #include <linux/migrate.h>
21 #include <linux/mmu_context.h>
22 #include <linux/pagevec.h>
23 #include <linux/sched/signal.h>
24 #include <linux/task_io_accounting_ops.h>
25 #include <linux/uio.h>
26 #include <linux/writeback.h>
27
28 #include <trace/events/bcachefs.h>
29 #include <trace/events/writeback.h>
30
31 struct quota_res {
32         u64                             sectors;
33 };
34
35 struct i_sectors_hook {
36         struct extent_insert_hook       hook;
37         struct bch_inode_info           *inode;
38         struct quota_res                quota_res;
39         s64                             sectors;
40         u64                             new_i_size;
41         unsigned                        flags;
42         unsigned                        appending:1;
43 };
44
45 struct bchfs_write_op {
46         struct bch_inode_info           *inode;
47         s64                             sectors_added;
48         bool                            is_dio;
49         bool                            unalloc;
50         u64                             new_i_size;
51
52         /* must be last: */
53         struct bch_write_op             op;
54 };
55
56 struct bch_writepage_io {
57         struct closure                  cl;
58         u64                             new_sectors;
59
60         /* must be last: */
61         struct bchfs_write_op           op;
62 };
63
64 struct dio_write {
65         struct closure                  cl;
66         struct kiocb                    *req;
67         struct task_struct              *task;
68         unsigned                        loop:1,
69                                         sync:1,
70                                         free_iov:1;
71         struct quota_res                quota_res;
72
73         struct iov_iter                 iter;
74         struct iovec                    inline_vecs[2];
75
76         /* must be last: */
77         struct bchfs_write_op           iop;
78 };
79
80 struct dio_read {
81         struct closure                  cl;
82         struct kiocb                    *req;
83         long                            ret;
84         struct bch_read_bio             rbio;
85 };
86
87 /* pagecache_block must be held */
88 static int write_invalidate_inode_pages_range(struct address_space *mapping,
89                                               loff_t start, loff_t end)
90 {
91         int ret;
92
93         /*
94          * XXX: the way this is currently implemented, we can spin if a process
95          * is continually redirtying a specific page
96          */
97         do {
98                 if (!mapping->nrpages &&
99                     !mapping->nrexceptional)
100                         return 0;
101
102                 ret = filemap_write_and_wait_range(mapping, start, end);
103                 if (ret)
104                         break;
105
106                 if (!mapping->nrpages)
107                         return 0;
108
109                 ret = invalidate_inode_pages2_range(mapping,
110                                 start >> PAGE_SHIFT,
111                                 end >> PAGE_SHIFT);
112         } while (ret == -EBUSY);
113
114         return ret;
115 }
116
117 /* quotas */
118
119 #ifdef CONFIG_BCACHEFS_QUOTA
120
121 static void bch2_quota_reservation_put(struct bch_fs *c,
122                                        struct bch_inode_info *inode,
123                                        struct quota_res *res)
124 {
125         if (!res->sectors)
126                 return;
127
128         mutex_lock(&inode->ei_quota_lock);
129         BUG_ON(res->sectors > inode->ei_quota_reserved);
130
131         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
132                         -((s64) res->sectors), BCH_QUOTA_PREALLOC);
133         inode->ei_quota_reserved -= res->sectors;
134         mutex_unlock(&inode->ei_quota_lock);
135
136         res->sectors = 0;
137 }
138
139 static int bch2_quota_reservation_add(struct bch_fs *c,
140                                       struct bch_inode_info *inode,
141                                       struct quota_res *res,
142                                       unsigned sectors,
143                                       bool check_enospc)
144 {
145         int ret;
146
147         mutex_lock(&inode->ei_quota_lock);
148         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
149                               check_enospc ? BCH_QUOTA_PREALLOC : BCH_QUOTA_NOCHECK);
150         if (likely(!ret)) {
151                 inode->ei_quota_reserved += sectors;
152                 res->sectors += sectors;
153         }
154         mutex_unlock(&inode->ei_quota_lock);
155
156         return ret;
157 }
158
159 #else
160
161 static void bch2_quota_reservation_put(struct bch_fs *c,
162                                        struct bch_inode_info *inode,
163                                        struct quota_res *res)
164 {
165 }
166
167 static int bch2_quota_reservation_add(struct bch_fs *c,
168                                       struct bch_inode_info *inode,
169                                       struct quota_res *res,
170                                       unsigned sectors,
171                                       bool check_enospc)
172 {
173         return 0;
174 }
175
176 #endif
177
178 /* i_size updates: */
179
180 static int inode_set_size(struct bch_inode_info *inode,
181                           struct bch_inode_unpacked *bi,
182                           void *p)
183 {
184         loff_t *new_i_size = p;
185
186         lockdep_assert_held(&inode->ei_update_lock);
187
188         bi->bi_size = *new_i_size;
189         return 0;
190 }
191
192 static int __must_check bch2_write_inode_size(struct bch_fs *c,
193                                               struct bch_inode_info *inode,
194                                               loff_t new_size)
195 {
196         return __bch2_write_inode(c, inode, inode_set_size, &new_size);
197 }
198
199 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
200                            struct quota_res *quota_res, int sectors)
201 {
202         mutex_lock(&inode->ei_quota_lock);
203 #ifdef CONFIG_BCACHEFS_QUOTA
204         if (quota_res && sectors > 0) {
205                 BUG_ON(sectors > quota_res->sectors);
206                 BUG_ON(sectors > inode->ei_quota_reserved);
207
208                 quota_res->sectors -= sectors;
209                 inode->ei_quota_reserved -= sectors;
210         } else {
211                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, BCH_QUOTA_WARN);
212         }
213 #endif
214         inode->v.i_blocks += sectors;
215         mutex_unlock(&inode->ei_quota_lock);
216 }
217
218 /* i_sectors accounting: */
219
220 static enum btree_insert_ret
221 i_sectors_hook_fn(struct extent_insert_hook *hook,
222                   struct bpos committed_pos,
223                   struct bpos next_pos,
224                   struct bkey_s_c k,
225                   const struct bkey_i *insert)
226 {
227         struct i_sectors_hook *h = container_of(hook,
228                                 struct i_sectors_hook, hook);
229         s64 sectors = next_pos.offset - committed_pos.offset;
230         int sign = bkey_extent_is_allocation(&insert->k) -
231                 (k.k && bkey_extent_is_allocation(k.k));
232
233         EBUG_ON(!(h->inode->ei_inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY));
234
235         h->sectors += sectors * sign;
236
237         return BTREE_INSERT_OK;
238 }
239
240 static int i_sectors_dirty_finish_fn(struct bch_inode_info *inode,
241                                      struct bch_inode_unpacked *bi,
242                                      void *p)
243 {
244         struct i_sectors_hook *h = p;
245
246         if (h->new_i_size != U64_MAX &&
247             (!h->appending ||
248              h->new_i_size > bi->bi_size))
249                 bi->bi_size = h->new_i_size;
250         bi->bi_sectors  += h->sectors;
251         bi->bi_flags    &= ~h->flags;
252         return 0;
253 }
254
255 static int i_sectors_dirty_finish(struct bch_fs *c, struct i_sectors_hook *h)
256 {
257         int ret;
258
259         mutex_lock(&h->inode->ei_update_lock);
260         if (h->new_i_size != U64_MAX)
261                 i_size_write(&h->inode->v, h->new_i_size);
262
263         i_sectors_acct(c, h->inode, &h->quota_res, h->sectors);
264
265         ret = __bch2_write_inode(c, h->inode, i_sectors_dirty_finish_fn, h);
266         mutex_unlock(&h->inode->ei_update_lock);
267
268         bch2_quota_reservation_put(c, h->inode, &h->quota_res);
269
270         h->sectors = 0;
271
272         return ret;
273 }
274
275 static int i_sectors_dirty_start_fn(struct bch_inode_info *inode,
276                                     struct bch_inode_unpacked *bi, void *p)
277 {
278         struct i_sectors_hook *h = p;
279
280         if (h->flags & BCH_INODE_I_SIZE_DIRTY)
281                 bi->bi_size = h->new_i_size;
282
283         bi->bi_flags |= h->flags;
284         return 0;
285 }
286
287 static int i_sectors_dirty_start(struct bch_fs *c, struct i_sectors_hook *h)
288 {
289         int ret;
290
291         mutex_lock(&h->inode->ei_update_lock);
292         ret = __bch2_write_inode(c, h->inode, i_sectors_dirty_start_fn, h);
293         mutex_unlock(&h->inode->ei_update_lock);
294
295         return ret;
296 }
297
298 static inline struct i_sectors_hook
299 i_sectors_hook_init(struct bch_inode_info *inode, unsigned flags)
300 {
301         return (struct i_sectors_hook) {
302                 .hook.fn        = i_sectors_hook_fn,
303                 .inode          = inode,
304                 .sectors        = 0,
305                 .new_i_size     = U64_MAX,
306                 .flags          = flags|BCH_INODE_I_SECTORS_DIRTY,
307         };
308 }
309
310 /* normal i_size/i_sectors update machinery: */
311
312 struct bchfs_extent_trans_hook {
313         struct bchfs_write_op           *op;
314         struct extent_insert_hook       hook;
315
316         struct bch_inode_unpacked       inode_u;
317         struct bkey_inode_buf           inode_p;
318
319         bool                            need_inode_update;
320 };
321
322 static enum btree_insert_ret
323 bchfs_extent_update_hook(struct extent_insert_hook *hook,
324                          struct bpos committed_pos,
325                          struct bpos next_pos,
326                          struct bkey_s_c k,
327                          const struct bkey_i *insert)
328 {
329         struct bchfs_extent_trans_hook *h = container_of(hook,
330                                 struct bchfs_extent_trans_hook, hook);
331         struct bch_inode_info *inode = h->op->inode;
332         int sign = bkey_extent_is_allocation(&insert->k) -
333                 (k.k && bkey_extent_is_allocation(k.k));
334         s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
335         u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
336         bool do_pack = false;
337
338         if (h->op->unalloc &&
339             !bch2_extent_is_fully_allocated(k))
340                 return BTREE_INSERT_ENOSPC;
341
342         BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));
343
344         /* XXX: inode->i_size locking */
345         if (offset > inode->ei_inode.bi_size) {
346                 if (!h->need_inode_update) {
347                         h->need_inode_update = true;
348                         return BTREE_INSERT_NEED_TRAVERSE;
349                 }
350
351                 BUG_ON(h->inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY);
352
353                 h->inode_u.bi_size = offset;
354                 do_pack = true;
355
356                 inode->ei_inode.bi_size = offset;
357
358                 if (h->op->is_dio)
359                         i_size_write(&inode->v, offset);
360         }
361
362         if (sectors) {
363                 if (!h->need_inode_update) {
364                         h->need_inode_update = true;
365                         return BTREE_INSERT_NEED_TRAVERSE;
366                 }
367
368                 h->inode_u.bi_sectors += sectors;
369                 do_pack = true;
370
371                 h->op->sectors_added += sectors;
372         }
373
374         if (do_pack)
375                 bch2_inode_pack(&h->inode_p, &h->inode_u);
376
377         return BTREE_INSERT_OK;
378 }
379
380 static int bchfs_write_index_update(struct bch_write_op *wop)
381 {
382         struct bchfs_write_op *op = container_of(wop,
383                                 struct bchfs_write_op, op);
384         struct keylist *keys = &op->op.insert_keys;
385         struct btree_iter extent_iter, inode_iter;
386         struct bchfs_extent_trans_hook hook;
387         struct bkey_i *k = bch2_keylist_front(keys);
388         s64 orig_sectors_added = op->sectors_added;
389         int ret;
390
391         BUG_ON(k->k.p.inode != op->inode->v.i_ino);
392
393         bch2_btree_iter_init(&extent_iter, wop->c, BTREE_ID_EXTENTS,
394                              bkey_start_pos(&bch2_keylist_front(keys)->k),
395                              BTREE_ITER_INTENT);
396         bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES,
397                              POS(extent_iter.pos.inode, 0),
398                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
399
400         hook.op                 = op;
401         hook.hook.fn            = bchfs_extent_update_hook;
402         hook.need_inode_update  = false;
403
404         do {
405                 /* XXX: inode->i_size locking */
406                 k = bch2_keylist_front(keys);
407                 if (min(k->k.p.offset << 9, op->new_i_size) >
408                     op->inode->ei_inode.bi_size)
409                         hook.need_inode_update = true;
410
411                 if (hook.need_inode_update) {
412                         struct bkey_s_c inode;
413
414                         if (!btree_iter_linked(&inode_iter))
415                                 bch2_btree_iter_link(&extent_iter, &inode_iter);
416
417                         inode = bch2_btree_iter_peek_slot(&inode_iter);
418                         if ((ret = btree_iter_err(inode)))
419                                 goto err;
420
421                         if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
422                                       "inode %llu not found when updating",
423                                       extent_iter.pos.inode)) {
424                                 ret = -ENOENT;
425                                 break;
426                         }
427
428                         if (WARN_ONCE(bkey_bytes(inode.k) >
429                                       sizeof(hook.inode_p),
430                                       "inode %llu too big (%zu bytes, buf %zu)",
431                                       extent_iter.pos.inode,
432                                       bkey_bytes(inode.k),
433                                       sizeof(hook.inode_p))) {
434                                 ret = -ENOENT;
435                                 break;
436                         }
437
438                         bkey_reassemble(&hook.inode_p.inode.k_i, inode);
439                         ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
440                                                &hook.inode_u);
441                         if (WARN_ONCE(ret,
442                                       "error %i unpacking inode %llu",
443                                       ret, extent_iter.pos.inode)) {
444                                 ret = -ENOENT;
445                                 break;
446                         }
447
448                         ret = bch2_btree_insert_at(wop->c, &wop->res,
449                                         &hook.hook, op_journal_seq(wop),
450                                         BTREE_INSERT_NOFAIL|
451                                         BTREE_INSERT_ATOMIC|
452                                         BTREE_INSERT_USE_RESERVE,
453                                         BTREE_INSERT_ENTRY(&extent_iter, k),
454                                         BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
455                                                         &hook.inode_p.inode.k_i, 2));
456                 } else {
457                         ret = bch2_btree_insert_at(wop->c, &wop->res,
458                                         &hook.hook, op_journal_seq(wop),
459                                         BTREE_INSERT_NOFAIL|
460                                         BTREE_INSERT_ATOMIC|
461                                         BTREE_INSERT_USE_RESERVE,
462                                         BTREE_INSERT_ENTRY(&extent_iter, k));
463                 }
464
465                 BUG_ON(bkey_cmp(extent_iter.pos, bkey_start_pos(&k->k)));
466
467                 if (WARN_ONCE(!ret != !k->k.size,
468                               "ret %i k->size %u", ret, k->k.size))
469                         ret = k->k.size ? -EINTR : 0;
470 err:
471                 if (ret == -EINTR)
472                         continue;
473                 if (ret)
474                         break;
475
476                 BUG_ON(bkey_cmp(extent_iter.pos, k->k.p) < 0);
477                 bch2_keylist_pop_front(keys);
478         } while (!bch2_keylist_empty(keys));
479
480         bch2_btree_iter_unlock(&extent_iter);
481         bch2_btree_iter_unlock(&inode_iter);
482
483         if (op->is_dio) {
484                 struct dio_write *dio = container_of(op, struct dio_write, iop);
485
486                 i_sectors_acct(wop->c, op->inode, &dio->quota_res,
487                                op->sectors_added - orig_sectors_added);
488         }
489
490         return ret;
491 }
492
493 static inline void bch2_fswrite_op_init(struct bchfs_write_op *op,
494                                         struct bch_fs *c,
495                                         struct bch_inode_info *inode,
496                                         struct bch_io_opts opts,
497                                         bool is_dio)
498 {
499         op->inode               = inode;
500         op->sectors_added       = 0;
501         op->is_dio              = is_dio;
502         op->unalloc             = false;
503         op->new_i_size          = U64_MAX;
504
505         bch2_write_op_init(&op->op, c, opts);
506         op->op.target           = opts.foreground_target;
507         op->op.index_update_fn  = bchfs_write_index_update;
508         op_journal_seq_set(&op->op, &inode->ei_journal_seq);
509 }
510
511 static inline struct bch_io_opts io_opts(struct bch_fs *c, struct bch_inode_info *inode)
512 {
513         struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
514
515         bch2_io_opts_apply(&opts, bch2_inode_opts_get(&inode->ei_inode));
516         return opts;
517 }
518
519 /* page state: */
520
521 /* stored in page->private: */
522
523 /*
524  * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
525  * almost protected it with the page lock, except that bch2_writepage_io_done has
526  * to update the sector counts (and from interrupt/bottom half context).
527  */
528 struct bch_page_state {
529 union { struct {
530         /* existing data: */
531         unsigned                sectors:PAGE_SECTOR_SHIFT + 1;
532         unsigned                nr_replicas:4;
533         unsigned                compressed:1;
534
535         /* Owns PAGE_SECTORS sized reservation: */
536         unsigned                reserved:1;
537         unsigned                reservation_replicas:4;
538
539         /* Owns PAGE_SECTORS sized quota reservation: */
540         unsigned                quota_reserved:1;
541
542         /*
543          * Number of sectors on disk - for i_blocks
544          * Uncompressed size, not compressed size:
545          */
546         unsigned                dirty_sectors:PAGE_SECTOR_SHIFT + 1;
547 };
548         /* for cmpxchg: */
549         unsigned long           v;
550 };
551 };
552
553 #define page_state_cmpxchg(_ptr, _new, _expr)                           \
554 ({                                                                      \
555         unsigned long _v = READ_ONCE((_ptr)->v);                        \
556         struct bch_page_state _old;                                     \
557                                                                         \
558         do {                                                            \
559                 _old.v = _new.v = _v;                                   \
560                 _expr;                                                  \
561                                                                         \
562                 EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
563         } while (_old.v != _new.v &&                                    \
564                  (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v); \
565                                                                         \
566         _old;                                                           \
567 })
568
569 static inline struct bch_page_state *page_state(struct page *page)
570 {
571         struct bch_page_state *s = (void *) &page->private;
572
573         BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));
574
575         if (!PagePrivate(page))
576                 SetPagePrivate(page);
577
578         return s;
579 }
580
581 static inline unsigned page_res_sectors(struct bch_page_state s)
582 {
583
584         return s.reserved ? s.reservation_replicas * PAGE_SECTORS : 0;
585 }
586
587 static void __bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
588                                         struct bch_page_state s)
589 {
590         struct disk_reservation res = { .sectors = page_res_sectors(s) };
591         struct quota_res quota_res = { .sectors = s.quota_reserved ? PAGE_SECTORS : 0 };
592
593         bch2_quota_reservation_put(c, inode, &quota_res);
594         bch2_disk_reservation_put(c, &res);
595 }
596
597 static void bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
598                                       struct page *page)
599 {
600         struct bch_page_state s;
601
602         s = page_state_cmpxchg(page_state(page), s, {
603                 s.reserved              = 0;
604                 s.quota_reserved        = 0;
605         });
606
607         __bch2_put_page_reservation(c, inode, s);
608 }
609
610 static int bch2_get_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
611                                      struct page *page, bool check_enospc)
612 {
613         struct bch_page_state *s = page_state(page), new, old;
614
615         /* XXX: this should not be open coded */
616         unsigned nr_replicas = inode->ei_inode.bi_data_replicas
617                 ? inode->ei_inode.bi_data_replicas - 1
618                 : c->opts.data_replicas;
619
620         struct disk_reservation disk_res = bch2_disk_reservation_init(c,
621                                                 nr_replicas);
622         struct quota_res quota_res = { 0 };
623         int ret = 0;
624
625         /*
626          * XXX: this could likely be quite a bit simpler, page reservations
627          * _should_ only be manipulated with page locked:
628          */
629
630         old = page_state_cmpxchg(s, new, {
631                 if (new.reserved
632                     ? (new.reservation_replicas < disk_res.nr_replicas)
633                     : (new.sectors < PAGE_SECTORS ||
634                        new.nr_replicas < disk_res.nr_replicas ||
635                        new.compressed)) {
636                         int sectors = (disk_res.nr_replicas * PAGE_SECTORS -
637                                        page_res_sectors(new) -
638                                        disk_res.sectors);
639
640                         if (sectors > 0) {
641                                 ret = bch2_disk_reservation_add(c, &disk_res, sectors,
642                                                 !check_enospc
643                                                 ? BCH_DISK_RESERVATION_NOFAIL : 0);
644                                 if (unlikely(ret))
645                                         goto err;
646                         }
647
648                         new.reserved = 1;
649                         new.reservation_replicas = disk_res.nr_replicas;
650                 }
651
652                 if (!new.quota_reserved &&
653                     new.sectors + new.dirty_sectors < PAGE_SECTORS) {
654                         ret = bch2_quota_reservation_add(c, inode, &quota_res,
655                                                 PAGE_SECTORS - quota_res.sectors,
656                                                 check_enospc);
657                         if (unlikely(ret))
658                                 goto err;
659
660                         new.quota_reserved = 1;
661                 }
662         });
663
664         quota_res.sectors -= (new.quota_reserved - old.quota_reserved) * PAGE_SECTORS;
665         disk_res.sectors -= page_res_sectors(new) - page_res_sectors(old);
666 err:
667         bch2_quota_reservation_put(c, inode, &quota_res);
668         bch2_disk_reservation_put(c, &disk_res);
669         return ret;
670 }
671
672 static void bch2_clear_page_bits(struct page *page)
673 {
674         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
675         struct bch_fs *c = inode->v.i_sb->s_fs_info;
676         struct bch_page_state s;
677
678         if (!PagePrivate(page))
679                 return;
680
681         s.v = xchg(&page_state(page)->v, 0);
682         ClearPagePrivate(page);
683
684         if (s.dirty_sectors)
685                 i_sectors_acct(c, inode, NULL, -s.dirty_sectors);
686
687         __bch2_put_page_reservation(c, inode, s);
688 }
689
690 int bch2_set_page_dirty(struct page *page)
691 {
692         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
693         struct bch_fs *c = inode->v.i_sb->s_fs_info;
694         struct quota_res quota_res = { 0 };
695         struct bch_page_state old, new;
696
697         old = page_state_cmpxchg(page_state(page), new,
698                 new.dirty_sectors = PAGE_SECTORS - new.sectors;
699                 new.quota_reserved = 0;
700         );
701
702         quota_res.sectors += old.quota_reserved * PAGE_SECTORS;
703
704         if (old.dirty_sectors != new.dirty_sectors)
705                 i_sectors_acct(c, inode, &quota_res,
706                                new.dirty_sectors - old.dirty_sectors);
707         bch2_quota_reservation_put(c, inode, &quota_res);
708
709         return __set_page_dirty_nobuffers(page);
710 }
711
712 int bch2_page_mkwrite(struct vm_fault *vmf)
713 {
714         struct page *page = vmf->page;
715         struct file *file = vmf->vma->vm_file;
716         struct bch_inode_info *inode = file_bch_inode(file);
717         struct address_space *mapping = inode->v.i_mapping;
718         struct bch_fs *c = inode->v.i_sb->s_fs_info;
719         int ret = VM_FAULT_LOCKED;
720
721         sb_start_pagefault(inode->v.i_sb);
722         file_update_time(file);
723
724         /*
725          * Not strictly necessary, but helps avoid dio writes livelocking in
726          * write_invalidate_inode_pages_range() - can drop this if/when we get
727          * a write_invalidate_inode_pages_range() that works without dropping
728          * page lock before invalidating page
729          */
730         if (current->pagecache_lock != &mapping->add_lock)
731                 pagecache_add_get(&mapping->add_lock);
732
733         lock_page(page);
734         if (page->mapping != mapping ||
735             page_offset(page) > i_size_read(&inode->v)) {
736                 unlock_page(page);
737                 ret = VM_FAULT_NOPAGE;
738                 goto out;
739         }
740
741         if (bch2_get_page_reservation(c, inode, page, true)) {
742                 unlock_page(page);
743                 ret = VM_FAULT_SIGBUS;
744                 goto out;
745         }
746
747         if (!PageDirty(page))
748                 set_page_dirty(page);
749         wait_for_stable_page(page);
750 out:
751         if (current->pagecache_lock != &mapping->add_lock)
752                 pagecache_add_put(&mapping->add_lock);
753         sb_end_pagefault(inode->v.i_sb);
754         return ret;
755 }
756
757 void bch2_invalidatepage(struct page *page, unsigned int offset,
758                          unsigned int length)
759 {
760         EBUG_ON(!PageLocked(page));
761         EBUG_ON(PageWriteback(page));
762
763         if (offset || length < PAGE_SIZE)
764                 return;
765
766         bch2_clear_page_bits(page);
767 }
768
769 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
770 {
771         /* XXX: this can't take locks that are held while we allocate memory */
772         EBUG_ON(!PageLocked(page));
773         EBUG_ON(PageWriteback(page));
774
775         if (PageDirty(page))
776                 return 0;
777
778         bch2_clear_page_bits(page);
779         return 1;
780 }
781
782 #ifdef CONFIG_MIGRATION
783 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
784                       struct page *page, enum migrate_mode mode)
785 {
786         int ret;
787
788         ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
789         if (ret != MIGRATEPAGE_SUCCESS)
790                 return ret;
791
792         if (PagePrivate(page)) {
793                 *page_state(newpage) = *page_state(page);
794                 ClearPagePrivate(page);
795         }
796
797         migrate_page_copy(newpage, page);
798         return MIGRATEPAGE_SUCCESS;
799 }
800 #endif
801
802 /* readpages/writepages: */
803
804 static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
805 {
806         sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
807
808         return bio->bi_vcnt < bio->bi_max_vecs &&
809                 bio_end_sector(bio) == offset;
810 }
811
812 static void __bio_add_page(struct bio *bio, struct page *page)
813 {
814         bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
815                 .bv_page = page,
816                 .bv_len = PAGE_SIZE,
817                 .bv_offset = 0,
818         };
819
820         bio->bi_iter.bi_size += PAGE_SIZE;
821 }
822
823 static int bio_add_page_contig(struct bio *bio, struct page *page)
824 {
825         sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
826
827         EBUG_ON(!bio->bi_max_vecs);
828
829         if (!bio->bi_vcnt)
830                 bio->bi_iter.bi_sector = offset;
831         else if (!bio_can_add_page_contig(bio, page))
832                 return -1;
833
834         __bio_add_page(bio, page);
835         return 0;
836 }
837
838 /* readpage(s): */
839
840 static void bch2_readpages_end_io(struct bio *bio)
841 {
842         struct bio_vec *bv;
843         int i;
844
845         bio_for_each_segment_all(bv, bio, i) {
846                 struct page *page = bv->bv_page;
847
848                 if (!bio->bi_status) {
849                         SetPageUptodate(page);
850                 } else {
851                         ClearPageUptodate(page);
852                         SetPageError(page);
853                 }
854                 unlock_page(page);
855         }
856
857         bio_put(bio);
858 }
859
860 struct readpages_iter {
861         struct address_space    *mapping;
862         struct list_head        pages;
863         unsigned                nr_pages;
864 };
865
866 static inline void page_state_init_for_read(struct page *page)
867 {
868         struct bch_page_state *s = page_state(page);
869
870         BUG_ON(s->reserved);
871         s->sectors      = 0;
872         s->compressed   = 0;
873 }
874
875 static int readpage_add_page(struct readpages_iter *iter, struct page *page)
876 {
877         int ret;
878
879         prefetchw(&page->flags);
880
881         ret = add_to_page_cache_lru(page, iter->mapping,
882                                     page->index, GFP_NOFS);
883         if (!ret)
884                 page_state_init_for_read(page);
885
886         put_page(page);
887         return ret;
888 }
889
890 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
891 {
892         while (iter->nr_pages) {
893                 struct page *page =
894                         list_last_entry(&iter->pages, struct page, lru);
895
896                 prefetchw(&page->flags);
897                 list_del(&page->lru);
898                 iter->nr_pages--;
899
900                 if (!readpage_add_page(iter, page))
901                         return page;
902         }
903
904         return NULL;
905 }
906
907 #define for_each_readpage_page(_iter, _page)                            \
908         for (;                                                          \
909              ((_page) = __readpage_next_page(&(_iter)));)               \
910
911 static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
912 {
913         struct bvec_iter iter;
914         struct bio_vec bv;
915         bool compressed = bch2_extent_is_compressed(k);
916         unsigned nr_ptrs = bch2_extent_nr_dirty_ptrs(k);
917
918         bio_for_each_segment(bv, bio, iter) {
919                 struct bch_page_state *s = page_state(bv.bv_page);
920
921                 /* sectors in @k from the start of this page: */
922                 unsigned k_sectors = k.k->size - (iter.bi_sector - k.k->p.offset);
923
924                 unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);
925
926                 s->nr_replicas = !s->sectors
927                         ? nr_ptrs
928                         : min_t(unsigned, s->nr_replicas, nr_ptrs);
929
930                 BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
931                 s->sectors += page_sectors;
932
933                 s->compressed |= compressed;
934         }
935 }
936
937 static void readpage_bio_extend(struct readpages_iter *iter,
938                                 struct bio *bio, u64 offset,
939                                 bool get_more)
940 {
941         struct page *page;
942         pgoff_t page_offset;
943         int ret;
944
945         while (bio_end_sector(bio) < offset &&
946                bio->bi_vcnt < bio->bi_max_vecs) {
947                 page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
948
949                 if (iter->nr_pages) {
950                         page = list_last_entry(&iter->pages, struct page, lru);
951                         if (page->index != page_offset)
952                                 break;
953
954                         list_del(&page->lru);
955                         iter->nr_pages--;
956                 } else if (get_more) {
957                         rcu_read_lock();
958                         page = radix_tree_lookup(&iter->mapping->page_tree, page_offset);
959                         rcu_read_unlock();
960
961                         if (page && !radix_tree_exceptional_entry(page))
962                                 break;
963
964                         page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
965                         if (!page)
966                                 break;
967
968                         page->index = page_offset;
969                         ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
970                 } else {
971                         break;
972                 }
973
974                 ret = readpage_add_page(iter, page);
975                 if (ret)
976                         break;
977
978                 __bio_add_page(bio, page);
979         }
980
981         if (!iter->nr_pages)
982                 SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
983 }
984
985 static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
986                        struct bch_read_bio *rbio, u64 inum,
987                        struct readpages_iter *readpages_iter)
988 {
989         struct bio *bio = &rbio->bio;
990         int flags = BCH_READ_RETRY_IF_STALE|
991                 BCH_READ_MAY_PROMOTE;
992
993         rbio->c = c;
994         rbio->start_time = local_clock();
995
996         while (1) {
997                 BKEY_PADDED(k) tmp;
998                 struct bkey_s_c k;
999                 unsigned bytes;
1000
1001                 bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
1002
1003                 k = bch2_btree_iter_peek_slot(iter);
1004                 BUG_ON(!k.k);
1005
1006                 if (IS_ERR(k.k)) {
1007                         int ret = bch2_btree_iter_unlock(iter);
1008                         BUG_ON(!ret);
1009                         bcache_io_error(c, bio, "btree IO error %i", ret);
1010                         bio_endio(bio);
1011                         return;
1012                 }
1013
1014                 bkey_reassemble(&tmp.k, k);
1015                 bch2_btree_iter_unlock(iter);
1016                 k = bkey_i_to_s_c(&tmp.k);
1017
1018                 if (readpages_iter) {
1019                         bool want_full_extent = false;
1020
1021                         if (bkey_extent_is_data(k.k)) {
1022                                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1023                                 struct bch_extent_crc_unpacked crc;
1024                                 const union bch_extent_entry *i;
1025
1026                                 extent_for_each_crc(e, crc, i)
1027                                         want_full_extent |= ((crc.csum_type != 0) |
1028                                                              (crc.compression_type != 0));
1029                         }
1030
1031                         readpage_bio_extend(readpages_iter,
1032                                             bio, k.k->p.offset,
1033                                             want_full_extent);
1034                 }
1035
1036                 bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
1037                          bio->bi_iter.bi_sector) << 9;
1038                 swap(bio->bi_iter.bi_size, bytes);
1039
1040                 if (bytes == bio->bi_iter.bi_size)
1041                         flags |= BCH_READ_LAST_FRAGMENT;
1042
1043                 if (bkey_extent_is_allocation(k.k))
1044                         bch2_add_page_sectors(bio, k);
1045
1046                 bch2_read_extent(c, rbio, k, flags);
1047
1048                 if (flags & BCH_READ_LAST_FRAGMENT)
1049                         return;
1050
1051                 swap(bio->bi_iter.bi_size, bytes);
1052                 bio_advance(bio, bytes);
1053         }
1054 }
1055
1056 int bch2_readpages(struct file *file, struct address_space *mapping,
1057                    struct list_head *pages, unsigned nr_pages)
1058 {
1059         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1060         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1061         struct bch_io_opts opts = io_opts(c, inode);
1062         struct btree_iter iter;
1063         struct page *page;
1064         struct readpages_iter readpages_iter = {
1065                 .mapping = mapping, .nr_pages = nr_pages
1066         };
1067
1068         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
1069                              BTREE_ITER_SLOTS);
1070
1071         INIT_LIST_HEAD(&readpages_iter.pages);
1072         list_add(&readpages_iter.pages, pages);
1073         list_del_init(pages);
1074
1075         if (current->pagecache_lock != &mapping->add_lock)
1076                 pagecache_add_get(&mapping->add_lock);
1077
1078         while ((page = readpage_iter_next(&readpages_iter))) {
1079                 unsigned n = max_t(unsigned,
1080                                    min_t(unsigned, readpages_iter.nr_pages + 1,
1081                                          BIO_MAX_PAGES),
1082                                    c->sb.encoded_extent_max >> PAGE_SECTOR_SHIFT);
1083
1084                 struct bch_read_bio *rbio =
1085                         rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1086                                   opts);
1087
1088                 rbio->bio.bi_end_io = bch2_readpages_end_io;
1089                 bio_add_page_contig(&rbio->bio, page);
1090                 bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter);
1091         }
1092
1093         if (current->pagecache_lock != &mapping->add_lock)
1094                 pagecache_add_put(&mapping->add_lock);
1095
1096         return 0;
1097 }
1098
1099 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1100                              u64 inum, struct page *page)
1101 {
1102         struct btree_iter iter;
1103
1104         page_state_init_for_read(page);
1105
1106         bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1107         bio_add_page_contig(&rbio->bio, page);
1108
1109         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
1110                              BTREE_ITER_SLOTS);
1111         bchfs_read(c, &iter, rbio, inum, NULL);
1112 }
1113
1114 int bch2_readpage(struct file *file, struct page *page)
1115 {
1116         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1117         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1118         struct bch_io_opts opts = io_opts(c, inode);
1119         struct bch_read_bio *rbio;
1120
1121         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1122         rbio->bio.bi_end_io = bch2_readpages_end_io;
1123
1124         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1125         return 0;
1126 }
1127
1128 static void bch2_read_single_page_end_io(struct bio *bio)
1129 {
1130         complete(bio->bi_private);
1131 }
1132
1133 static int bch2_read_single_page(struct page *page,
1134                                  struct address_space *mapping)
1135 {
1136         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1137         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1138         struct bch_read_bio *rbio;
1139         int ret;
1140         DECLARE_COMPLETION_ONSTACK(done);
1141
1142         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1143                          io_opts(c, inode));
1144         rbio->bio.bi_private = &done;
1145         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1146
1147         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1148         wait_for_completion(&done);
1149
1150         ret = blk_status_to_errno(rbio->bio.bi_status);
1151         bio_put(&rbio->bio);
1152
1153         if (ret < 0)
1154                 return ret;
1155
1156         SetPageUptodate(page);
1157         return 0;
1158 }
1159
1160 /* writepages: */
1161
1162 struct bch_writepage_state {
1163         struct bch_writepage_io *io;
1164         struct bch_io_opts      opts;
1165 };
1166
1167 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1168                                                                   struct bch_inode_info *inode)
1169 {
1170         return (struct bch_writepage_state) { .opts = io_opts(c, inode) };
1171 }
1172
1173 static void bch2_writepage_io_free(struct closure *cl)
1174 {
1175         struct bch_writepage_io *io = container_of(cl,
1176                                         struct bch_writepage_io, cl);
1177
1178         bio_put(&io->op.op.wbio.bio);
1179 }
1180
1181 static void bch2_writepage_io_done(struct closure *cl)
1182 {
1183         struct bch_writepage_io *io = container_of(cl,
1184                                         struct bch_writepage_io, cl);
1185         struct bch_fs *c = io->op.op.c;
1186         struct bio *bio = &io->op.op.wbio.bio;
1187         struct bio_vec *bvec;
1188         unsigned i;
1189
1190         if (io->op.op.error) {
1191                 bio_for_each_segment_all(bvec, bio, i)
1192                         SetPageError(bvec->bv_page);
1193                 set_bit(AS_EIO, &io->op.inode->v.i_mapping->flags);
1194         }
1195
1196         /*
1197          * racing with fallocate can cause us to add fewer sectors than
1198          * expected - but we shouldn't add more sectors than expected:
1199          */
1200         BUG_ON(io->op.sectors_added > (s64) io->new_sectors);
1201
1202         /*
1203          * (error (due to going RO) halfway through a page can screw that up
1204          * slightly)
1205          * XXX wtf?
1206            BUG_ON(io->op.sectors_added - io->new_sectors >= (s64) PAGE_SECTORS);
1207          */
1208
1209         /*
1210          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1211          * before calling end_page_writeback:
1212          */
1213         if (io->op.sectors_added != io->new_sectors)
1214                 i_sectors_acct(c, io->op.inode, NULL,
1215                                io->op.sectors_added - (s64) io->new_sectors);
1216
1217         bio_for_each_segment_all(bvec, bio, i)
1218                 end_page_writeback(bvec->bv_page);
1219
1220         closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1221 }
1222
1223 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1224 {
1225         struct bch_writepage_io *io = w->io;
1226
1227         w->io = NULL;
1228         closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
1229         continue_at(&io->cl, bch2_writepage_io_done, NULL);
1230 }
1231
1232 /*
1233  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1234  * possible, else allocating a new one:
1235  */
1236 static void bch2_writepage_io_alloc(struct bch_fs *c,
1237                                     struct bch_writepage_state *w,
1238                                     struct bch_inode_info *inode,
1239                                     struct page *page,
1240                                     unsigned nr_replicas)
1241 {
1242         struct bch_write_op *op;
1243         u64 offset = (u64) page->index << PAGE_SECTOR_SHIFT;
1244
1245         w->io = container_of(bio_alloc_bioset(GFP_NOFS,
1246                                               BIO_MAX_PAGES,
1247                                               &c->writepage_bioset),
1248                              struct bch_writepage_io, op.op.wbio.bio);
1249
1250         closure_init(&w->io->cl, NULL);
1251         w->io->new_sectors      = 0;
1252         bch2_fswrite_op_init(&w->io->op, c, inode, w->opts, false);
1253         op                      = &w->io->op.op;
1254         op->nr_replicas         = nr_replicas;
1255         op->res.nr_replicas     = nr_replicas;
1256         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1257         op->pos                 = POS(inode->v.i_ino, offset);
1258         op->wbio.bio.bi_iter.bi_sector = offset;
1259 }
1260
1261 static int __bch2_writepage(struct page *page,
1262                             struct writeback_control *wbc,
1263                             void *data)
1264 {
1265         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1266         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1267         struct bch_writepage_state *w = data;
1268         struct bch_page_state new, old;
1269         unsigned offset;
1270         loff_t i_size = i_size_read(&inode->v);
1271         pgoff_t end_index = i_size >> PAGE_SHIFT;
1272
1273         EBUG_ON(!PageUptodate(page));
1274
1275         /* Is the page fully inside i_size? */
1276         if (page->index < end_index)
1277                 goto do_io;
1278
1279         /* Is the page fully outside i_size? (truncate in progress) */
1280         offset = i_size & (PAGE_SIZE - 1);
1281         if (page->index > end_index || !offset) {
1282                 unlock_page(page);
1283                 return 0;
1284         }
1285
1286         /*
1287          * The page straddles i_size.  It must be zeroed out on each and every
1288          * writepage invocation because it may be mmapped.  "A file is mapped
1289          * in multiples of the page size.  For a file that is not a multiple of
1290          * the  page size, the remaining memory is zeroed when mapped, and
1291          * writes to that region are not written out to the file."
1292          */
1293         zero_user_segment(page, offset, PAGE_SIZE);
1294 do_io:
1295         /* Before unlocking the page, transfer reservation to w->io: */
1296         old = page_state_cmpxchg(page_state(page), new, {
1297                 EBUG_ON(!new.reserved &&
1298                         (new.sectors != PAGE_SECTORS ||
1299                         new.compressed));
1300
1301                 if (new.reserved)
1302                         new.nr_replicas = new.reservation_replicas;
1303                 new.reserved = 0;
1304
1305                 new.compressed |= w->opts.compression != 0;
1306
1307                 new.sectors += new.dirty_sectors;
1308                 new.dirty_sectors = 0;
1309         });
1310
1311         BUG_ON(PageWriteback(page));
1312         set_page_writeback(page);
1313         unlock_page(page);
1314
1315         if (w->io &&
1316             (w->io->op.op.res.nr_replicas != new.nr_replicas ||
1317              !bio_can_add_page_contig(&w->io->op.op.wbio.bio, page)))
1318                 bch2_writepage_do_io(w);
1319
1320         if (!w->io)
1321                 bch2_writepage_io_alloc(c, w, inode, page, new.nr_replicas);
1322
1323         w->io->new_sectors += new.sectors - old.sectors;
1324
1325         BUG_ON(inode != w->io->op.inode);
1326         BUG_ON(bio_add_page_contig(&w->io->op.op.wbio.bio, page));
1327
1328         if (old.reserved)
1329                 w->io->op.op.res.sectors += old.reservation_replicas * PAGE_SECTORS;
1330
1331         w->io->op.new_i_size = i_size;
1332
1333         if (wbc->sync_mode == WB_SYNC_ALL)
1334                 w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;
1335
1336         return 0;
1337 }
1338
1339 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1340 {
1341         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1342         struct bch_writepage_state w =
1343                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1344         struct blk_plug plug;
1345         int ret;
1346
1347         blk_start_plug(&plug);
1348         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1349         if (w.io)
1350                 bch2_writepage_do_io(&w);
1351         blk_finish_plug(&plug);
1352         return ret;
1353 }
1354
1355 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1356 {
1357         struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1358         struct bch_writepage_state w =
1359                 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1360         int ret;
1361
1362         ret = __bch2_writepage(page, wbc, &w);
1363         if (w.io)
1364                 bch2_writepage_do_io(&w);
1365
1366         return ret;
1367 }
1368
1369 /* buffered writes: */
1370
1371 int bch2_write_begin(struct file *file, struct address_space *mapping,
1372                      loff_t pos, unsigned len, unsigned flags,
1373                      struct page **pagep, void **fsdata)
1374 {
1375         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1376         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1377         pgoff_t index = pos >> PAGE_SHIFT;
1378         unsigned offset = pos & (PAGE_SIZE - 1);
1379         struct page *page;
1380         int ret = -ENOMEM;
1381
1382         BUG_ON(inode_unhashed(&inode->v));
1383
1384         /* Not strictly necessary - same reason as mkwrite(): */
1385         pagecache_add_get(&mapping->add_lock);
1386
1387         page = grab_cache_page_write_begin(mapping, index, flags);
1388         if (!page)
1389                 goto err_unlock;
1390
1391         if (PageUptodate(page))
1392                 goto out;
1393
1394         /* If we're writing entire page, don't need to read it in first: */
1395         if (len == PAGE_SIZE)
1396                 goto out;
1397
1398         if (!offset && pos + len >= inode->v.i_size) {
1399                 zero_user_segment(page, len, PAGE_SIZE);
1400                 flush_dcache_page(page);
1401                 goto out;
1402         }
1403
1404         if (index > inode->v.i_size >> PAGE_SHIFT) {
1405                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1406                 flush_dcache_page(page);
1407                 goto out;
1408         }
1409 readpage:
1410         ret = bch2_read_single_page(page, mapping);
1411         if (ret)
1412                 goto err;
1413 out:
1414         ret = bch2_get_page_reservation(c, inode, page, true);
1415         if (ret) {
1416                 if (!PageUptodate(page)) {
1417                         /*
1418                          * If the page hasn't been read in, we won't know if we
1419                          * actually need a reservation - we don't actually need
1420                          * to read here, we just need to check if the page is
1421                          * fully backed by uncompressed data:
1422                          */
1423                         goto readpage;
1424                 }
1425
1426                 goto err;
1427         }
1428
1429         *pagep = page;
1430         return 0;
1431 err:
1432         unlock_page(page);
1433         put_page(page);
1434         *pagep = NULL;
1435 err_unlock:
1436         pagecache_add_put(&mapping->add_lock);
1437         return ret;
1438 }
1439
1440 int bch2_write_end(struct file *file, struct address_space *mapping,
1441                    loff_t pos, unsigned len, unsigned copied,
1442                    struct page *page, void *fsdata)
1443 {
1444         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1445         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1446
1447         lockdep_assert_held(&inode->v.i_rwsem);
1448
1449         if (unlikely(copied < len && !PageUptodate(page))) {
1450                 /*
1451                  * The page needs to be read in, but that would destroy
1452                  * our partial write - simplest thing is to just force
1453                  * userspace to redo the write:
1454                  */
1455                 zero_user(page, 0, PAGE_SIZE);
1456                 flush_dcache_page(page);
1457                 copied = 0;
1458         }
1459
1460         if (pos + copied > inode->v.i_size)
1461                 i_size_write(&inode->v, pos + copied);
1462
1463         if (copied) {
1464                 if (!PageUptodate(page))
1465                         SetPageUptodate(page);
1466                 if (!PageDirty(page))
1467                         set_page_dirty(page);
1468
1469                 inode->ei_last_dirtied = (unsigned long) current;
1470         } else {
1471                 bch2_put_page_reservation(c, inode, page);
1472         }
1473
1474         unlock_page(page);
1475         put_page(page);
1476         pagecache_add_put(&mapping->add_lock);
1477
1478         return copied;
1479 }
1480
1481 #define WRITE_BATCH_PAGES       32
1482
1483 static int __bch2_buffered_write(struct bch_inode_info *inode,
1484                                  struct address_space *mapping,
1485                                  struct iov_iter *iter,
1486                                  loff_t pos, unsigned len)
1487 {
1488         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1489         struct page *pages[WRITE_BATCH_PAGES];
1490         unsigned long index = pos >> PAGE_SHIFT;
1491         unsigned offset = pos & (PAGE_SIZE - 1);
1492         unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1493         unsigned i, copied = 0, nr_pages_copied = 0;
1494         int ret = 0;
1495
1496         BUG_ON(!len);
1497         BUG_ON(nr_pages > ARRAY_SIZE(pages));
1498
1499         for (i = 0; i < nr_pages; i++) {
1500                 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1501                 if (!pages[i]) {
1502                         nr_pages = i;
1503                         ret = -ENOMEM;
1504                         goto out;
1505                 }
1506         }
1507
1508         if (offset && !PageUptodate(pages[0])) {
1509                 ret = bch2_read_single_page(pages[0], mapping);
1510                 if (ret)
1511                         goto out;
1512         }
1513
1514         if ((pos + len) & (PAGE_SIZE - 1) &&
1515             !PageUptodate(pages[nr_pages - 1])) {
1516                 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1517                         zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1518                 } else {
1519                         ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1520                         if (ret)
1521                                 goto out;
1522                 }
1523         }
1524
1525         for (i = 0; i < nr_pages; i++) {
1526                 ret = bch2_get_page_reservation(c, inode, pages[i], true);
1527
1528                 if (ret && !PageUptodate(pages[i])) {
1529                         ret = bch2_read_single_page(pages[i], mapping);
1530                         if (ret)
1531                                 goto out;
1532
1533                         ret = bch2_get_page_reservation(c, inode, pages[i], true);
1534                 }
1535
1536                 if (ret)
1537                         goto out;
1538         }
1539
1540         if (mapping_writably_mapped(mapping))
1541                 for (i = 0; i < nr_pages; i++)
1542                         flush_dcache_page(pages[i]);
1543
1544         while (copied < len) {
1545                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1546                 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1547                 unsigned pg_bytes = min_t(unsigned, len - copied,
1548                                           PAGE_SIZE - pg_offset);
1549                 unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
1550                                                 iter, pg_offset, pg_bytes);
1551
1552                 if (!pg_copied)
1553                         break;
1554
1555                 flush_dcache_page(page);
1556                 iov_iter_advance(iter, pg_copied);
1557                 copied += pg_copied;
1558         }
1559
1560         if (!copied)
1561                 goto out;
1562
1563         nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1564         inode->ei_last_dirtied = (unsigned long) current;
1565
1566         if (pos + copied > inode->v.i_size)
1567                 i_size_write(&inode->v, pos + copied);
1568
1569         if (copied < len &&
1570             ((offset + copied) & (PAGE_SIZE - 1))) {
1571                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1572
1573                 if (!PageUptodate(page)) {
1574                         zero_user(page, 0, PAGE_SIZE);
1575                         copied -= (offset + copied) & (PAGE_SIZE - 1);
1576                 }
1577         }
1578 out:
1579         for (i = 0; i < nr_pages_copied; i++) {
1580                 if (!PageUptodate(pages[i]))
1581                         SetPageUptodate(pages[i]);
1582                 if (!PageDirty(pages[i]))
1583                         set_page_dirty(pages[i]);
1584                 unlock_page(pages[i]);
1585                 put_page(pages[i]);
1586         }
1587
1588         for (i = nr_pages_copied; i < nr_pages; i++) {
1589                 if (!PageDirty(pages[i]))
1590                         bch2_put_page_reservation(c, inode, pages[i]);
1591                 unlock_page(pages[i]);
1592                 put_page(pages[i]);
1593         }
1594
1595         return copied ?: ret;
1596 }
1597
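     /*
      * Outer buffered write loop: faults in the source pages, then feeds
      * __bch2_buffered_write() chunks of up to WRITE_BATCH_PAGES pages,
      * dropping back to a single-segment copy if an atomic usercopy makes
      * no progress.
      */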
1598 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1599 {
1600         struct file *file = iocb->ki_filp;
1601         struct address_space *mapping = file->f_mapping;
1602         struct bch_inode_info *inode = file_bch_inode(file);
1603         loff_t pos = iocb->ki_pos;
1604         ssize_t written = 0;
1605         int ret = 0;
1606
1607         pagecache_add_get(&mapping->add_lock);
1608
1609         do {
1610                 unsigned offset = pos & (PAGE_SIZE - 1);
1611                 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1612                               PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1613 again:
1614                 /*
1615                  * Bring in the user page that we will copy from _first_.
1616                  * Otherwise there's a nasty deadlock on copying from the
1617                  * same page as we're writing to, without it being marked
1618                  * up-to-date.
1619                  *
1620                  * Not only is this an optimisation, but it is also required
1621                  * to check that the address is actually valid, when atomic
1622                  * usercopies are used, below.
1623                  */
1624                 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1625                         bytes = min_t(unsigned long, iov_iter_count(iter),
1626                                       PAGE_SIZE - offset);
1627
1628                         if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1629                                 ret = -EFAULT;
1630                                 break;
1631                         }
1632                 }
1633
1634                 if (unlikely(fatal_signal_pending(current))) {
1635                         ret = -EINTR;
1636                         break;
1637                 }
1638
1639                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1640                 if (unlikely(ret < 0))
1641                         break;
1642
1643                 cond_resched();
1644
1645                 if (unlikely(ret == 0)) {
1646                         /*
1647                          * If we were unable to copy any data at all, we must
1648                          * fall back to a single segment length write.
1649                          *
1650                          * If we didn't fall back here, we could livelock
1651                          * because not all segments in the iov can be copied at
1652                          * once without a pagefault.
1653                          */
1654                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
1655                                       iov_iter_single_seg_count(iter));
1656                         goto again;
1657                 }
1658                 pos += ret;
1659                 written += ret;
1660
1661                 balance_dirty_pages_ratelimited(mapping);
1662         } while (iov_iter_count(iter));
1663
1664         pagecache_add_put(&mapping->add_lock);
1665
1666         return written ? written : ret;
1667 }
1668
1669 /* O_DIRECT reads */
1670
1671 static void bch2_dio_read_complete(struct closure *cl)
1672 {
1673         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1674
1675         dio->req->ki_complete(dio->req, dio->ret, 0);
1676         bio_check_pages_dirty(&dio->rbio.bio);  /* transfers ownership */
1677 }
1678
1679 static void bch2_direct_IO_read_endio(struct bio *bio)
1680 {
1681         struct dio_read *dio = bio->bi_private;
1682
1683         if (bio->bi_status)
1684                 dio->ret = blk_status_to_errno(bio->bi_status);
1685
1686         closure_put(&dio->cl);
1687 }
1688
1689 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1690 {
1691         bch2_direct_IO_read_endio(bio);
1692         bio_check_pages_dirty(bio);     /* transfers ownership */
1693 }
1694
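     /*
      * O_DIRECT read: a request may be split across several bios; each bio
      * holds a ref on dio->cl, and completion is driven by the closure -
      * either waited on synchronously here, or (for async kiocbs)
      * bch2_dio_read_complete() runs once the last bio completes and calls
      * ki_complete().
      */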
1695 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1696 {
1697         struct file *file = req->ki_filp;
1698         struct bch_inode_info *inode = file_bch_inode(file);
1699         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1700         struct bch_io_opts opts = io_opts(c, inode);
1701         struct dio_read *dio;
1702         struct bio *bio;
1703         loff_t offset = req->ki_pos;
1704         bool sync = is_sync_kiocb(req);
1705         ssize_t ret;
1706
1707         if ((offset|iter->count) & (block_bytes(c) - 1))
1708                 return -EINVAL;
1709
1710         ret = min_t(loff_t, iter->count,
1711                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1712         iov_iter_truncate(iter, round_up(ret, block_bytes(c)));
1713
1714         if (!ret)
1715                 return ret;
1716
1717         bio = bio_alloc_bioset(GFP_KERNEL,
1718                                iov_iter_npages(iter, BIO_MAX_PAGES),
1719                                &c->dio_read_bioset);
1720
1721         bio->bi_end_io = bch2_direct_IO_read_endio;
1722
1723         dio = container_of(bio, struct dio_read, rbio.bio);
1724         closure_init(&dio->cl, NULL);
1725
1726         /*
1727          * this is a _really_ horrible hack just to avoid an atomic sub at the
1728          * end:
1729          */
1730         if (!sync) {
1731                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1732                 atomic_set(&dio->cl.remaining,
1733                            CLOSURE_REMAINING_INITIALIZER -
1734                            CLOSURE_RUNNING +
1735                            CLOSURE_DESTRUCTOR);
1736         } else {
1737                 atomic_set(&dio->cl.remaining,
1738                            CLOSURE_REMAINING_INITIALIZER + 1);
1739         }
1740
1741         dio->req        = req;
1742         dio->ret        = ret;
1743
1744         goto start;
1745         while (iter->count) {
1746                 bio = bio_alloc_bioset(GFP_KERNEL,
1747                                        iov_iter_npages(iter, BIO_MAX_PAGES),
1748                                        &c->bio_read);
1749                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1750 start:
1751                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1752                 bio->bi_iter.bi_sector  = offset >> 9;
1753                 bio->bi_private         = dio;
1754
1755                 ret = bio_iov_iter_get_pages(bio, iter);
1756                 if (ret < 0) {
1757                         /* XXX: fault inject this path */
1758                         bio->bi_status = BLK_STS_RESOURCE;
1759                         bio_endio(bio);
1760                         break;
1761                 }
1762
1763                 offset += bio->bi_iter.bi_size;
1764                 bio_set_pages_dirty(bio);
1765
1766                 if (iter->count)
1767                         closure_get(&dio->cl);
1768
1769                 bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
1770         }
1771
1772         if (sync) {
1773                 closure_sync(&dio->cl);
1774                 closure_debug_destroy(&dio->cl);
1775                 ret = dio->ret;
1776                 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1777                 return ret;
1778         } else {
1779                 return -EIOCBQUEUED;
1780         }
1781 }
1782
1783 /* O_DIRECT writes */
1784
1785 static void bch2_dio_write_loop_async(struct closure *);
1786
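     /*
      * Main loop for O_DIRECT writes. On first entry the pagecache over the
      * range being written is flushed and invalidated; each pass then pins
      * the next chunk of user pages with bio_iov_iter_get_pages()
      * (switching to the submitting task's mm if we're running from the
      * async completion path) and submits it via bch2_write(). Synchronous
      * kiocbs wait on the closure and loop here; asynchronous ones return
      * -EIOCBQUEUED and re-enter through bch2_dio_write_loop_async().
      */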
1787 static long bch2_dio_write_loop(struct dio_write *dio)
1788 {
1789         struct kiocb *req = dio->req;
1790         struct address_space *mapping = req->ki_filp->f_mapping;
1791         struct bch_inode_info *inode = dio->iop.inode;
1792         struct bio *bio = &dio->iop.op.wbio.bio;
1793         struct bio_vec *bv;
1794         bool sync;
1795         long ret;
1796         int i;
1797
1798         if (dio->loop)
1799                 goto loop;
1800
1801         inode_dio_begin(&inode->v);
1802         __pagecache_block_get(&mapping->add_lock);
1803
1804         /* Write and invalidate pagecache range that we're writing to: */
1805         ret = write_invalidate_inode_pages_range(mapping, req->ki_pos,
1806                                 req->ki_pos + iov_iter_count(&dio->iter) - 1);
1807         if (unlikely(ret))
1808                 goto err;
1809
1810         while (1) {
1811                 BUG_ON(current->pagecache_lock);
1812                 current->pagecache_lock = &mapping->add_lock;
1813                 if (current != dio->task)
1814                         use_mm(dio->task->mm);
1815
1816                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
1817
1818                 if (current != dio->task)
1819                         unuse_mm(dio->task->mm);
1820                 current->pagecache_lock = NULL;
1821
1822                 if (unlikely(ret < 0))
1823                         goto err;
1824
1825                 dio->iop.op.pos = POS(inode->v.i_ino,
1826                                 (req->ki_pos >> 9) + dio->iop.op.written);
1827
1828                 task_io_account_write(bio->bi_iter.bi_size);
1829
1830                 closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
1831
1832                 if (!dio->sync && !dio->loop && dio->iter.count) {
1833                         struct iovec *iov = dio->inline_vecs;
1834
1835                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
1836                                 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
1837                                               GFP_KERNEL);
1838                                 if (unlikely(!iov)) {
1839                                         dio->iop.op.error = -ENOMEM;
1840                                         goto err_wait_io;
1841                                 }
1842
1843                                 dio->free_iov = true;
1844                         }
1845
1846                         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
1847                         dio->iter.iov = iov;
1848                 }
1849 err_wait_io:
1850                 dio->loop = true;
1851
1852                 if (!dio->sync) {
1853                         continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
1854                         return -EIOCBQUEUED;
1855                 }
1856
1857                 closure_sync(&dio->cl);
1858 loop:
1859                 bio_for_each_segment_all(bv, bio, i)
1860                         put_page(bv->bv_page);
1861                 if (!dio->iter.count || dio->iop.op.error)
1862                         break;
1863                 bio_reset(bio);
1864         }
1865
1866         ret = dio->iop.op.error ?: ((long) dio->iop.op.written << 9);
1867 err:
1868         __pagecache_block_put(&mapping->add_lock);
1869         bch2_disk_reservation_put(dio->iop.op.c, &dio->iop.op.res);
1870         bch2_quota_reservation_put(dio->iop.op.c, inode, &dio->quota_res);
1871
1872         if (dio->free_iov)
1873                 kfree(dio->iter.iov);
1874
1875         closure_debug_destroy(&dio->cl);
1876
1877         sync = dio->sync;
1878         bio_put(bio);
1879
1880         /* inode->i_dio_count is our ref on inode and thus bch_fs */
1881         inode_dio_end(&inode->v);
1882
1883         if (!sync) {
1884                 req->ki_complete(req, ret, 0);
1885                 ret = -EIOCBQUEUED;
1886         }
1887         return ret;
1888 }
1889
1890 static void bch2_dio_write_loop_async(struct closure *cl)
1891 {
1892         struct dio_write *dio = container_of(cl, struct dio_write, cl);
1893
1894         bch2_dio_write_loop(dio);
1895 }
1896
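     /*
      * Set up an O_DIRECT write: quota and disk reservations are taken up
      * front for the entire request (if the disk reservation fails we can
      * sometimes still proceed in "unalloc" mode, when the write only
      * overwrites extents that are already allocated on disk), then the
      * work is handed off to bch2_dio_write_loop(). Writes that extend
      * i_size are forced to be synchronous.
      */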
1897 static int bch2_direct_IO_write(struct kiocb *req,
1898                                 struct iov_iter *iter,
1899                                 bool swap)
1900 {
1901         struct file *file = req->ki_filp;
1902         struct bch_inode_info *inode = file_bch_inode(file);
1903         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1904         struct dio_write *dio;
1905         struct bio *bio;
1906         loff_t offset = req->ki_pos;
1907         ssize_t ret;
1908
1909         lockdep_assert_held(&inode->v.i_rwsem);
1910
1911         if (unlikely(!iter->count))
1912                 return 0;
1913
1914         if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
1915                 return -EINVAL;
1916
1917         bio = bio_alloc_bioset(GFP_KERNEL,
1918                                iov_iter_npages(iter, BIO_MAX_PAGES),
1919                                &c->dio_write_bioset);
1920         dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
1921         closure_init(&dio->cl, NULL);
1922         dio->req                = req;
1923         dio->task               = current;
1924         dio->loop               = false;
1925         dio->sync               = is_sync_kiocb(req) ||
1926                 offset + iter->count > inode->v.i_size;
1927         dio->free_iov           = false;
1928         dio->quota_res.sectors  = 0;
1929         dio->iter               = *iter;
1930         bch2_fswrite_op_init(&dio->iop, c, inode, io_opts(c, inode), true);
1931         dio->iop.op.write_point = writepoint_hashed((unsigned long) dio->task);
1932         dio->iop.op.flags |= BCH_WRITE_NOPUT_RESERVATION;
1933
1934         if ((req->ki_flags & IOCB_DSYNC) &&
1935             !c->opts.journal_flush_disabled)
1936                 dio->iop.op.flags |= BCH_WRITE_FLUSH;
1937
1938         ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
1939                                          iter->count >> 9, true);
1940         if (unlikely(ret))
1941                 goto err;
1942
1943         ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9,
1944                                         dio->iop.op.opts.data_replicas, 0);
1945         if (unlikely(ret)) {
1946                 if (bch2_check_range_allocated(c, POS(inode->v.i_ino,
1947                                                       offset >> 9),
1948                                                iter->count >> 9))
1949                         goto err;
1950
1951                 dio->iop.unalloc = true;
1952         }
1953
1954         dio->iop.op.nr_replicas = dio->iop.op.res.nr_replicas;
1955
1956         return bch2_dio_write_loop(dio);
1957 err:
1958         bch2_disk_reservation_put(c, &dio->iop.op.res);
1959         bch2_quota_reservation_put(c, inode, &dio->quota_res);
1960         closure_debug_destroy(&dio->cl);
1961         bio_put(bio);
1962         return ret;
1963 }
1964
1965 ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
1966 {
1967         struct blk_plug plug;
1968         ssize_t ret;
1969
1970         blk_start_plug(&plug);
1971         ret = iov_iter_rw(iter) == WRITE
1972                 ? bch2_direct_IO_write(req, iter, false)
1973                 : bch2_direct_IO_read(req, iter);
1974         blk_finish_plug(&plug);
1975
1976         return ret;
1977 }
1978
1979 static ssize_t
1980 bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
1981 {
1982         return bch2_direct_IO_write(iocb, iter, true);
1983 }
1984
1985 static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
1986 {
1987         struct file *file = iocb->ki_filp;
1988         struct bch_inode_info *inode = file_bch_inode(file);
1989         ssize_t ret;
1990
1991         /* We can write back this queue in page reclaim */
1992         current->backing_dev_info = inode_to_bdi(&inode->v);
1993         ret = file_remove_privs(file);
1994         if (ret)
1995                 goto out;
1996
1997         ret = file_update_time(file);
1998         if (ret)
1999                 goto out;
2000
2001         ret = iocb->ki_flags & IOCB_DIRECT
2002                 ? bch2_direct_write(iocb, from)
2003                 : bch2_buffered_write(iocb, from);
2004
2005         if (likely(ret > 0))
2006                 iocb->ki_pos += ret;
2007 out:
2008         current->backing_dev_info = NULL;
2009         return ret;
2010 }
2011
2012 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2013 {
2014         struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
2015         bool direct = iocb->ki_flags & IOCB_DIRECT;
2016         ssize_t ret;
2017
2018         inode_lock(&inode->v);
2019         ret = generic_write_checks(iocb, from);
2020         if (ret > 0)
2021                 ret = __bch2_write_iter(iocb, from);
2022         inode_unlock(&inode->v);
2023
2024         if (ret > 0 && !direct)
2025                 ret = generic_write_sync(iocb, ret);
2026
2027         return ret;
2028 }
2029
2030 /* fsync: */
2031
2032 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2033 {
2034         struct bch_inode_info *inode = file_bch_inode(file);
2035         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2036         int ret;
2037
2038         ret = filemap_write_and_wait_range(inode->v.i_mapping, start, end);
2039         if (ret)
2040                 return ret;
2041
2042         if (c->opts.journal_flush_disabled)
2043                 return 0;
2044
2045         return bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);
2046 }
2047
2048 /* truncate: */
2049
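     /*
      * Zero out the part of a page that lies inside [start, end) at a
      * truncate/hole-punch boundary. If the page isn't already in the page
      * cache, the extent btree is checked first and the page is only
      * created (and read in) if there's actually data there to zero.
      */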
2050 static int __bch2_truncate_page(struct bch_inode_info *inode,
2051                                 pgoff_t index, loff_t start, loff_t end)
2052 {
2053         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2054         struct address_space *mapping = inode->v.i_mapping;
2055         unsigned start_offset = start & (PAGE_SIZE - 1);
2056         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2057         struct page *page;
2058         int ret = 0;
2059
2060         /* Page boundary? Nothing to do */
2061         if (!((index == start >> PAGE_SHIFT && start_offset) ||
2062               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2063                 return 0;
2064
2065         /* Above i_size? */
2066         if (index << PAGE_SHIFT >= inode->v.i_size)
2067                 return 0;
2068
2069         page = find_lock_page(mapping, index);
2070         if (!page) {
2071                 struct btree_iter iter;
2072                 struct bkey_s_c k = bkey_s_c_null;
2073
2074                 /*
2075                  * XXX: we're doing two index lookups when we end up reading the
2076                  * page
2077                  */
2078                 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2079                                    POS(inode->v.i_ino,
2080                                        index << PAGE_SECTOR_SHIFT), 0, k) {
2081                         if (bkey_cmp(bkey_start_pos(k.k),
2082                                      POS(inode->v.i_ino,
2083                                          (index + 1) << PAGE_SECTOR_SHIFT)) >= 0)
2084                                 break;
2085
2086                         if (k.k->type != KEY_TYPE_DISCARD &&
2087                             k.k->type != BCH_RESERVATION) {
2088                                 bch2_btree_iter_unlock(&iter);
2089                                 goto create;
2090                         }
2091                 }
2092                 bch2_btree_iter_unlock(&iter);
2093                 return 0;
2094 create:
2095                 page = find_or_create_page(mapping, index, GFP_KERNEL);
2096                 if (unlikely(!page)) {
2097                         ret = -ENOMEM;
2098                         goto out;
2099                 }
2100         }
2101
2102         if (!PageUptodate(page)) {
2103                 ret = bch2_read_single_page(page, mapping);
2104                 if (ret)
2105                         goto unlock;
2106         }
2107
2108         /*
2109          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2110          *
2111          * XXX: because we aren't currently tracking whether the page has actual
2112          * data in it (vs. just 0s, or only partially written) this is wrong. Ick.
2113          */
2114         ret = bch2_get_page_reservation(c, inode, page, false);
2115         BUG_ON(ret);
2116
2117         if (index == start >> PAGE_SHIFT &&
2118             index == end >> PAGE_SHIFT)
2119                 zero_user_segment(page, start_offset, end_offset);
2120         else if (index == start >> PAGE_SHIFT)
2121                 zero_user_segment(page, start_offset, PAGE_SIZE);
2122         else if (index == end >> PAGE_SHIFT)
2123                 zero_user_segment(page, 0, end_offset);
2124
2125         if (!PageDirty(page))
2126                 set_page_dirty(page);
2127 unlock:
2128         unlock_page(page);
2129         put_page(page);
2130 out:
2131         return ret;
2132 }
2133
2134 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2135 {
2136         return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2137                                     from, from + PAGE_SIZE);
2138 }
2139
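     /*
      * Truncate: update i_size, sync any appended data past the old on-disk
      * i_size, and - for a shrinking truncate - zero the new partial tail
      * page and drop extents past the new size. The i_sectors_hook keeps
      * BCH_INODE_I_SIZE_DIRTY set in the on-disk inode while extents are
      * being modified; it's cleared by i_sectors_dirty_finish() unless we
      * errored out with data still left above i_size.
      */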
2140 int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
2141 {
2142         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2143         struct address_space *mapping = inode->v.i_mapping;
2144         bool shrink = iattr->ia_size <= inode->v.i_size;
2145         struct i_sectors_hook i_sectors_hook =
2146                 i_sectors_hook_init(inode, BCH_INODE_I_SIZE_DIRTY);
2147         int ret = 0;
2148
2149         inode_dio_wait(&inode->v);
2150         pagecache_block_get(&mapping->add_lock);
2151
2152         truncate_setsize(&inode->v, iattr->ia_size);
2153
2154         /* sync appends.. */
2155         /* XXX what protects inode->i_size? */
2156         if (iattr->ia_size > inode->ei_inode.bi_size)
2157                 ret = filemap_write_and_wait_range(mapping,
2158                                                    inode->ei_inode.bi_size, S64_MAX);
2159         if (ret)
2160                 goto err_put_pagecache;
2161
2162         i_sectors_hook.new_i_size = iattr->ia_size;
2163
2164         ret = i_sectors_dirty_start(c, &i_sectors_hook);
2165         if (unlikely(ret))
2166                 goto err;
2167
2168         /*
2169          * There might be persistent reservations (from fallocate())
2170          * above i_size, which bch2_inode_truncate() will discard - we're
2171          * only supposed to discard them if we're doing a real truncate
2172          * here (new i_size < current i_size):
2173          */
2174         if (shrink) {
2175                 ret = bch2_truncate_page(inode, iattr->ia_size);
2176                 if (unlikely(ret))
2177                         goto err;
2178
2179                 ret = bch2_inode_truncate(c, inode->v.i_ino,
2180                                           round_up(iattr->ia_size, PAGE_SIZE) >> 9,
2181                                           &i_sectors_hook.hook,
2182                                           &inode->ei_journal_seq);
2183                 if (unlikely(ret))
2184                         goto err;
2185         }
2186
2187         setattr_copy(&inode->v, iattr);
2188         inode->v.i_mtime = inode->v.i_ctime = current_time(&inode->v);
2189 err:
2190         /*
2191          * On error - in particular, bch2_truncate_page() error - don't clear
2192          * I_SIZE_DIRTY, as we've left data above i_size!
2193          */
2194         if (ret)
2195                 i_sectors_hook.flags &= ~BCH_INODE_I_SIZE_DIRTY;
2196
2197         ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2198 err_put_pagecache:
2199         pagecache_block_put(&mapping->add_lock);
2200         return ret;
2201 }
2202
2203 /* fallocate: */
2204
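     /*
      * FALLOC_FL_PUNCH_HOLE: zero the partial pages at each end of the
      * range, drop the affected pagecache, and delete the extents that are
      * fully covered by the punch from the extents btree.
      */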
2205 static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2206 {
2207         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2208         struct address_space *mapping = inode->v.i_mapping;
2209         u64 ino = inode->v.i_ino;
2210         u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
2211         u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
2212         int ret = 0;
2213
2214         inode_lock(&inode->v);
2215         inode_dio_wait(&inode->v);
2216         pagecache_block_get(&mapping->add_lock);
2217
2218         ret = __bch2_truncate_page(inode,
2219                                    offset >> PAGE_SHIFT,
2220                                    offset, offset + len);
2221         if (unlikely(ret))
2222                 goto err;
2223
2224         if (offset >> PAGE_SHIFT !=
2225             (offset + len) >> PAGE_SHIFT) {
2226                 ret = __bch2_truncate_page(inode,
2227                                            (offset + len) >> PAGE_SHIFT,
2228                                            offset, offset + len);
2229                 if (unlikely(ret))
2230                         goto err;
2231         }
2232
2233         truncate_pagecache_range(&inode->v, offset, offset + len - 1);
2234
2235         if (discard_start < discard_end) {
2236                 /*
2237                  * We need to pass in a disk reservation here because we might
2238                  * be splitting a compressed extent into two. This isn't a
2239                  * problem with truncate because truncate will never split an
2240                  * extent, only truncate it...
2241                  */
2242                 struct disk_reservation disk_res =
2243                         bch2_disk_reservation_init(c, 0);
2244                 struct i_sectors_hook i_sectors_hook =
2245                         i_sectors_hook_init(inode, 0);
2246                 int ret;
2247
2248                 ret = i_sectors_dirty_start(c, &i_sectors_hook);
2249                 if (unlikely(ret))
2250                         goto err;
2251
2252                 ret = bch2_btree_delete_range(c,
2253                                 BTREE_ID_EXTENTS,
2254                                 POS(ino, discard_start),
2255                                 POS(ino, discard_end),
2256                                 ZERO_VERSION,
2257                                 &disk_res,
2258                                 &i_sectors_hook.hook,
2259                                 &inode->ei_journal_seq);
2260
2261                 ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2262         }
2263 err:
2264         pagecache_block_put(&mapping->add_lock);
2265         inode_unlock(&inode->v);
2266
2267         return ret;
2268 }
2269
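     /*
      * FALLOC_FL_COLLAPSE_RANGE: implemented with two linked btree
      * iterators - extents are read at dst + len via the src iterator and
      * reinserted at dst, shifting everything after the collapsed range
      * down by len; the inode is then truncated to the new, smaller size.
      */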
2270 static long bch2_fcollapse(struct bch_inode_info *inode,
2271                            loff_t offset, loff_t len)
2272 {
2273         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2274         struct address_space *mapping = inode->v.i_mapping;
2275         struct btree_iter src;
2276         struct btree_iter dst;
2277         BKEY_PADDED(k) copy;
2278         struct bkey_s_c k;
2279         struct i_sectors_hook i_sectors_hook = i_sectors_hook_init(inode, 0);
2280         loff_t new_size;
2281         int ret;
2282
2283         if ((offset | len) & (PAGE_SIZE - 1))
2284                 return -EINVAL;
2285
2286         bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS,
2287                              POS(inode->v.i_ino, offset >> 9),
2288                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2289         /* position will be set from dst iter's position: */
2290         bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN,
2291                              BTREE_ITER_SLOTS);
2292         bch2_btree_iter_link(&src, &dst);
2293
2294         /*
2295          * We need i_mutex to keep the page cache consistent with the extents
2296          * btree, and the btree consistent with i_size - we don't need outside
2297          * locking for the extents btree itself, because we're using linked
2298          * iterators
2299          */
2300         inode_lock(&inode->v);
2301         inode_dio_wait(&inode->v);
2302         pagecache_block_get(&mapping->add_lock);
2303
2304         ret = -EINVAL;
2305         if (offset + len >= inode->v.i_size)
2306                 goto err;
2307
2308         if (inode->v.i_size < len)
2309                 goto err;
2310
2311         new_size = inode->v.i_size - len;
2312
2313         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2314         if (ret)
2315                 goto err;
2316
2317         ret = i_sectors_dirty_start(c, &i_sectors_hook);
2318         if (ret)
2319                 goto err;
2320
2321         while (bkey_cmp(dst.pos,
2322                         POS(inode->v.i_ino,
2323                             round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
2324                 struct disk_reservation disk_res;
2325
2326                 bch2_btree_iter_set_pos(&src,
2327                         POS(dst.pos.inode, dst.pos.offset + (len >> 9)));
2328
2329                 k = bch2_btree_iter_peek_slot(&src);
2330                 if ((ret = btree_iter_err(k)))
2331                         goto btree_iter_err;
2332
2333                 bkey_reassemble(&copy.k, k);
2334
2335                 if (bkey_deleted(&copy.k.k))
2336                         copy.k.k.type = KEY_TYPE_DISCARD;
2337
2338                 bch2_cut_front(src.pos, &copy.k);
2339                 copy.k.k.p.offset -= len >> 9;
2340
2341                 BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));
2342
2343                 ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
2344                                 bch2_extent_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k)),
2345                                 BCH_DISK_RESERVATION_NOFAIL);
2346                 BUG_ON(ret);
2347
2348                 ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
2349                                            &inode->ei_journal_seq,
2350                                            BTREE_INSERT_ATOMIC|
2351                                            BTREE_INSERT_NOFAIL,
2352                                            BTREE_INSERT_ENTRY(&dst, &copy.k));
2353                 bch2_disk_reservation_put(c, &disk_res);
2354 btree_iter_err:
2355                 if (ret == -EINTR)
2356                         ret = 0;
2357                 if (ret)
2358                         goto err_put_sectors_dirty;
2359                 /*
2360                  * XXX: if we error here we've left data with multiple
2361                  * pointers... which isn't a _super_ serious problem...
2362                  */
2363
2364                 bch2_btree_iter_cond_resched(&src);
2365         }
2366
2367         bch2_btree_iter_unlock(&src);
2368         bch2_btree_iter_unlock(&dst);
2369
2370         ret = bch2_inode_truncate(c, inode->v.i_ino,
2371                                  round_up(new_size, PAGE_SIZE) >> 9,
2372                                  &i_sectors_hook.hook,
2373                                  &inode->ei_journal_seq);
2374         if (ret)
2375                 goto err_put_sectors_dirty;
2376
2377         i_size_write(&inode->v, new_size);
2378         i_sectors_hook.new_i_size = new_size;
2379 err_put_sectors_dirty:
2380         ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2381 err:
2382         pagecache_block_put(&mapping->add_lock);
2383         inode_unlock(&inode->v);
2384
2385         bch2_btree_iter_unlock(&src);
2386         bch2_btree_iter_unlock(&dst);
2387         return ret;
2388 }
2389
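     /*
      * fallocate() proper (and FALLOC_FL_ZERO_RANGE): walk the requested
      * range and replace holes - and, for ZERO_RANGE, existing data - with
      * BCH_RESERVATION extents, taking quota and disk reservations as
      * needed. Unless FALLOC_FL_KEEP_SIZE was given, i_size is extended to
      * cover the range afterwards.
      */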
2390 static long bch2_fallocate(struct bch_inode_info *inode, int mode,
2391                            loff_t offset, loff_t len)
2392 {
2393         struct address_space *mapping = inode->v.i_mapping;
2394         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2395         struct i_sectors_hook i_sectors_hook = i_sectors_hook_init(inode, 0);
2396         struct btree_iter iter;
2397         struct bpos end_pos;
2398         loff_t block_start, block_end;
2399         loff_t end = offset + len;
2400         unsigned sectors;
2401         unsigned replicas = io_opts(c, inode).data_replicas;
2402         int ret;
2403
2404         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
2405                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2406
2407         inode_lock(&inode->v);
2408         inode_dio_wait(&inode->v);
2409         pagecache_block_get(&mapping->add_lock);
2410
2411         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2412                 ret = inode_newsize_ok(&inode->v, end);
2413                 if (ret)
2414                         goto err;
2415         }
2416
2417         if (mode & FALLOC_FL_ZERO_RANGE) {
2418                 ret = __bch2_truncate_page(inode,
2419                                            offset >> PAGE_SHIFT,
2420                                            offset, end);
2421
2422                 if (!ret &&
2423                     offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2424                         ret = __bch2_truncate_page(inode,
2425                                                    end >> PAGE_SHIFT,
2426                                                    offset, end);
2427
2428                 if (unlikely(ret))
2429                         goto err;
2430
2431                 truncate_pagecache_range(&inode->v, offset, end - 1);
2432
2433                 block_start     = round_up(offset, PAGE_SIZE);
2434                 block_end       = round_down(end, PAGE_SIZE);
2435         } else {
2436                 block_start     = round_down(offset, PAGE_SIZE);
2437                 block_end       = round_up(end, PAGE_SIZE);
2438         }
2439
2440         bch2_btree_iter_set_pos(&iter, POS(inode->v.i_ino, block_start >> 9));
2441         end_pos = POS(inode->v.i_ino, block_end >> 9);
2442
2443         ret = i_sectors_dirty_start(c, &i_sectors_hook);
2444         if (unlikely(ret))
2445                 goto err;
2446
2447         while (bkey_cmp(iter.pos, end_pos) < 0) {
2448                 struct disk_reservation disk_res = { 0 };
2449                 struct bkey_i_reservation reservation;
2450                 struct bkey_s_c k;
2451
2452                 k = bch2_btree_iter_peek_slot(&iter);
2453                 if ((ret = btree_iter_err(k)))
2454                         goto btree_iter_err;
2455
2456                 /* already reserved */
2457                 if (k.k->type == BCH_RESERVATION &&
2458                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2459                         bch2_btree_iter_next_slot(&iter);
2460                         continue;
2461                 }
2462
2463                 if (bkey_extent_is_data(k.k)) {
2464                         if (!(mode & FALLOC_FL_ZERO_RANGE)) {
2465                                 bch2_btree_iter_next_slot(&iter);
2466                                 continue;
2467                         }
2468                 }
2469
2470                 bkey_reservation_init(&reservation.k_i);
2471                 reservation.k.type      = BCH_RESERVATION;
2472                 reservation.k.p         = k.k->p;
2473                 reservation.k.size      = k.k->size;
2474
2475                 bch2_cut_front(iter.pos, &reservation.k_i);
2476                 bch2_cut_back(end_pos, &reservation.k);
2477
2478                 sectors = reservation.k.size;
2479                 reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);
2480
2481                 if (!bkey_extent_is_allocation(k.k)) {
2482                         ret = bch2_quota_reservation_add(c, inode,
2483                                         &i_sectors_hook.quota_res,
2484                                         sectors, true);
2485                         if (unlikely(ret))
2486                                 goto err_put_sectors_dirty;
2487                 }
2488
2489                 if (reservation.v.nr_replicas < replicas ||
2490                     bch2_extent_is_compressed(k)) {
2491                         ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2492                                                         replicas, 0);
2493                         if (unlikely(ret))
2494                                 goto err_put_sectors_dirty;
2495
2496                         reservation.v.nr_replicas = disk_res.nr_replicas;
2497                 }
2498
2499                 ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
2500                                           &inode->ei_journal_seq,
2501                                           BTREE_INSERT_ATOMIC|
2502                                           BTREE_INSERT_NOFAIL,
2503                                           BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
2504                 bch2_disk_reservation_put(c, &disk_res);
2505 btree_iter_err:
2506                 if (ret < 0 && ret != -EINTR)
2507                         goto err_put_sectors_dirty;
2508
2509         }
2510         bch2_btree_iter_unlock(&iter);
2511
2512         ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2513
2514         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2515             end > inode->v.i_size) {
2516                 i_size_write(&inode->v, end);
2517
2518                 mutex_lock(&inode->ei_update_lock);
2519                 ret = bch2_write_inode_size(c, inode, inode->v.i_size);
2520                 mutex_unlock(&inode->ei_update_lock);
2521         }
2522
2523         /* blech */
2524         if ((mode & FALLOC_FL_KEEP_SIZE) &&
2525             (mode & FALLOC_FL_ZERO_RANGE) &&
2526             inode->ei_inode.bi_size != inode->v.i_size) {
2527                 /* sync appends.. */
2528                 ret = filemap_write_and_wait_range(mapping,
2529                                         inode->ei_inode.bi_size, S64_MAX);
2530                 if (ret)
2531                         goto err;
2532
2533                 if (inode->ei_inode.bi_size != inode->v.i_size) {
2534                         mutex_lock(&inode->ei_update_lock);
2535                         ret = bch2_write_inode_size(c, inode, inode->v.i_size);
2536                         mutex_unlock(&inode->ei_update_lock);
2537                 }
2538         }
2539
2540         pagecache_block_put(&mapping->add_lock);
2541         inode_unlock(&inode->v);
2542
2543         return 0;
2544 err_put_sectors_dirty:
2545         ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2546 err:
2547         bch2_btree_iter_unlock(&iter);
2548         pagecache_block_put(&mapping->add_lock);
2549         inode_unlock(&inode->v);
2550         return ret;
2551 }
2552
2553 long bch2_fallocate_dispatch(struct file *file, int mode,
2554                              loff_t offset, loff_t len)
2555 {
2556         struct bch_inode_info *inode = file_bch_inode(file);
2557
2558         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2559                 return bch2_fallocate(inode, mode, offset, len);
2560
2561         if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2562                 return bch2_fpunch(inode, offset, len);
2563
2564         if (mode == FALLOC_FL_COLLAPSE_RANGE)
2565                 return bch2_fcollapse(inode, offset, len);
2566
2567         return -EOPNOTSUPP;
2568 }
2569
2570 /* fseek: */
2571
2572 static bool page_is_data(struct page *page)
2573 {
2574         /* XXX: should only have to check PageDirty */
2575         return PagePrivate(page) &&
2576                 (page_state(page)->sectors ||
2577                  page_state(page)->dirty_sectors);
2578 }
2579
2580 static loff_t bch2_next_pagecache_data(struct inode *vinode,
2581                                        loff_t start_offset,
2582                                        loff_t end_offset)
2583 {
2584         struct address_space *mapping = vinode->i_mapping;
2585         struct page *page;
2586         pgoff_t index;
2587
2588         for (index = start_offset >> PAGE_SHIFT;
2589              index < end_offset >> PAGE_SHIFT;
2590              index++) {
2591                 if (find_get_pages(mapping, &index, 1, &page)) {
2592                         lock_page(page);
2593
2594                         if (page_is_data(page))
2595                                 end_offset =
2596                                         min(end_offset,
2597                                         max(start_offset,
2598                                             ((loff_t) index) << PAGE_SHIFT));
2599                         unlock_page(page);
2600                         put_page(page);
2601                 } else {
2602                         break;
2603                 }
2604         }
2605
2606         return end_offset;
2607 }
2608
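     /*
      * SEEK_DATA: find the first data extent at or after the requested
      * offset in the extents btree, then also scan the pagecache over that
      * range, since dirty pages may contain data not yet reflected in the
      * btree, and return whichever offset is smaller.
      */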
2609 static loff_t bch2_seek_data(struct file *file, u64 offset)
2610 {
2611         struct bch_inode_info *inode = file_bch_inode(file);
2612         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2613         struct btree_iter iter;
2614         struct bkey_s_c k;
2615         u64 isize, next_data = MAX_LFS_FILESIZE;
2616         int ret;
2617
2618         isize = i_size_read(&inode->v);
2619         if (offset >= isize)
2620                 return -ENXIO;
2621
2622         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2623                            POS(inode->v.i_ino, offset >> 9), 0, k) {
2624                 if (k.k->p.inode != inode->v.i_ino) {
2625                         break;
2626                 } else if (bkey_extent_is_data(k.k)) {
2627                         next_data = max(offset, bkey_start_offset(k.k) << 9);
2628                         break;
2629                 } else if (k.k->p.offset << 9 > isize)
2630                         break;
2631         }
2632
2633         ret = bch2_btree_iter_unlock(&iter);
2634         if (ret)
2635                 return ret;
2636
2637         if (next_data > offset)
2638                 next_data = bch2_next_pagecache_data(&inode->v,
2639                                                      offset, next_data);
2640
2641         if (next_data > isize)
2642                 return -ENXIO;
2643
2644         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
2645 }
2646
2647 static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
2648 {
2649         struct page *page;
2650         bool ret;
2651
2652         page = find_lock_entry(mapping, index);
2653         if (!page || radix_tree_exception(page))
2654                 return false;
2655
2656         ret = page_is_data(page);
2657         unlock_page(page);
             put_page(page);        /* drop the ref taken by find_lock_entry() */
2658
2659         return ret;
2660 }
2661
2662 static loff_t bch2_next_pagecache_hole(struct inode *vinode,
2663                                        loff_t start_offset,
2664                                        loff_t end_offset)
2665 {
2666         struct address_space *mapping = vinode->i_mapping;
2667         pgoff_t index;
2668
2669         for (index = start_offset >> PAGE_SHIFT;
2670              index < end_offset >> PAGE_SHIFT;
2671              index++)
2672                 if (!page_slot_is_data(mapping, index))
2673                         end_offset = max(start_offset,
2674                                          ((loff_t) index) << PAGE_SHIFT);
2675
2676         return end_offset;
2677 }
2678
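     /*
      * SEEK_HOLE: walk the extents btree from the requested offset; for
      * each gap in the btree, check that the pagecache doesn't have data
      * over the same range (dirty pages not yet written back) before
      * treating it as a hole.
      */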
2679 static loff_t bch2_seek_hole(struct file *file, u64 offset)
2680 {
2681         struct bch_inode_info *inode = file_bch_inode(file);
2682         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2683         struct btree_iter iter;
2684         struct bkey_s_c k;
2685         u64 isize, next_hole = MAX_LFS_FILESIZE;
2686         int ret;
2687
2688         isize = i_size_read(&inode->v);
2689         if (offset >= isize)
2690                 return -ENXIO;
2691
2692         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2693                            POS(inode->v.i_ino, offset >> 9),
2694                            BTREE_ITER_SLOTS, k) {
2695                 if (k.k->p.inode != inode->v.i_ino) {
2696                         next_hole = bch2_next_pagecache_hole(&inode->v,
2697                                         offset, MAX_LFS_FILESIZE);
2698                         break;
2699                 } else if (!bkey_extent_is_data(k.k)) {
2700                         next_hole = bch2_next_pagecache_hole(&inode->v,
2701                                         max(offset, bkey_start_offset(k.k) << 9),
2702                                         k.k->p.offset << 9);
2703
2704                         if (next_hole < k.k->p.offset << 9)
2705                                 break;
2706                 } else {
2707                         offset = max(offset, bkey_start_offset(k.k) << 9);
2708                 }
2709         }
2710
2711         ret = bch2_btree_iter_unlock(&iter);
2712         if (ret)
2713                 return ret;
2714
2715         if (next_hole > isize)
2716                 next_hole = isize;
2717
2718         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
2719 }
2720
2721 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
2722 {
2723         switch (whence) {
2724         case SEEK_SET:
2725         case SEEK_CUR:
2726         case SEEK_END:
2727                 return generic_file_llseek(file, offset, whence);
2728         case SEEK_DATA:
2729                 return bch2_seek_data(file, offset);
2730         case SEEK_HOLE:
2731                 return bch2_seek_hole(file, offset);
2732         }
2733
2734         return -EINVAL;
2735 }
2736
2737 void bch2_fs_fsio_exit(struct bch_fs *c)
2738 {
2739         bioset_exit(&c->dio_write_bioset);
2740         bioset_exit(&c->dio_read_bioset);
2741         bioset_exit(&c->writepage_bioset);
2742 }
2743
2744 int bch2_fs_fsio_init(struct bch_fs *c)
2745 {
2746         int ret = 0;
2747
2748         pr_verbose_init(c->opts, "");
2749
2750         if (bioset_init(&c->writepage_bioset,
2751                         4, offsetof(struct bch_writepage_io, op.op.wbio.bio),
2752                         BIOSET_NEED_BVECS) ||
2753             bioset_init(&c->dio_read_bioset,
2754                         4, offsetof(struct dio_read, rbio.bio),
2755                         BIOSET_NEED_BVECS) ||
2756             bioset_init(&c->dio_write_bioset,
2757                         4, offsetof(struct dio_write, iop.op.wbio.bio),
2758                         BIOSET_NEED_BVECS))
2759                 ret = -ENOMEM;
2760
2761         pr_verbose_init(c->opts, "ret %i", ret);
2762         return ret;
2763 }
2764
2765 #endif /* NO_BCACHEFS_FS */