libbcachefs/fs-io.c
1 #ifndef NO_BCACHEFS_FS
2
3 #include "bcachefs.h"
4 #include "alloc_foreground.h"
5 #include "btree_update.h"
6 #include "buckets.h"
7 #include "clock.h"
8 #include "error.h"
9 #include "extents.h"
10 #include "fs.h"
11 #include "fs-io.h"
12 #include "fsck.h"
13 #include "inode.h"
14 #include "journal.h"
15 #include "io.h"
16 #include "keylist.h"
17 #include "quota.h"
18
19 #include <linux/aio.h>
20 #include <linux/backing-dev.h>
21 #include <linux/falloc.h>
22 #include <linux/migrate.h>
23 #include <linux/mmu_context.h>
24 #include <linux/pagevec.h>
25 #include <linux/sched/signal.h>
26 #include <linux/task_io_accounting_ops.h>
27 #include <linux/uio.h>
28 #include <linux/writeback.h>
29
30 #include <trace/events/bcachefs.h>
31 #include <trace/events/writeback.h>
32
33 struct quota_res {
34         u64                             sectors;
35 };
36
37 struct bchfs_write_op {
38         struct bch_inode_info           *inode;
39         s64                             sectors_added;
40         bool                            is_dio;
41         bool                            unalloc;
42         u64                             new_i_size;
43
44         /* must be last: */
45         struct bch_write_op             op;
46 };
47
48 struct bch_writepage_io {
49         struct closure                  cl;
50         u64                             new_sectors;
51
52         /* must be last: */
53         struct bchfs_write_op           op;
54 };
55
56 struct dio_write {
57         struct closure                  cl;
58         struct kiocb                    *req;
59         struct task_struct              *task;
60         unsigned                        loop:1,
61                                         sync:1,
62                                         free_iov:1;
63         struct quota_res                quota_res;
64
65         struct iov_iter                 iter;
66         struct iovec                    inline_vecs[2];
67
68         /* must be last: */
69         struct bchfs_write_op           iop;
70 };
71
72 struct dio_read {
73         struct closure                  cl;
74         struct kiocb                    *req;
75         long                            ret;
76         struct bch_read_bio             rbio;
77 };
78
79 /* pagecache_block must be held */
80 static int write_invalidate_inode_pages_range(struct address_space *mapping,
81                                               loff_t start, loff_t end)
82 {
83         int ret;
84
85         /*
86          * XXX: the way this is currently implemented, we can spin if a process
87          * is continually redirtying a specific page
88          */
89         do {
90                 if (!mapping->nrpages &&
91                     !mapping->nrexceptional)
92                         return 0;
93
94                 ret = filemap_write_and_wait_range(mapping, start, end);
95                 if (ret)
96                         break;
97
98                 if (!mapping->nrpages)
99                         return 0;
100
101                 ret = invalidate_inode_pages2_range(mapping,
102                                 start >> PAGE_SHIFT,
103                                 end >> PAGE_SHIFT);
104         } while (ret == -EBUSY);
105
106         return ret;
107 }
108
109 /* quotas */
110
111 #ifdef CONFIG_BCACHEFS_QUOTA
112
113 static void bch2_quota_reservation_put(struct bch_fs *c,
114                                        struct bch_inode_info *inode,
115                                        struct quota_res *res)
116 {
117         if (!res->sectors)
118                 return;
119
120         mutex_lock(&inode->ei_quota_lock);
121         BUG_ON(res->sectors > inode->ei_quota_reserved);
122
123         bch2_quota_acct(c, inode->ei_qid, Q_SPC,
124                         -((s64) res->sectors), BCH_QUOTA_PREALLOC);
125         inode->ei_quota_reserved -= res->sectors;
126         mutex_unlock(&inode->ei_quota_lock);
127
128         res->sectors = 0;
129 }
130
131 static int bch2_quota_reservation_add(struct bch_fs *c,
132                                       struct bch_inode_info *inode,
133                                       struct quota_res *res,
134                                       unsigned sectors,
135                                       bool check_enospc)
136 {
137         int ret;
138
139         mutex_lock(&inode->ei_quota_lock);
140         ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
141                               check_enospc ? BCH_QUOTA_PREALLOC : BCH_QUOTA_NOCHECK);
142         if (likely(!ret)) {
143                 inode->ei_quota_reserved += sectors;
144                 res->sectors += sectors;
145         }
146         mutex_unlock(&inode->ei_quota_lock);
147
148         return ret;
149 }
150
151 #else
152
153 static void bch2_quota_reservation_put(struct bch_fs *c,
154                                        struct bch_inode_info *inode,
155                                        struct quota_res *res)
156 {
157 }
158
159 static int bch2_quota_reservation_add(struct bch_fs *c,
160                                       struct bch_inode_info *inode,
161                                       struct quota_res *res,
162                                       unsigned sectors,
163                                       bool check_enospc)
164 {
165         return 0;
166 }
167
168 #endif
169
170 /* i_size updates: */
171
172 struct inode_new_size {
173         loff_t          new_size;
174         u64             now;
175         unsigned        fields;
176 };
177
178 static int inode_set_size(struct bch_inode_info *inode,
179                           struct bch_inode_unpacked *bi,
180                           void *p)
181 {
182         struct inode_new_size *s = p;
183
184         bi->bi_size = s->new_size;
185         if (s->fields & ATTR_ATIME)
186                 bi->bi_atime = s->now;
187         if (s->fields & ATTR_MTIME)
188                 bi->bi_mtime = s->now;
189         if (s->fields & ATTR_CTIME)
190                 bi->bi_ctime = s->now;
191
192         return 0;
193 }
194
195 static int __must_check bch2_write_inode_size(struct bch_fs *c,
196                                               struct bch_inode_info *inode,
197                                               loff_t new_size, unsigned fields)
198 {
199         struct inode_new_size s = {
200                 .new_size       = new_size,
201                 .now            = bch2_current_time(c),
202                 .fields         = fields,
203         };
204
205         return bch2_write_inode(c, inode, inode_set_size, &s, fields);
206 }
207
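/*
 * Adjust the VFS inode's i_blocks by @sectors: when growing and a quota
 * reservation was passed in, the sectors are taken out of that reservation,
 * otherwise they're accounted against the inode's quota directly.
 */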
208 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
209                            struct quota_res *quota_res, s64 sectors)
210 {
211         if (!sectors)
212                 return;
213
214         mutex_lock(&inode->ei_quota_lock);
215 #ifdef CONFIG_BCACHEFS_QUOTA
216         if (quota_res && sectors > 0) {
217                 BUG_ON(sectors > quota_res->sectors);
218                 BUG_ON(sectors > inode->ei_quota_reserved);
219
220                 quota_res->sectors -= sectors;
221                 inode->ei_quota_reserved -= sectors;
222         } else {
223                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, BCH_QUOTA_WARN);
224         }
225 #endif
226         inode->v.i_blocks += sectors;
227         mutex_unlock(&inode->ei_quota_lock);
228 }
229
230 /* normal i_size/i_sectors update machinery: */
231
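/*
 * Walk the existing extents overlapped by @new, starting from @_iter's
 * position, and return the resulting change in allocated sectors (for
 * i_sectors accounting); *@allocating is set if any part of the range being
 * overwritten isn't already fully allocated.
 */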
232 static s64 sum_sector_overwrites(struct bkey_i *new, struct btree_iter *_iter,
233                                  bool *allocating)
234 {
235         struct btree_iter iter;
236         struct bkey_s_c old;
237         s64 delta = 0;
238
239         bch2_btree_iter_init(&iter, _iter->c, BTREE_ID_EXTENTS, POS_MIN,
240                              BTREE_ITER_SLOTS);
241
242         bch2_btree_iter_link(_iter, &iter);
243         bch2_btree_iter_copy(&iter, _iter);
244
245         for_each_btree_key_continue(&iter, BTREE_ITER_SLOTS, old) {
246                 if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
247                         break;
248
249                 if (allocating &&
250                     !bch2_extent_is_fully_allocated(old))
251                         *allocating = true;
252
253                 delta += (min(new->k.p.offset,
254                               old.k->p.offset) -
255                           max(bkey_start_offset(&new->k),
256                               bkey_start_offset(old.k))) *
257                         (bkey_extent_is_allocation(&new->k) -
258                          bkey_extent_is_allocation(old.k));
259         }
260
261         bch2_btree_iter_unlink(&iter);
262
263         return delta;
264 }
265
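/*
 * Insert @k into the extents btree and, in the same transaction, update the
 * inode's bi_sectors and - if the extent extends it - bi_size; with @direct,
 * the VFS inode's i_size and i_blocks are updated as well. Returns -ENOSPC
 * if the update would require allocating and @may_allocate is false.
 */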
266 static int bch2_extent_update(struct btree_trans *trans,
267                               struct bch_inode_info *inode,
268                               struct disk_reservation *disk_res,
269                               struct quota_res *quota_res,
270                               struct btree_iter *extent_iter,
271                               struct bkey_i *k,
272                               u64 new_i_size,
273                               bool may_allocate,
274                               bool direct,
275                               s64 *total_delta)
276 {
277         struct btree_iter *inode_iter = NULL;
278         struct bch_inode_unpacked inode_u;
279         struct bkey_inode_buf inode_p;
280         bool allocating = false;
281         bool extended = false;
282         s64 i_sectors_delta;
283         int ret;
284
285         bch2_trans_begin_updates(trans);
286
287         ret = bch2_btree_iter_traverse(extent_iter);
288         if (ret)
289                 return ret;
290
291         bch2_extent_trim_atomic(k, extent_iter);
292
293         i_sectors_delta = sum_sector_overwrites(k, extent_iter, &allocating);
294         if (!may_allocate && allocating)
295                 return -ENOSPC;
296
297         bch2_trans_update(trans, BTREE_INSERT_ENTRY(extent_iter, k));
298
299         new_i_size = min(k->k.p.offset << 9, new_i_size);
300
301         /* XXX: inode->i_size locking */
302         if (i_sectors_delta ||
303             new_i_size > inode->ei_inode.bi_size) {
304                 inode_iter = bch2_trans_get_iter(trans,
305                         BTREE_ID_INODES,
306                         POS(k->k.p.inode, 0),
307                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
308                 if (IS_ERR(inode_iter))
309                         return PTR_ERR(inode_iter);
310
311                 ret = bch2_btree_iter_traverse(inode_iter);
312                 if (ret)
313                         goto err;
314
315                 inode_u = inode->ei_inode;
316                 inode_u.bi_sectors += i_sectors_delta;
317
318                 /* XXX: this is slightly suspect */
319                 if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
320                     new_i_size > inode_u.bi_size) {
321                         inode_u.bi_size = new_i_size;
322                         extended = true;
323                 }
324
325                 bch2_inode_pack(&inode_p, &inode_u);
326                 bch2_trans_update(trans,
327                         BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
328         }
329
330         ret = bch2_trans_commit(trans, disk_res,
331                                 &inode->ei_journal_seq,
332                                 BTREE_INSERT_NOFAIL|
333                                 BTREE_INSERT_ATOMIC|
334                                 BTREE_INSERT_NOUNLOCK|
335                                 BTREE_INSERT_USE_RESERVE);
336         if (ret)
337                 goto err;
338
339         inode->ei_inode.bi_sectors += i_sectors_delta;
340
341         EBUG_ON(i_sectors_delta &&
342                 inode->ei_inode.bi_sectors != inode_u.bi_sectors);
343
344         if (extended) {
345                 inode->ei_inode.bi_size = new_i_size;
346
347                 if (direct) {
348                         spin_lock(&inode->v.i_lock);
349                         if (new_i_size > inode->v.i_size)
350                                 i_size_write(&inode->v, new_i_size);
351                         spin_unlock(&inode->v.i_lock);
352                 }
353         }
354
355         if (direct)
356                 i_sectors_acct(trans->c, inode, quota_res, i_sectors_delta);
357
358         if (total_delta)
359                 *total_delta += i_sectors_delta;
360 err:
361         if (!IS_ERR_OR_NULL(inode_iter))
362                 bch2_trans_iter_put(trans, inode_iter);
363         return ret;
364 }
365
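/*
 * Index update hook for writes from the pagecache and O_DIRECT paths:
 * applies each key in the write op's keylist with bch2_extent_update(),
 * retrying on -EINTR and accumulating the number of sectors added.
 */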
366 static int bchfs_write_index_update(struct bch_write_op *wop)
367 {
368         struct bchfs_write_op *op = container_of(wop,
369                                 struct bchfs_write_op, op);
370         struct quota_res *quota_res = op->is_dio
371                 ? &container_of(op, struct dio_write, iop)->quota_res
372                 : NULL;
373         struct bch_inode_info *inode = op->inode;
374         struct keylist *keys = &op->op.insert_keys;
375         struct bkey_i *k = bch2_keylist_front(keys);
376         struct btree_trans trans;
377         struct btree_iter *iter;
378         int ret;
379
380         BUG_ON(k->k.p.inode != inode->v.i_ino);
381
382         bch2_trans_init(&trans, wop->c);
383         bch2_trans_preload_iters(&trans);
384
385         iter = bch2_trans_get_iter(&trans,
386                                 BTREE_ID_EXTENTS,
387                                 bkey_start_pos(&k->k),
388                                 BTREE_ITER_INTENT);
389
390         do {
391                 BKEY_PADDED(k) tmp;
392
393                 bkey_copy(&tmp.k, bch2_keylist_front(keys));
394
395                 ret = bch2_extent_update(&trans, inode,
396                                 &wop->res, quota_res,
397                                 iter, &tmp.k,
398                                 op->new_i_size,
399                                 !op->unalloc,
400                                 op->is_dio,
401                                 &op->sectors_added);
402                 if (ret == -EINTR)
403                         continue;
404                 if (ret)
405                         break;
406
407                 if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
408                         bch2_cut_front(iter->pos, bch2_keylist_front(keys));
409                 else
410                         bch2_keylist_pop_front(keys);
411         } while (!bch2_keylist_empty(keys));
412
413         bch2_trans_exit(&trans);
414
415         return ret;
416 }
417
418 static inline void bch2_fswrite_op_init(struct bchfs_write_op *op,
419                                         struct bch_fs *c,
420                                         struct bch_inode_info *inode,
421                                         struct bch_io_opts opts,
422                                         bool is_dio)
423 {
424         op->inode               = inode;
425         op->sectors_added       = 0;
426         op->is_dio              = is_dio;
427         op->unalloc             = false;
428         op->new_i_size          = U64_MAX;
429
430         bch2_write_op_init(&op->op, c, opts);
431         op->op.target           = opts.foreground_target;
432         op->op.index_update_fn  = bchfs_write_index_update;
433         op_journal_seq_set(&op->op, &inode->ei_journal_seq);
434 }
435
436 static inline struct bch_io_opts io_opts(struct bch_fs *c, struct bch_inode_info *inode)
437 {
438         struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
439
440         bch2_io_opts_apply(&opts, bch2_inode_opts_get(&inode->ei_inode));
441         return opts;
442 }
443
444 /* page state: */
445
446 /* stored in page->private: */
447
448 /*
449  * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
450  * almost protect it with the page lock, except that bch2_writepage_io_done has
451  * to update the sector counts (and it does so from interrupt/bottom half context).
452  */
453 struct bch_page_state {
454 union { struct {
455         /* existing data: */
456         unsigned                sectors:PAGE_SECTOR_SHIFT + 1;
457         unsigned                nr_replicas:4;
458         unsigned                compressed:1;
459
460         /* Owns PAGE_SECTORS sized reservation: */
461         unsigned                reserved:1;
462         unsigned                reservation_replicas:4;
463
464         /* Owns PAGE_SECTORS sized quota reservation: */
465         unsigned                quota_reserved:1;
466
467         /*
468          * Number of sectors on disk - for i_blocks
469          * Uncompressed size, not compressed size:
470          */
471         unsigned                dirty_sectors:PAGE_SECTOR_SHIFT + 1;
472 };
473         /* for cmpxchg: */
474         unsigned long           v;
475 };
476 };
477
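/*
 * Atomically update a page's bch_page_state: @_expr modifies @_new (a copy
 * of the current state), which is then installed with cmpxchg, retrying
 * until it applies cleanly; the macro evaluates to the old state.
 *
 * Typical usage (cf. bch2_put_page_reservation() below):
 *
 *      old = page_state_cmpxchg(page_state(page), new, {
 *              new.reserved            = 0;
 *              new.quota_reserved      = 0;
 *      });
 */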
478 #define page_state_cmpxchg(_ptr, _new, _expr)                           \
479 ({                                                                      \
480         unsigned long _v = READ_ONCE((_ptr)->v);                        \
481         struct bch_page_state _old;                                     \
482                                                                         \
483         do {                                                            \
484                 _old.v = _new.v = _v;                                   \
485                 _expr;                                                  \
486                                                                         \
487                 EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
488         } while (_old.v != _new.v &&                                    \
489                  (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v); \
490                                                                         \
491         _old;                                                           \
492 })
493
494 static inline struct bch_page_state *page_state(struct page *page)
495 {
496         struct bch_page_state *s = (void *) &page->private;
497
498         BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));
499
500         if (!PagePrivate(page))
501                 SetPagePrivate(page);
502
503         return s;
504 }
505
506 static inline unsigned page_res_sectors(struct bch_page_state s)
507 {
508
509         return s.reserved ? s.reservation_replicas * PAGE_SECTORS : 0;
510 }
511
512 static void __bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
513                                         struct bch_page_state s)
514 {
515         struct disk_reservation res = { .sectors = page_res_sectors(s) };
516         struct quota_res quota_res = { .sectors = s.quota_reserved ? PAGE_SECTORS : 0 };
517
518         bch2_quota_reservation_put(c, inode, &quota_res);
519         bch2_disk_reservation_put(c, &res);
520 }
521
522 static void bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
523                                       struct page *page)
524 {
525         struct bch_page_state s;
526
527         s = page_state_cmpxchg(page_state(page), s, {
528                 s.reserved              = 0;
529                 s.quota_reserved        = 0;
530         });
531
532         __bch2_put_page_reservation(c, inode, s);
533 }
534
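/*
 * Ensure the page owns disk and quota reservations sufficient for writing
 * it out with the required number of replicas, reserving only the shortfall
 * over what the page already has allocated on disk or reserved.
 */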
535 static int bch2_get_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
536                                      struct page *page, bool check_enospc)
537 {
538         struct bch_page_state *s = page_state(page), new, old;
539
540         /* XXX: this should not be open coded */
541         unsigned nr_replicas = inode->ei_inode.bi_data_replicas
542                 ? inode->ei_inode.bi_data_replicas - 1
543                 : c->opts.data_replicas;
544
545         struct disk_reservation disk_res = bch2_disk_reservation_init(c,
546                                                 nr_replicas);
547         struct quota_res quota_res = { 0 };
548         int ret = 0;
549
550         /*
551          * XXX: this could likely be quite a bit simpler, page reservations
552          * _should_ only be manipulated with page locked:
553          */
554
555         old = page_state_cmpxchg(s, new, {
556                 if (new.reserved
557                     ? (new.reservation_replicas < disk_res.nr_replicas)
558                     : (new.sectors < PAGE_SECTORS ||
559                        new.nr_replicas < disk_res.nr_replicas ||
560                        new.compressed)) {
561                         int sectors = (disk_res.nr_replicas * PAGE_SECTORS -
562                                        page_res_sectors(new) -
563                                        disk_res.sectors);
564
565                         if (sectors > 0) {
566                                 ret = bch2_disk_reservation_add(c, &disk_res, sectors,
567                                                 !check_enospc
568                                                 ? BCH_DISK_RESERVATION_NOFAIL : 0);
569                                 if (unlikely(ret))
570                                         goto err;
571                         }
572
573                         new.reserved = 1;
574                         new.reservation_replicas = disk_res.nr_replicas;
575                 }
576
577                 if (!new.quota_reserved &&
578                     new.sectors + new.dirty_sectors < PAGE_SECTORS) {
579                         ret = bch2_quota_reservation_add(c, inode, &quota_res,
580                                                 PAGE_SECTORS - quota_res.sectors,
581                                                 check_enospc);
582                         if (unlikely(ret))
583                                 goto err;
584
585                         new.quota_reserved = 1;
586                 }
587         });
588
589         quota_res.sectors -= (new.quota_reserved - old.quota_reserved) * PAGE_SECTORS;
590         disk_res.sectors -= page_res_sectors(new) - page_res_sectors(old);
591 err:
592         bch2_quota_reservation_put(c, inode, &quota_res);
593         bch2_disk_reservation_put(c, &disk_res);
594         return ret;
595 }
596
597 static void bch2_clear_page_bits(struct page *page)
598 {
599         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
600         struct bch_fs *c = inode->v.i_sb->s_fs_info;
601         struct bch_page_state s;
602
603         if (!PagePrivate(page))
604                 return;
605
606         s.v = xchg(&page_state(page)->v, 0);
607         ClearPagePrivate(page);
608
609         if (s.dirty_sectors)
610                 i_sectors_acct(c, inode, NULL, -s.dirty_sectors);
611
612         __bch2_put_page_reservation(c, inode, s);
613 }
614
615 int bch2_set_page_dirty(struct page *page)
616 {
617         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
618         struct bch_fs *c = inode->v.i_sb->s_fs_info;
619         struct quota_res quota_res = { 0 };
620         struct bch_page_state old, new;
621
622         old = page_state_cmpxchg(page_state(page), new,
623                 new.dirty_sectors = PAGE_SECTORS - new.sectors;
624                 new.quota_reserved = 0;
625         );
626
627         quota_res.sectors += old.quota_reserved * PAGE_SECTORS;
628
629         if (old.dirty_sectors != new.dirty_sectors)
630                 i_sectors_acct(c, inode, &quota_res,
631                                new.dirty_sectors - old.dirty_sectors);
632         bch2_quota_reservation_put(c, inode, &quota_res);
633
634         return __set_page_dirty_nobuffers(page);
635 }
636
637 int bch2_page_mkwrite(struct vm_fault *vmf)
638 {
639         struct page *page = vmf->page;
640         struct file *file = vmf->vma->vm_file;
641         struct bch_inode_info *inode = file_bch_inode(file);
642         struct address_space *mapping = inode->v.i_mapping;
643         struct bch_fs *c = inode->v.i_sb->s_fs_info;
644         int ret = VM_FAULT_LOCKED;
645
646         sb_start_pagefault(inode->v.i_sb);
647         file_update_time(file);
648
649         /*
650          * Not strictly necessary, but helps avoid dio writes livelocking in
651          * write_invalidate_inode_pages_range() - can drop this if/when we get
652          * a write_invalidate_inode_pages_range() that works without dropping
653          * page lock before invalidating page
654          */
655         if (current->pagecache_lock != &mapping->add_lock)
656                 pagecache_add_get(&mapping->add_lock);
657
658         lock_page(page);
659         if (page->mapping != mapping ||
660             page_offset(page) > i_size_read(&inode->v)) {
661                 unlock_page(page);
662                 ret = VM_FAULT_NOPAGE;
663                 goto out;
664         }
665
666         if (bch2_get_page_reservation(c, inode, page, true)) {
667                 unlock_page(page);
668                 ret = VM_FAULT_SIGBUS;
669                 goto out;
670         }
671
672         if (!PageDirty(page))
673                 set_page_dirty(page);
674         wait_for_stable_page(page);
675 out:
676         if (current->pagecache_lock != &mapping->add_lock)
677                 pagecache_add_put(&mapping->add_lock);
678         sb_end_pagefault(inode->v.i_sb);
679         return ret;
680 }
681
682 void bch2_invalidatepage(struct page *page, unsigned int offset,
683                          unsigned int length)
684 {
685         EBUG_ON(!PageLocked(page));
686         EBUG_ON(PageWriteback(page));
687
688         if (offset || length < PAGE_SIZE)
689                 return;
690
691         bch2_clear_page_bits(page);
692 }
693
694 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
695 {
696         /* XXX: this can't take locks that are held while we allocate memory */
697         EBUG_ON(!PageLocked(page));
698         EBUG_ON(PageWriteback(page));
699
700         if (PageDirty(page))
701                 return 0;
702
703         bch2_clear_page_bits(page);
704         return 1;
705 }
706
707 #ifdef CONFIG_MIGRATION
708 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
709                       struct page *page, enum migrate_mode mode)
710 {
711         int ret;
712
713         ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
714         if (ret != MIGRATEPAGE_SUCCESS)
715                 return ret;
716
717         if (PagePrivate(page)) {
718                 *page_state(newpage) = *page_state(page);
719                 ClearPagePrivate(page);
720         }
721
722         migrate_page_copy(newpage, page);
723         return MIGRATEPAGE_SUCCESS;
724 }
725 #endif
726
727 /* readpages/writepages: */
728
729 static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
730 {
731         sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
732
733         return bio->bi_vcnt < bio->bi_max_vecs &&
734                 bio_end_sector(bio) == offset;
735 }
736
737 static int bio_add_page_contig(struct bio *bio, struct page *page)
738 {
739         sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
740
741         EBUG_ON(!bio->bi_max_vecs);
742
743         if (!bio->bi_vcnt)
744                 bio->bi_iter.bi_sector = offset;
745         else if (!bio_can_add_page_contig(bio, page))
746                 return -1;
747
748         __bio_add_page(bio, page, PAGE_SIZE, 0);
749         return 0;
750 }
751
752 /* readpage(s): */
753
754 static void bch2_readpages_end_io(struct bio *bio)
755 {
756         struct bio_vec *bv;
757         int i;
758
759         bio_for_each_segment_all(bv, bio, i) {
760                 struct page *page = bv->bv_page;
761
762                 if (!bio->bi_status) {
763                         SetPageUptodate(page);
764                 } else {
765                         ClearPageUptodate(page);
766                         SetPageError(page);
767                 }
768                 unlock_page(page);
769         }
770
771         bio_put(bio);
772 }
773
774 static inline void page_state_init_for_read(struct page *page)
775 {
776         SetPagePrivate(page);
777         page->private = 0;
778 }
779
780 struct readpages_iter {
781         struct address_space    *mapping;
782         struct page             **pages;
783         unsigned                nr_pages;
784         unsigned                nr_added;
785         unsigned                idx;
786         pgoff_t                 offset;
787 };
788
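/*
 * Move the pages handed to ->readpages() off the list into a private array,
 * in index order; they're only added to the page cache later, lazily, by
 * readpage_iter_next().
 */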
789 static int readpages_iter_init(struct readpages_iter *iter,
790                                struct address_space *mapping,
791                                struct list_head *pages, unsigned nr_pages)
792 {
793         memset(iter, 0, sizeof(*iter));
794
795         iter->mapping   = mapping;
796         iter->offset    = list_last_entry(pages, struct page, lru)->index;
797
798         iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
799         if (!iter->pages)
800                 return -ENOMEM;
801
802         while (!list_empty(pages)) {
803                 struct page *page = list_last_entry(pages, struct page, lru);
804
805                 prefetchw(&page->flags);
806                 iter->pages[iter->nr_pages++] = page;
807                 list_del(&page->lru);
808         }
809
810         return 0;
811 }
812
813 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
814 {
815         struct page *page;
816         unsigned i;
817         int ret;
818
819         BUG_ON(iter->idx > iter->nr_added);
820         BUG_ON(iter->nr_added > iter->nr_pages);
821
822         if (iter->idx < iter->nr_added)
823                 goto out;
824
825         while (1) {
826                 if (iter->idx == iter->nr_pages)
827                         return NULL;
828
829                 ret = add_to_page_cache_lru_vec(iter->mapping,
830                                 iter->pages     + iter->nr_added,
831                                 iter->nr_pages  - iter->nr_added,
832                                 iter->offset    + iter->nr_added,
833                                 GFP_NOFS);
834                 if (ret > 0)
835                         break;
836
837                 page = iter->pages[iter->nr_added];
838                 iter->idx++;
839                 iter->nr_added++;
840
841                 put_page(page);
842         }
843
844         iter->nr_added += ret;
845
846         for (i = iter->idx; i < iter->nr_added; i++)
847                 put_page(iter->pages[i]);
848 out:
849         EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
850
851         page_state_init_for_read(iter->pages[iter->idx]);
852         return iter->pages[iter->idx];
853 }
854
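/*
 * Record in each page's bch_page_state how many of its sectors are backed
 * by @k and with how many replicas - consulted later when deciding whether
 * the page needs a new disk reservation before being dirtied.
 */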
855 static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
856 {
857         struct bvec_iter iter;
858         struct bio_vec bv;
859         bool compressed = bch2_extent_is_compressed(k);
860         unsigned nr_ptrs = bch2_extent_nr_dirty_ptrs(k);
861
862         bio_for_each_segment(bv, bio, iter) {
863                 struct bch_page_state *s = page_state(bv.bv_page);
864
865                 /* sectors in @k from the start of this page: */
866                 unsigned k_sectors = k.k->size - (iter.bi_sector - k.k->p.offset);
867
868                 unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);
869
870                 s->nr_replicas = !s->sectors
871                         ? nr_ptrs
872                         : min_t(unsigned, s->nr_replicas, nr_ptrs);
873
874                 BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
875                 s->sectors += page_sectors;
876
877                 s->compressed |= compressed;
878         }
879 }
880
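/*
 * Opportunistically extend the read bio up to @offset with more pages:
 * contiguous pages from the readpages iterator or, when @get_more, freshly
 * allocated pages added to the page cache - stopping early if a page is
 * already present or the bio is full.
 */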
881 static void readpage_bio_extend(struct readpages_iter *iter,
882                                 struct bio *bio, u64 offset,
883                                 bool get_more)
884 {
885         while (bio_end_sector(bio) < offset &&
886                bio->bi_vcnt < bio->bi_max_vecs) {
887                 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
888                 struct page *page = readpage_iter_next(iter);
889                 int ret;
890
891                 if (page) {
892                         if (iter->offset + iter->idx != page_offset)
893                                 break;
894
895                         iter->idx++;
896                 } else {
897                         if (!get_more)
898                                 break;
899
900                         rcu_read_lock();
901                         page = radix_tree_lookup(&iter->mapping->i_pages, page_offset);
902                         rcu_read_unlock();
903
904                         if (page && !radix_tree_exceptional_entry(page))
905                                 break;
906
907                         page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
908                         if (!page)
909                                 break;
910
911                         page_state_init_for_read(page);
912
913                         ret = add_to_page_cache_lru(page, iter->mapping,
914                                                     page_offset, GFP_NOFS);
915                         if (ret) {
916                                 ClearPagePrivate(page);
917                                 put_page(page);
918                                 break;
919                         }
920
921                         put_page(page);
922                 }
923
924                 __bio_add_page(bio, page, PAGE_SIZE, 0);
925         }
926 }
927
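/*
 * Main buffered read loop: for each extent overlapping the bio, possibly
 * extend the bio to cover more of the extent, account the cached sectors in
 * the page state, and hand the fragment to bch2_read_extent(); the loop
 * ends once the bio's last fragment has been submitted.
 */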
928 static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
929                        struct bch_read_bio *rbio, u64 inum,
930                        struct readpages_iter *readpages_iter)
931 {
932         struct bio *bio = &rbio->bio;
933         int flags = BCH_READ_RETRY_IF_STALE|
934                 BCH_READ_MAY_PROMOTE;
935
936         rbio->c = c;
937         rbio->start_time = local_clock();
938
939         while (1) {
940                 BKEY_PADDED(k) tmp;
941                 struct bkey_s_c k;
942                 unsigned bytes;
943
944                 bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
945
946                 k = bch2_btree_iter_peek_slot(iter);
947                 BUG_ON(!k.k);
948
949                 if (IS_ERR(k.k)) {
950                         int ret = bch2_btree_iter_unlock(iter);
951                         BUG_ON(!ret);
952                         bcache_io_error(c, bio, "btree IO error %i", ret);
953                         bio_endio(bio);
954                         return;
955                 }
956
957                 bkey_reassemble(&tmp.k, k);
958                 bch2_btree_iter_unlock(iter);
959                 k = bkey_i_to_s_c(&tmp.k);
960
961                 if (readpages_iter) {
962                         bool want_full_extent = false;
963
964                         if (bkey_extent_is_data(k.k)) {
965                                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
966                                 const union bch_extent_entry *i;
967                                 struct extent_ptr_decoded p;
968
969                                 extent_for_each_ptr_decode(e, p, i)
970                                         want_full_extent |= ((p.crc.csum_type != 0) |
971                                                              (p.crc.compression_type != 0));
972                         }
973
974                         readpage_bio_extend(readpages_iter,
975                                             bio, k.k->p.offset,
976                                             want_full_extent);
977                 }
978
979                 bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
980                          bio->bi_iter.bi_sector) << 9;
981                 swap(bio->bi_iter.bi_size, bytes);
982
983                 if (bytes == bio->bi_iter.bi_size)
984                         flags |= BCH_READ_LAST_FRAGMENT;
985
986                 if (bkey_extent_is_allocation(k.k))
987                         bch2_add_page_sectors(bio, k);
988
989                 bch2_read_extent(c, rbio, k, flags);
990
991                 if (flags & BCH_READ_LAST_FRAGMENT)
992                         return;
993
994                 swap(bio->bi_iter.bi_size, bytes);
995                 bio_advance(bio, bytes);
996         }
997 }
998
999 int bch2_readpages(struct file *file, struct address_space *mapping,
1000                    struct list_head *pages, unsigned nr_pages)
1001 {
1002         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1003         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1004         struct bch_io_opts opts = io_opts(c, inode);
1005         struct btree_iter iter;
1006         struct page *page;
1007         struct readpages_iter readpages_iter;
1008         int ret;
1009
1010         ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
1011         BUG_ON(ret);
1012
1013         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
1014                              BTREE_ITER_SLOTS);
1015
1016         if (current->pagecache_lock != &mapping->add_lock)
1017                 pagecache_add_get(&mapping->add_lock);
1018
1019         while ((page = readpage_iter_next(&readpages_iter))) {
1020                 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
1021                 unsigned n = min_t(unsigned,
1022                                    readpages_iter.nr_pages -
1023                                    readpages_iter.idx,
1024                                    BIO_MAX_PAGES);
1025                 struct bch_read_bio *rbio =
1026                         rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1027                                   opts);
1028
1029                 readpages_iter.idx++;
1030
1031                 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
1032                 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
1033                 rbio->bio.bi_end_io = bch2_readpages_end_io;
1034                 __bio_add_page(&rbio->bio, page, PAGE_SIZE, 0);
1035
1036                 bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter);
1037         }
1038
1039         if (current->pagecache_lock != &mapping->add_lock)
1040                 pagecache_add_put(&mapping->add_lock);
1041
1042         kfree(readpages_iter.pages);
1043
1044         return 0;
1045 }
1046
1047 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1048                              u64 inum, struct page *page)
1049 {
1050         struct btree_iter iter;
1051
1052         page_state_init_for_read(page);
1053
1054         bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1055         bio_add_page_contig(&rbio->bio, page);
1056
1057         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
1058                              BTREE_ITER_SLOTS);
1059         bchfs_read(c, &iter, rbio, inum, NULL);
1060 }
1061
1062 int bch2_readpage(struct file *file, struct page *page)
1063 {
1064         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1065         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1066         struct bch_io_opts opts = io_opts(c, inode);
1067         struct bch_read_bio *rbio;
1068
1069         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1070         rbio->bio.bi_end_io = bch2_readpages_end_io;
1071
1072         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1073         return 0;
1074 }
1075
1076 static void bch2_read_single_page_end_io(struct bio *bio)
1077 {
1078         complete(bio->bi_private);
1079 }
1080
1081 static int bch2_read_single_page(struct page *page,
1082                                  struct address_space *mapping)
1083 {
1084         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1085         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1086         struct bch_read_bio *rbio;
1087         int ret;
1088         DECLARE_COMPLETION_ONSTACK(done);
1089
1090         rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1091                          io_opts(c, inode));
1092         rbio->bio.bi_private = &done;
1093         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1094
1095         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1096         wait_for_completion(&done);
1097
1098         ret = blk_status_to_errno(rbio->bio.bi_status);
1099         bio_put(&rbio->bio);
1100
1101         if (ret < 0)
1102                 return ret;
1103
1104         SetPageUptodate(page);
1105         return 0;
1106 }
1107
1108 /* writepages: */
1109
1110 struct bch_writepage_state {
1111         struct bch_writepage_io *io;
1112         struct bch_io_opts      opts;
1113 };
1114
1115 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1116                                                                   struct bch_inode_info *inode)
1117 {
1118         return (struct bch_writepage_state) { .opts = io_opts(c, inode) };
1119 }
1120
1121 static void bch2_writepage_io_free(struct closure *cl)
1122 {
1123         struct bch_writepage_io *io = container_of(cl,
1124                                         struct bch_writepage_io, cl);
1125
1126         bio_put(&io->op.op.wbio.bio);
1127 }
1128
1129 static void bch2_writepage_io_done(struct closure *cl)
1130 {
1131         struct bch_writepage_io *io = container_of(cl,
1132                                         struct bch_writepage_io, cl);
1133         struct bch_fs *c = io->op.op.c;
1134         struct bio *bio = &io->op.op.wbio.bio;
1135         struct bio_vec *bvec;
1136         unsigned i;
1137
1138         if (io->op.op.error) {
1139                 bio_for_each_segment_all(bvec, bio, i)
1140                         SetPageError(bvec->bv_page);
1141                 set_bit(AS_EIO, &io->op.inode->v.i_mapping->flags);
1142         }
1143
1144         /*
1145          * racing with fallocate can cause us to add fewer sectors than
1146          * expected - but we shouldn't add more sectors than expected:
1147          */
1148         BUG_ON(io->op.sectors_added > (s64) io->new_sectors);
1149
1150         /*
1151          * (error (due to going RO) halfway through a page can screw that up
1152          * slightly)
1153          * XXX wtf?
1154            BUG_ON(io->op.sectors_added - io->new_sectors >= (s64) PAGE_SECTORS);
1155          */
1156
1157         /*
1158          * PageWriteback is effectively our ref on the inode - fixup i_blocks
1159          * before calling end_page_writeback:
1160          */
1161         if (io->op.sectors_added != io->new_sectors)
1162                 i_sectors_acct(c, io->op.inode, NULL,
1163                                io->op.sectors_added - (s64) io->new_sectors);
1164
1165         bio_for_each_segment_all(bvec, bio, i)
1166                 end_page_writeback(bvec->bv_page);
1167
1168         closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1169 }
1170
1171 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1172 {
1173         struct bch_writepage_io *io = w->io;
1174
1175         w->io = NULL;
1176         closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
1177         continue_at(&io->cl, bch2_writepage_io_done, NULL);
1178 }
1179
1180 /*
1181  * Get a bch_writepage_io and add @page to it - appending to an existing one if
1182  * possible, else allocating a new one:
1183  */
1184 static void bch2_writepage_io_alloc(struct bch_fs *c,
1185                                     struct bch_writepage_state *w,
1186                                     struct bch_inode_info *inode,
1187                                     struct page *page,
1188                                     unsigned nr_replicas)
1189 {
1190         struct bch_write_op *op;
1191         u64 offset = (u64) page->index << PAGE_SECTOR_SHIFT;
1192
1193         w->io = container_of(bio_alloc_bioset(GFP_NOFS,
1194                                               BIO_MAX_PAGES,
1195                                               &c->writepage_bioset),
1196                              struct bch_writepage_io, op.op.wbio.bio);
1197
1198         closure_init(&w->io->cl, NULL);
1199         w->io->new_sectors      = 0;
1200         bch2_fswrite_op_init(&w->io->op, c, inode, w->opts, false);
1201         op                      = &w->io->op.op;
1202         op->nr_replicas         = nr_replicas;
1203         op->res.nr_replicas     = nr_replicas;
1204         op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
1205         op->pos                 = POS(inode->v.i_ino, offset);
1206         op->wbio.bio.bi_iter.bi_sector = offset;
1207 }
1208
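/*
 * Write back a single page: zero the portion of a page straddling i_size,
 * transfer the page's reservation into the in-flight bch_writepage_io, and
 * append the page to it - starting a new io if the replica count differs or
 * the page isn't contiguous with the previous one.
 */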
1209 static int __bch2_writepage(struct page *page,
1210                             struct writeback_control *wbc,
1211                             void *data)
1212 {
1213         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1214         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1215         struct bch_writepage_state *w = data;
1216         struct bch_page_state new, old;
1217         unsigned offset;
1218         loff_t i_size = i_size_read(&inode->v);
1219         pgoff_t end_index = i_size >> PAGE_SHIFT;
1220
1221         EBUG_ON(!PageUptodate(page));
1222
1223         /* Is the page fully inside i_size? */
1224         if (page->index < end_index)
1225                 goto do_io;
1226
1227         /* Is the page fully outside i_size? (truncate in progress) */
1228         offset = i_size & (PAGE_SIZE - 1);
1229         if (page->index > end_index || !offset) {
1230                 unlock_page(page);
1231                 return 0;
1232         }
1233
1234         /*
1235          * The page straddles i_size.  It must be zeroed out on each and every
1236          * writepage invocation because it may be mmapped.  "A file is mapped
1237          * in multiples of the page size.  For a file that is not a multiple of
1238          * the  page size, the remaining memory is zeroed when mapped, and
1239          * writes to that region are not written out to the file."
1240          */
1241         zero_user_segment(page, offset, PAGE_SIZE);
1242 do_io:
1243         /* Before unlocking the page, transfer reservation to w->io: */
1244         old = page_state_cmpxchg(page_state(page), new, {
1245                 EBUG_ON(!new.reserved &&
1246                         (new.sectors != PAGE_SECTORS ||
1247                         new.compressed));
1248
1249                 if (new.reserved)
1250                         new.nr_replicas = new.reservation_replicas;
1251                 new.reserved = 0;
1252
1253                 new.compressed |= w->opts.compression != 0;
1254
1255                 new.sectors += new.dirty_sectors;
1256                 new.dirty_sectors = 0;
1257         });
1258
1259         BUG_ON(PageWriteback(page));
1260         set_page_writeback(page);
1261         unlock_page(page);
1262
1263         if (w->io &&
1264             (w->io->op.op.res.nr_replicas != new.nr_replicas ||
1265              !bio_can_add_page_contig(&w->io->op.op.wbio.bio, page)))
1266                 bch2_writepage_do_io(w);
1267
1268         if (!w->io)
1269                 bch2_writepage_io_alloc(c, w, inode, page, new.nr_replicas);
1270
1271         w->io->new_sectors += new.sectors - old.sectors;
1272
1273         BUG_ON(inode != w->io->op.inode);
1274         BUG_ON(bio_add_page_contig(&w->io->op.op.wbio.bio, page));
1275
1276         if (old.reserved)
1277                 w->io->op.op.res.sectors += old.reservation_replicas * PAGE_SECTORS;
1278
1279         w->io->op.new_i_size = i_size;
1280
1281         if (wbc->sync_mode == WB_SYNC_ALL)
1282                 w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;
1283
1284         return 0;
1285 }
1286
1287 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1288 {
1289         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1290         struct bch_writepage_state w =
1291                 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1292         struct blk_plug plug;
1293         int ret;
1294
1295         blk_start_plug(&plug);
1296         ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1297         if (w.io)
1298                 bch2_writepage_do_io(&w);
1299         blk_finish_plug(&plug);
1300         return ret;
1301 }
1302
1303 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1304 {
1305         struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1306         struct bch_writepage_state w =
1307                 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1308         int ret;
1309
1310         ret = __bch2_writepage(page, wbc, &w);
1311         if (w.io)
1312                 bch2_writepage_do_io(&w);
1313
1314         return ret;
1315 }
1316
1317 /* buffered writes: */
1318
1319 int bch2_write_begin(struct file *file, struct address_space *mapping,
1320                      loff_t pos, unsigned len, unsigned flags,
1321                      struct page **pagep, void **fsdata)
1322 {
1323         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1324         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1325         pgoff_t index = pos >> PAGE_SHIFT;
1326         unsigned offset = pos & (PAGE_SIZE - 1);
1327         struct page *page;
1328         int ret = -ENOMEM;
1329
1330         BUG_ON(inode_unhashed(&inode->v));
1331
1332         /* Not strictly necessary - same reason as mkwrite(): */
1333         pagecache_add_get(&mapping->add_lock);
1334
1335         page = grab_cache_page_write_begin(mapping, index, flags);
1336         if (!page)
1337                 goto err_unlock;
1338
1339         if (PageUptodate(page))
1340                 goto out;
1341
1342         /* If we're writing the entire page, we don't need to read it in first: */
1343         if (len == PAGE_SIZE)
1344                 goto out;
1345
1346         if (!offset && pos + len >= inode->v.i_size) {
1347                 zero_user_segment(page, len, PAGE_SIZE);
1348                 flush_dcache_page(page);
1349                 goto out;
1350         }
1351
1352         if (index > inode->v.i_size >> PAGE_SHIFT) {
1353                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1354                 flush_dcache_page(page);
1355                 goto out;
1356         }
1357 readpage:
1358         ret = bch2_read_single_page(page, mapping);
1359         if (ret)
1360                 goto err;
1361 out:
1362         ret = bch2_get_page_reservation(c, inode, page, true);
1363         if (ret) {
1364                 if (!PageUptodate(page)) {
1365                         /*
1366                          * If the page hasn't been read in, we won't know if we
1367                          * actually need a reservation - we don't actually need
1368                          * to read here, we just need to check if the page is
1369                          * fully backed by uncompressed data:
1370                          */
1371                         goto readpage;
1372                 }
1373
1374                 goto err;
1375         }
1376
1377         *pagep = page;
1378         return 0;
1379 err:
1380         unlock_page(page);
1381         put_page(page);
1382         *pagep = NULL;
1383 err_unlock:
1384         pagecache_add_put(&mapping->add_lock);
1385         return ret;
1386 }
1387
1388 int bch2_write_end(struct file *file, struct address_space *mapping,
1389                    loff_t pos, unsigned len, unsigned copied,
1390                    struct page *page, void *fsdata)
1391 {
1392         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1393         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1394
1395         lockdep_assert_held(&inode->v.i_rwsem);
1396
1397         if (unlikely(copied < len && !PageUptodate(page))) {
1398                 /*
1399                  * The page needs to be read in, but that would destroy
1400                  * our partial write - simplest thing is to just force
1401                  * userspace to redo the write:
1402                  */
1403                 zero_user(page, 0, PAGE_SIZE);
1404                 flush_dcache_page(page);
1405                 copied = 0;
1406         }
1407
1408         spin_lock(&inode->v.i_lock);
1409         if (pos + copied > inode->v.i_size)
1410                 i_size_write(&inode->v, pos + copied);
1411         spin_unlock(&inode->v.i_lock);
1412
1413         if (copied) {
1414                 if (!PageUptodate(page))
1415                         SetPageUptodate(page);
1416                 if (!PageDirty(page))
1417                         set_page_dirty(page);
1418
1419                 inode->ei_last_dirtied = (unsigned long) current;
1420         } else {
1421                 bch2_put_page_reservation(c, inode, page);
1422         }
1423
1424         unlock_page(page);
1425         put_page(page);
1426         pagecache_add_put(&mapping->add_lock);
1427
1428         return copied;
1429 }
1430
1431 #define WRITE_BATCH_PAGES       32
1432
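/*
 * Copy up to WRITE_BATCH_PAGES worth of data from @iter into the page cache
 * at @pos: lock the pages, read in partially overwritten edge pages, take
 * per-page disk/quota reservations, copy the data, then mark the pages that
 * were written to uptodate and dirty.
 */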
1433 static int __bch2_buffered_write(struct bch_inode_info *inode,
1434                                  struct address_space *mapping,
1435                                  struct iov_iter *iter,
1436                                  loff_t pos, unsigned len)
1437 {
1438         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1439         struct page *pages[WRITE_BATCH_PAGES];
1440         unsigned long index = pos >> PAGE_SHIFT;
1441         unsigned offset = pos & (PAGE_SIZE - 1);
1442         unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1443         unsigned i, copied = 0, nr_pages_copied = 0;
1444         int ret = 0;
1445
1446         BUG_ON(!len);
1447         BUG_ON(nr_pages > ARRAY_SIZE(pages));
1448
1449         for (i = 0; i < nr_pages; i++) {
1450                 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1451                 if (!pages[i]) {
1452                         nr_pages = i;
1453                         ret = -ENOMEM;
1454                         goto out;
1455                 }
1456         }
1457
1458         if (offset && !PageUptodate(pages[0])) {
1459                 ret = bch2_read_single_page(pages[0], mapping);
1460                 if (ret)
1461                         goto out;
1462         }
1463
1464         if ((pos + len) & (PAGE_SIZE - 1) &&
1465             !PageUptodate(pages[nr_pages - 1])) {
1466                 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1467                         zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1468                 } else {
1469                         ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1470                         if (ret)
1471                                 goto out;
1472                 }
1473         }
1474
1475         for (i = 0; i < nr_pages; i++) {
1476                 ret = bch2_get_page_reservation(c, inode, pages[i], true);
1477
1478                 if (ret && !PageUptodate(pages[i])) {
1479                         ret = bch2_read_single_page(pages[i], mapping);
1480                         if (ret)
1481                                 goto out;
1482
1483                         ret = bch2_get_page_reservation(c, inode, pages[i], true);
1484                 }
1485
1486                 if (ret)
1487                         goto out;
1488         }
1489
1490         if (mapping_writably_mapped(mapping))
1491                 for (i = 0; i < nr_pages; i++)
1492                         flush_dcache_page(pages[i]);
1493
1494         while (copied < len) {
1495                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1496                 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1497                 unsigned pg_bytes = min_t(unsigned, len - copied,
1498                                           PAGE_SIZE - pg_offset);
1499                 unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
1500                                                 iter, pg_offset, pg_bytes);
1501
1502                 if (!pg_copied)
1503                         break;
1504
1505                 flush_dcache_page(page);
1506                 iov_iter_advance(iter, pg_copied);
1507                 copied += pg_copied;
1508         }
1509
1510         if (!copied)
1511                 goto out;
1512
1513         nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1514         inode->ei_last_dirtied = (unsigned long) current;
1515
1516         spin_lock(&inode->v.i_lock);
1517         if (pos + copied > inode->v.i_size)
1518                 i_size_write(&inode->v, pos + copied);
1519         spin_unlock(&inode->v.i_lock);
1520
1521         if (copied < len &&
1522             ((offset + copied) & (PAGE_SIZE - 1))) {
1523                 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1524
1525                 if (!PageUptodate(page)) {
1526                         zero_user(page, 0, PAGE_SIZE);
1527                         copied -= (offset + copied) & (PAGE_SIZE - 1);
1528                 }
1529         }
1530 out:
1531         for (i = 0; i < nr_pages_copied; i++) {
1532                 if (!PageUptodate(pages[i]))
1533                         SetPageUptodate(pages[i]);
1534                 if (!PageDirty(pages[i]))
1535                         set_page_dirty(pages[i]);
1536                 unlock_page(pages[i]);
1537                 put_page(pages[i]);
1538         }
1539
1540         for (i = nr_pages_copied; i < nr_pages; i++) {
1541                 if (!PageDirty(pages[i]))
1542                         bch2_put_page_reservation(c, inode, pages[i]);
1543                 unlock_page(pages[i]);
1544                 put_page(pages[i]);
1545         }
1546
1547         return copied ?: ret;
1548 }
1549
1550 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1551 {
1552         struct file *file = iocb->ki_filp;
1553         struct address_space *mapping = file->f_mapping;
1554         struct bch_inode_info *inode = file_bch_inode(file);
1555         loff_t pos = iocb->ki_pos;
1556         ssize_t written = 0;
1557         int ret = 0;
1558
1559         pagecache_add_get(&mapping->add_lock);
1560
1561         do {
1562                 unsigned offset = pos & (PAGE_SIZE - 1);
1563                 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1564                               PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1565 again:
1566                 /*
1567                  * Bring in the user page that we will copy from _first_.
1568                  * Otherwise there's a nasty deadlock on copying from the
1569                  * same page as we're writing to, without it being marked
1570                  * up-to-date.
1571                  *
1572                  * Not only is this an optimisation, but it is also required
1573                  * to check that the address is actually valid, when atomic
1574                  * usercopies are used, below.
1575                  */
1576                 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1577                         bytes = min_t(unsigned long, iov_iter_count(iter),
1578                                       PAGE_SIZE - offset);
1579
1580                         if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1581                                 ret = -EFAULT;
1582                                 break;
1583                         }
1584                 }
1585
1586                 if (unlikely(fatal_signal_pending(current))) {
1587                         ret = -EINTR;
1588                         break;
1589                 }
1590
1591                 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1592                 if (unlikely(ret < 0))
1593                         break;
1594
1595                 cond_resched();
1596
1597                 if (unlikely(ret == 0)) {
1598                         /*
1599                          * If we were unable to copy any data at all, we must
1600                          * fall back to a single segment length write.
1601                          *
1602                  * If we didn't fall back here, we could livelock
1603                          * because not all segments in the iov can be copied at
1604                          * once without a pagefault.
1605                          */
1606                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
1607                                       iov_iter_single_seg_count(iter));
1608                         goto again;
1609                 }
1610                 pos += ret;
1611                 written += ret;
1612
1613                 balance_dirty_pages_ratelimited(mapping);
1614         } while (iov_iter_count(iter));
1615
1616         pagecache_add_put(&mapping->add_lock);
1617
1618         return written ? written : ret;
1619 }
1620
1621 /* O_DIRECT reads */
1622
1623 static void bch2_dio_read_complete(struct closure *cl)
1624 {
1625         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1626
1627         dio->req->ki_complete(dio->req, dio->ret, 0);
1628         bio_check_pages_dirty(&dio->rbio.bio);  /* transfers ownership */
1629 }
1630
1631 static void bch2_direct_IO_read_endio(struct bio *bio)
1632 {
1633         struct dio_read *dio = bio->bi_private;
1634
1635         if (bio->bi_status)
1636                 dio->ret = blk_status_to_errno(bio->bi_status);
1637
1638         closure_put(&dio->cl);
1639 }
1640
1641 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1642 {
1643         bch2_direct_IO_read_endio(bio);
1644         bio_check_pages_dirty(bio);     /* transfers ownership */
1645 }
1646
1647 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1648 {
1649         struct file *file = req->ki_filp;
1650         struct bch_inode_info *inode = file_bch_inode(file);
1651         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1652         struct bch_io_opts opts = io_opts(c, inode);
1653         struct dio_read *dio;
1654         struct bio *bio;
1655         loff_t offset = req->ki_pos;
1656         bool sync = is_sync_kiocb(req);
1657         size_t shorten;
1658         ssize_t ret;
1659
1660         if ((offset|iter->count) & (block_bytes(c) - 1))
1661                 return -EINVAL;
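        /*
         * Editor's note (illustrative, not in the original): both offset and
         * length must be block aligned, e.g. with a 4 KiB block size a
         * 6144 byte read at offset 4096 fails the check above with -EINVAL
         * because 6144 & 4095 != 0.
         */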
1662
1663         ret = min_t(loff_t, iter->count,
1664                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1665
1666         if (!ret)
1667                 return ret;
1668
1669         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1670         iter->count -= shorten;
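        /*
         * Worked example of the trimming above (editor's note, not in the
         * original, assuming a 4 KiB block size): with i_size = 10000,
         * offset = 8192 and iter->count = 8192, ret = min(8192, 1808) = 1808
         * and shorten = 8192 - round_up(1808, 4096) = 4096, so we issue one
         * more block of reads but report only the 1808 bytes below i_size.
         */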
1671
1672         bio = bio_alloc_bioset(GFP_KERNEL,
1673                                iov_iter_npages(iter, BIO_MAX_PAGES),
1674                                &c->dio_read_bioset);
1675
1676         bio->bi_end_io = bch2_direct_IO_read_endio;
1677
1678         dio = container_of(bio, struct dio_read, rbio.bio);
1679         closure_init(&dio->cl, NULL);
1680
1681         /*
1682          * this is a _really_ horrible hack just to avoid an atomic sub at the
1683          * end:
1684          */
1685         if (!sync) {
1686                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1687                 atomic_set(&dio->cl.remaining,
1688                            CLOSURE_REMAINING_INITIALIZER -
1689                            CLOSURE_RUNNING +
1690                            CLOSURE_DESTRUCTOR);
1691         } else {
1692                 atomic_set(&dio->cl.remaining,
1693                            CLOSURE_REMAINING_INITIALIZER + 1);
1694         }
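        /*
         * Editor's gloss on the hack above (not in the original; the exact
         * flag semantics depend on the closure implementation): in the async
         * case the remaining count is primed so that the final closure_put()
         * from the last bio's endio runs bch2_dio_read_complete() as the
         * destructor, with no extra atomic op at the end; in the sync case
         * an extra reference is taken so that closure_sync() below can wait
         * for all the split bios.
         */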
1695
1696         dio->req        = req;
1697         dio->ret        = ret;
1698
1699         goto start;
1700         while (iter->count) {
1701                 bio = bio_alloc_bioset(GFP_KERNEL,
1702                                        iov_iter_npages(iter, BIO_MAX_PAGES),
1703                                        &c->bio_read);
1704                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1705 start:
1706                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1707                 bio->bi_iter.bi_sector  = offset >> 9;
1708                 bio->bi_private         = dio;
1709
1710                 ret = bio_iov_iter_get_pages(bio, iter);
1711                 if (ret < 0) {
1712                         /* XXX: fault inject this path */
1713                         bio->bi_status = BLK_STS_RESOURCE;
1714                         bio_endio(bio);
1715                         break;
1716                 }
1717
1718                 offset += bio->bi_iter.bi_size;
1719                 bio_set_pages_dirty(bio);
1720
1721                 if (iter->count)
1722                         closure_get(&dio->cl);
1723
1724                 bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
1725         }
1726
1727         iter->count += shorten;
1728
1729         if (sync) {
1730                 closure_sync(&dio->cl);
1731                 closure_debug_destroy(&dio->cl);
1732                 ret = dio->ret;
1733                 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1734                 return ret;
1735         } else {
1736                 return -EIOCBQUEUED;
1737         }
1738 }
1739
1740 /* O_DIRECT writes */
1741
1742 static void bch2_dio_write_loop_async(struct closure *);
1743
1744 static long bch2_dio_write_loop(struct dio_write *dio)
1745 {
1746         struct kiocb *req = dio->req;
1747         struct address_space *mapping = req->ki_filp->f_mapping;
1748         struct bch_inode_info *inode = dio->iop.inode;
1749         struct bio *bio = &dio->iop.op.wbio.bio;
1750         struct bio_vec *bv;
1751         bool sync;
1752         long ret;
1753         int i;
1754
1755         if (dio->loop)
1756                 goto loop;
1757
1758         inode_dio_begin(&inode->v);
1759         __pagecache_block_get(&mapping->add_lock);
1760
1761         /* Write and invalidate pagecache range that we're writing to: */
1762         ret = write_invalidate_inode_pages_range(mapping, req->ki_pos,
1763                                 req->ki_pos + iov_iter_count(&dio->iter) - 1);
1764         if (unlikely(ret))
1765                 goto err;
1766
1767         while (1) {
1768                 BUG_ON(current->pagecache_lock);
1769                 current->pagecache_lock = &mapping->add_lock;
1770                 if (current != dio->task)
1771                         use_mm(dio->task->mm);
1772
1773                 ret = bio_iov_iter_get_pages(bio, &dio->iter);
1774
1775                 if (current != dio->task)
1776                         unuse_mm(dio->task->mm);
1777                 current->pagecache_lock = NULL;
1778
1779                 if (unlikely(ret < 0))
1780                         goto err;
1781
1782                 /* gup might have faulted pages back in: */
1783                 ret = write_invalidate_inode_pages_range(mapping,
1784                                 req->ki_pos + (dio->iop.op.written << 9),
1785                                 req->ki_pos + iov_iter_count(&dio->iter) - 1);
1786                 if (unlikely(ret))
1787                         goto err;
1788
1789                 dio->iop.op.pos = POS(inode->v.i_ino,
1790                                 (req->ki_pos >> 9) + dio->iop.op.written);
1791
1792                 task_io_account_write(bio->bi_iter.bi_size);
1793
1794                 closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
1795
1796                 if (!dio->sync && !dio->loop && dio->iter.count) {
1797                         struct iovec *iov = dio->inline_vecs;
1798
1799                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
1800                                 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
1801                                               GFP_KERNEL);
1802                                 if (unlikely(!iov)) {
1803                                         dio->iop.op.error = -ENOMEM;
1804                                         goto err_wait_io;
1805                                 }
1806
1807                                 dio->free_iov = true;
1808                         }
1809
1810                         memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
1811                         dio->iter.iov = iov;
1812                 }
1813 err_wait_io:
1814                 dio->loop = true;
1815
1816                 if (!dio->sync) {
1817                         continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
1818                         return -EIOCBQUEUED;
1819                 }
1820
1821                 closure_sync(&dio->cl);
1822 loop:
1823                 bio_for_each_segment_all(bv, bio, i)
1824                         put_page(bv->bv_page);
1825                 if (!dio->iter.count || dio->iop.op.error)
1826                         break;
1827                 bio_reset(bio);
1828         }
1829
1830         ret = dio->iop.op.error ?: ((long) dio->iop.op.written << 9);
1831 err:
1832         __pagecache_block_put(&mapping->add_lock);
1833         bch2_disk_reservation_put(dio->iop.op.c, &dio->iop.op.res);
1834         bch2_quota_reservation_put(dio->iop.op.c, inode, &dio->quota_res);
1835
1836         if (dio->free_iov)
1837                 kfree(dio->iter.iov);
1838
1839         closure_debug_destroy(&dio->cl);
1840
1841         sync = dio->sync;
1842         bio_put(bio);
1843
1844         /* inode->i_dio_count is our ref on inode and thus bch_fs */
1845         inode_dio_end(&inode->v);
1846
1847         if (!sync) {
1848                 req->ki_complete(req, ret, 0);
1849                 ret = -EIOCBQUEUED;
1850         }
1851         return ret;
1852 }
1853
1854 static void bch2_dio_write_loop_async(struct closure *cl)
1855 {
1856         struct dio_write *dio = container_of(cl, struct dio_write, cl);
1857
1858         bch2_dio_write_loop(dio);
1859 }
1860
1861 static int bch2_direct_IO_write(struct kiocb *req,
1862                                 struct iov_iter *iter,
1863                                 bool swap)
1864 {
1865         struct file *file = req->ki_filp;
1866         struct bch_inode_info *inode = file_bch_inode(file);
1867         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1868         struct dio_write *dio;
1869         struct bio *bio;
1870         loff_t offset = req->ki_pos;
1871         ssize_t ret;
1872
1873         lockdep_assert_held(&inode->v.i_rwsem);
1874
1875         if (unlikely(!iter->count))
1876                 return 0;
1877
1878         if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
1879                 return -EINVAL;
1880
1881         bio = bio_alloc_bioset(GFP_KERNEL,
1882                                iov_iter_npages(iter, BIO_MAX_PAGES),
1883                                &c->dio_write_bioset);
1884         dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
1885         closure_init(&dio->cl, NULL);
1886         dio->req                = req;
1887         dio->task               = current;
1888         dio->loop               = false;
1889         dio->sync               = is_sync_kiocb(req) ||
1890                 offset + iter->count > inode->v.i_size;
1891         dio->free_iov           = false;
1892         dio->quota_res.sectors  = 0;
1893         dio->iter               = *iter;
1894         bch2_fswrite_op_init(&dio->iop, c, inode, io_opts(c, inode), true);
1895         dio->iop.op.write_point = writepoint_hashed((unsigned long) dio->task);
1896         dio->iop.op.flags |= BCH_WRITE_NOPUT_RESERVATION;
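        /*
         * Editor's note (not in the original): dio->sync above is set both
         * for genuinely synchronous kiocbs and for writes extending past
         * i_size, so size-extending O_DIRECT writes always complete inline
         * in bch2_dio_write_loop() rather than via the async continuation.
         */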
1897
1898         if ((req->ki_flags & IOCB_DSYNC) &&
1899             !c->opts.journal_flush_disabled)
1900                 dio->iop.op.flags |= BCH_WRITE_FLUSH;
1901
1902         ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
1903                                          iter->count >> 9, true);
1904         if (unlikely(ret))
1905                 goto err;
1906
1907         ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9,
1908                                         dio->iop.op.opts.data_replicas, 0);
1909         if (unlikely(ret)) {
1910                 if (bch2_check_range_allocated(c, POS(inode->v.i_ino,
1911                                                       offset >> 9),
1912                                                iter->count >> 9))
1913                         goto err;
1914
1915                 dio->iop.unalloc = true;
1916         }
1917
1918         dio->iop.op.nr_replicas = dio->iop.op.res.nr_replicas;
1919
1920         return bch2_dio_write_loop(dio);
1921 err:
1922         bch2_disk_reservation_put(c, &dio->iop.op.res);
1923         bch2_quota_reservation_put(c, inode, &dio->quota_res);
1924         closure_debug_destroy(&dio->cl);
1925         bio_put(bio);
1926         return ret;
1927 }
1928
1929 ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
1930 {
1931         struct blk_plug plug;
1932         ssize_t ret;
1933
1934         blk_start_plug(&plug);
1935         ret = iov_iter_rw(iter) == WRITE
1936                 ? bch2_direct_IO_write(req, iter, false)
1937                 : bch2_direct_IO_read(req, iter);
1938         blk_finish_plug(&plug);
1939
1940         return ret;
1941 }
1942
1943 static ssize_t
1944 bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
1945 {
1946         return bch2_direct_IO_write(iocb, iter, true);
1947 }
1948
1949 static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
1950 {
1951         struct file *file = iocb->ki_filp;
1952         struct bch_inode_info *inode = file_bch_inode(file);
1953         ssize_t ret;
1954
1955         /* We can write back this queue in page reclaim */
1956         current->backing_dev_info = inode_to_bdi(&inode->v);
1957         ret = file_remove_privs(file);
1958         if (ret)
1959                 goto out;
1960
1961         ret = file_update_time(file);
1962         if (ret)
1963                 goto out;
1964
1965         ret = iocb->ki_flags & IOCB_DIRECT
1966                 ? bch2_direct_write(iocb, from)
1967                 : bch2_buffered_write(iocb, from);
1968
1969         if (likely(ret > 0))
1970                 iocb->ki_pos += ret;
1971 out:
1972         current->backing_dev_info = NULL;
1973         return ret;
1974 }
1975
1976 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
1977 {
1978         struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
1979         bool direct = iocb->ki_flags & IOCB_DIRECT;
1980         ssize_t ret;
1981
1982         inode_lock(&inode->v);
1983         ret = generic_write_checks(iocb, from);
1984         if (ret > 0)
1985                 ret = __bch2_write_iter(iocb, from);
1986         inode_unlock(&inode->v);
1987
1988         if (ret > 0 && !direct)
1989                 ret = generic_write_sync(iocb, ret);
1990
1991         return ret;
1992 }
1993
1994 /* fsync: */
1995
1996 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1997 {
1998         struct bch_inode_info *inode = file_bch_inode(file);
1999         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2000         int ret, ret2;
2001
2002         ret = file_write_and_wait_range(file, start, end);
2003         if (ret)
2004                 return ret;
2005
2006         if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
2007                 goto out;
2008
2009         ret = sync_inode_metadata(&inode->v, 1);
2010         if (ret)
2011                 return ret;
2012 out:
2013         if (c->opts.journal_flush_disabled)
2014                 return 0;
2015
2016         ret = bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);
2017         ret2 = file_check_and_advance_wb_err(file);
2018
2019         return ret ?: ret2;
2020 }
2021
2022 /* truncate: */
2023
2024 static int __bch2_fpunch(struct bch_fs *c, struct bch_inode_info *inode,
2025                          u64 start_offset, u64 end_offset, u64 *journal_seq)
2026 {
2027         struct bpos start       = POS(inode->v.i_ino, start_offset);
2028         struct bpos end         = POS(inode->v.i_ino, end_offset);
2029         unsigned max_sectors    = KEY_SIZE_MAX & (~0 << c->block_bits);
2030         struct btree_trans trans;
2031         struct btree_iter *iter;
2032         struct bkey_s_c k;
2033         int ret = 0;
2034
2035         bch2_trans_init(&trans, c);
2036         bch2_trans_preload_iters(&trans);
2037
2038         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, start,
2039                                    BTREE_ITER_INTENT);
2040
2041         while ((k = bch2_btree_iter_peek(iter)).k &&
2042                !(ret = btree_iter_err(k)) &&
2043                bkey_cmp(iter->pos, end) < 0) {
2044                 struct disk_reservation disk_res =
2045                         bch2_disk_reservation_init(c, 0);
2046                 struct bkey_i delete;
2047
2048                 bkey_init(&delete.k);
2049                 delete.k.p = iter->pos;
2050
2051                 /* create the biggest key we can */
2052                 bch2_key_resize(&delete.k, max_sectors);
2053                 bch2_cut_back(end, &delete.k);
2054
2055                 ret = bch2_extent_update(&trans, inode,
2056                                 &disk_res, NULL, iter, &delete,
2057                                 0, true, true, NULL);
2058                 bch2_disk_reservation_put(c, &disk_res);
2059
2060                 if (ret == -EINTR)
2061                         ret = 0;
2062                 if (ret)
2063                         break;
2064
2065                 bch2_btree_iter_cond_resched(iter);
2066         }
2067
2068         bch2_trans_exit(&trans);
2069
2070         return ret;
2071 }
2072
2073 static inline int range_has_data(struct bch_fs *c,
2074                                   struct bpos start,
2075                                   struct bpos end)
2076 {
2077
2078         struct btree_iter iter;
2079         struct bkey_s_c k;
2080         int ret = 0;
2081
2082         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2083                            start, 0, k) {
2084                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2085                         break;
2086
2087                 if (bkey_extent_is_data(k.k)) {
2088                         ret = 1;
2089                         break;
2090                 }
2091         }
2092
2093         return bch2_btree_iter_unlock(&iter) ?: ret;
2094 }
2095
2096 static int __bch2_truncate_page(struct bch_inode_info *inode,
2097                                 pgoff_t index, loff_t start, loff_t end)
2098 {
2099         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2100         struct address_space *mapping = inode->v.i_mapping;
2101         unsigned start_offset = start & (PAGE_SIZE - 1);
2102         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2103         struct page *page;
2104         int ret = 0;
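        /*
         * Editor's note (illustrative, not in the original, 4 KiB pages):
         * end = 6000 gives end_offset = ((6000 - 1) & 4095) + 1 = 1904,
         * while a page aligned end = 8192 gives end_offset = 4096 ==
         * PAGE_SIZE, i.e. a clean page boundary as far as the check below
         * is concerned.
         */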
2105
2106         /* Page boundary? Nothing to do */
2107         if (!((index == start >> PAGE_SHIFT && start_offset) ||
2108               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2109                 return 0;
2110
2111         /* Above i_size? */
2112         if (index << PAGE_SHIFT >= inode->v.i_size)
2113                 return 0;
2114
2115         page = find_lock_page(mapping, index);
2116         if (!page) {
2117                 /*
2118                  * XXX: we're doing two index lookups when we end up reading the
2119                  * page
2120                  */
2121                 ret = range_has_data(c,
2122                                 POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
2123                                 POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
2124                 if (ret <= 0)
2125                         return ret;
2126
2127                 page = find_or_create_page(mapping, index, GFP_KERNEL);
2128                 if (unlikely(!page)) {
2129                         ret = -ENOMEM;
2130                         goto out;
2131                 }
2132         }
2133
2134         if (!PageUptodate(page)) {
2135                 ret = bch2_read_single_page(page, mapping);
2136                 if (ret)
2137                         goto unlock;
2138         }
2139
2140         /*
2141          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2142          *
2143          * XXX: because we aren't currently tracking whether the page has actual
2144          * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2145          */
2146         ret = bch2_get_page_reservation(c, inode, page, false);
2147         BUG_ON(ret);
2148
2149         if (index == start >> PAGE_SHIFT &&
2150             index == end >> PAGE_SHIFT)
2151                 zero_user_segment(page, start_offset, end_offset);
2152         else if (index == start >> PAGE_SHIFT)
2153                 zero_user_segment(page, start_offset, PAGE_SIZE);
2154         else if (index == end >> PAGE_SHIFT)
2155                 zero_user_segment(page, 0, end_offset);
2156
2157         if (!PageDirty(page))
2158                 set_page_dirty(page);
2159 unlock:
2160         unlock_page(page);
2161         put_page(page);
2162 out:
2163         return ret;
2164 }
2165
2166 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2167 {
2168         return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2169                                     from, from + PAGE_SIZE);
2170 }
2171
2172 static int bch2_extend(struct bch_inode_info *inode, struct iattr *iattr)
2173 {
2174         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2175         struct address_space *mapping = inode->v.i_mapping;
2176         int ret;
2177
2178         ret = filemap_write_and_wait_range(mapping,
2179                         inode->ei_inode.bi_size, S64_MAX);
2180         if (ret)
2181                 return ret;
2182
2183         truncate_setsize(&inode->v, iattr->ia_size);
2184         setattr_copy(&inode->v, iattr);
2185
2186         mutex_lock(&inode->ei_update_lock);
2187         ret = bch2_write_inode_size(c, inode, inode->v.i_size,
2188                                     ATTR_MTIME|ATTR_CTIME);
2189         mutex_unlock(&inode->ei_update_lock);
2190
2191         return ret;
2192 }
2193
2194 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2195                                    struct bch_inode_unpacked *bi,
2196                                    void *p)
2197 {
2198         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2199
2200         bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2201         bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
2202         return 0;
2203 }
2204
2205 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2206                                   struct bch_inode_unpacked *bi, void *p)
2207 {
2208         u64 *new_i_size = p;
2209
2210         bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2211         bi->bi_size = *new_i_size;
2212         return 0;
2213 }
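/*
 * Editor's gloss (not in the original): truncate is a two phase update.
 * bch2_truncate_start_fn() sets BCH_INODE_I_SIZE_DIRTY and the new size
 * before any extents are punched, and bch2_truncate_finish_fn() clears the
 * flag afterwards, so a crash in between leaves the flag set and recovery
 * can tell that the on-disk i_size is not yet consistent with the extents.
 */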
2214
2215 int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
2216 {
2217         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2218         struct address_space *mapping = inode->v.i_mapping;
2219         u64 new_i_size = iattr->ia_size;
2220         bool shrink;
2221         int ret = 0;
2222
2223         inode_dio_wait(&inode->v);
2224         pagecache_block_get(&mapping->add_lock);
2225
2226         BUG_ON(inode->v.i_size < inode->ei_inode.bi_size);
2227
2228         shrink = iattr->ia_size <= inode->v.i_size;
2229
2230         if (!shrink) {
2231                 ret = bch2_extend(inode, iattr);
2232                 goto err;
2233         }
2234
2235         ret = bch2_truncate_page(inode, iattr->ia_size);
2236         if (unlikely(ret))
2237                 goto err;
2238
2239         if (iattr->ia_size > inode->ei_inode.bi_size)
2240                 ret = filemap_write_and_wait_range(mapping,
2241                                 inode->ei_inode.bi_size,
2242                                 iattr->ia_size - 1);
2243         else if (iattr->ia_size & (PAGE_SIZE - 1))
2244                 ret = filemap_write_and_wait_range(mapping,
2245                                 round_down(iattr->ia_size, PAGE_SIZE),
2246                                 iattr->ia_size - 1);
2247         if (ret)
2248                 goto err;
2249
2250         mutex_lock(&inode->ei_update_lock);
2251         ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2252                                &new_i_size, 0);
2253         mutex_unlock(&inode->ei_update_lock);
2254
2255         if (unlikely(ret))
2256                 goto err;
2257
2258         truncate_setsize(&inode->v, iattr->ia_size);
2259
2260         /*
2261          * XXX: need a comment explaining why PAGE_SIZE and not block_bytes()
2262          * here:
2263          */
2264         ret = __bch2_fpunch(c, inode,
2265                         round_up(iattr->ia_size, PAGE_SIZE) >> 9,
2266                         U64_MAX, &inode->ei_journal_seq);
2267         if (unlikely(ret))
2268                 goto err;
2269
2270         setattr_copy(&inode->v, iattr);
2271
2272         mutex_lock(&inode->ei_update_lock);
2273         ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
2274                                ATTR_MTIME|ATTR_CTIME);
2275         mutex_unlock(&inode->ei_update_lock);
2276 err:
2277         pagecache_block_put(&mapping->add_lock);
2278         return ret;
2279 }
2280
2281 /* fallocate: */
2282
2283 static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2284 {
2285         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2286         struct address_space *mapping = inode->v.i_mapping;
2287         u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
2288         u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
2289         int ret = 0;
2290
2291         inode_lock(&inode->v);
2292         inode_dio_wait(&inode->v);
2293         pagecache_block_get(&mapping->add_lock);
2294
2295         ret = __bch2_truncate_page(inode,
2296                                    offset >> PAGE_SHIFT,
2297                                    offset, offset + len);
2298         if (unlikely(ret))
2299                 goto err;
2300
2301         if (offset >> PAGE_SHIFT !=
2302             (offset + len) >> PAGE_SHIFT) {
2303                 ret = __bch2_truncate_page(inode,
2304                                            (offset + len) >> PAGE_SHIFT,
2305                                            offset, offset + len);
2306                 if (unlikely(ret))
2307                         goto err;
2308         }
2309
2310         truncate_pagecache_range(&inode->v, offset, offset + len - 1);
2311
2312         if (discard_start < discard_end)
2313                 ret = __bch2_fpunch(c, inode, discard_start, discard_end,
2314                                     &inode->ei_journal_seq);
2315 err:
2316         pagecache_block_put(&mapping->add_lock);
2317         inode_unlock(&inode->v);
2318
2319         return ret;
2320 }
2321
2322 static long bch2_fcollapse(struct bch_inode_info *inode,
2323                            loff_t offset, loff_t len)
2324 {
2325         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2326         struct address_space *mapping = inode->v.i_mapping;
2327         struct btree_trans trans;
2328         struct btree_iter *src, *dst;
2329         BKEY_PADDED(k) copy;
2330         struct bkey_s_c k;
2331         loff_t new_size;
2332         int ret;
2333
2334         if ((offset | len) & (block_bytes(c) - 1))
2335                 return -EINVAL;
2336
2337         bch2_trans_init(&trans, c);
2338         bch2_trans_preload_iters(&trans);
2339
2340         /*
2341          * We need i_mutex to keep the page cache consistent with the extents
2342          * btree, and the btree consistent with i_size - we don't need outside
2343          * locking for the extents btree itself, because we're using linked
2344          * iterators
2345          */
2346         inode_lock(&inode->v);
2347         inode_dio_wait(&inode->v);
2348         pagecache_block_get(&mapping->add_lock);
2349
2350         ret = -EINVAL;
2351         if (offset + len >= inode->v.i_size)
2352                 goto err;
2353
2354         if (inode->v.i_size < len)
2355                 goto err;
2356
2357         new_size = inode->v.i_size - len;
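        /*
         * Illustrative example (editor's note, not in the original): on a
         * 1 MiB file, collapsing offset = 256 KiB, len = 128 KiB copies
         * every extent from 384 KiB onwards down by 128 KiB, punches the now
         * stale tail, and sets new_size = 1 MiB - 128 KiB = 896 KiB.
         */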
2358
2359         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2360         if (ret)
2361                 goto err;
2362
2363         dst = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2364                         POS(inode->v.i_ino, offset >> 9),
2365                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2366         BUG_ON(IS_ERR_OR_NULL(dst));
2367
2368         src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2369                         POS_MIN, BTREE_ITER_SLOTS);
2370         BUG_ON(IS_ERR_OR_NULL(src));
2371
2372         while (bkey_cmp(dst->pos,
2373                         POS(inode->v.i_ino,
2374                             round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
2375                 struct disk_reservation disk_res;
2376
2377                 ret = bch2_btree_iter_traverse(dst);
2378                 if (ret)
2379                         goto btree_iter_err;
2380
2381                 bch2_btree_iter_set_pos(src,
2382                         POS(dst->pos.inode, dst->pos.offset + (len >> 9)));
2383
2384                 k = bch2_btree_iter_peek_slot(src);
2385                 if ((ret = btree_iter_err(k)))
2386                         goto btree_iter_err;
2387
2388                 bkey_reassemble(&copy.k, k);
2389
2390                 bch2_cut_front(src->pos, &copy.k);
2391                 copy.k.k.p.offset -= len >> 9;
2392
2393                 bch2_extent_trim_atomic(&copy.k, dst);
2394
2395                 BUG_ON(bkey_cmp(dst->pos, bkey_start_pos(&copy.k.k)));
2396
2397                 ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
2398                                 bch2_extent_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k)),
2399                                 BCH_DISK_RESERVATION_NOFAIL);
2400                 BUG_ON(ret);
2401
2402                 ret = bch2_extent_update(&trans, inode,
2403                                 &disk_res, NULL,
2404                                 dst, &copy.k,
2405                                 0, true, true, NULL);
2406                 bch2_disk_reservation_put(c, &disk_res);
2407 btree_iter_err:
2408                 if (ret == -EINTR)
2409                         ret = 0;
2410                 if (ret)
2411                         goto err;
2412                 /*
2413                  * XXX: if we error here we've left data with multiple
2414                  * pointers... which isn't a _super_ serious problem...
2415                  */
2416
2417                 bch2_btree_iter_cond_resched(src);
2418         }
2419         bch2_trans_unlock(&trans);
2420
2421         ret = __bch2_fpunch(c, inode,
2422                         round_up(new_size, block_bytes(c)) >> 9,
2423                         U64_MAX, &inode->ei_journal_seq);
2424         if (ret)
2425                 goto err;
2426
2427         i_size_write(&inode->v, new_size);
2428         mutex_lock(&inode->ei_update_lock);
2429         ret = bch2_write_inode_size(c, inode, new_size,
2430                                     ATTR_MTIME|ATTR_CTIME);
2431         mutex_unlock(&inode->ei_update_lock);
2432 err:
2433         bch2_trans_exit(&trans);
2434         pagecache_block_put(&mapping->add_lock);
2435         inode_unlock(&inode->v);
2436         return ret;
2437 }
2438
2439 static long bch2_fallocate(struct bch_inode_info *inode, int mode,
2440                            loff_t offset, loff_t len)
2441 {
2442         struct address_space *mapping = inode->v.i_mapping;
2443         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2444         struct btree_trans trans;
2445         struct btree_iter *iter;
2446         struct bpos end_pos;
2447         loff_t block_start, block_end;
2448         loff_t end = offset + len;
2449         unsigned sectors;
2450         unsigned replicas = io_opts(c, inode).data_replicas;
2451         int ret;
2452
2453         bch2_trans_init(&trans, c);
2454         bch2_trans_preload_iters(&trans);
2455
2456         inode_lock(&inode->v);
2457         inode_dio_wait(&inode->v);
2458         pagecache_block_get(&mapping->add_lock);
2459
2460         if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2461                 ret = inode_newsize_ok(&inode->v, end);
2462                 if (ret)
2463                         goto err;
2464         }
2465
2466         if (mode & FALLOC_FL_ZERO_RANGE) {
2467                 ret = __bch2_truncate_page(inode,
2468                                            offset >> PAGE_SHIFT,
2469                                            offset, end);
2470
2471                 if (!ret &&
2472                     offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2473                         ret = __bch2_truncate_page(inode,
2474                                                    end >> PAGE_SHIFT,
2475                                                    offset, end);
2476
2477                 if (unlikely(ret))
2478                         goto err;
2479
2480                 truncate_pagecache_range(&inode->v, offset, end - 1);
2481
2482                 block_start     = round_up(offset, PAGE_SIZE);
2483                 block_end       = round_down(end, PAGE_SIZE);
2484         } else {
2485                 block_start     = round_down(offset, PAGE_SIZE);
2486                 block_end       = round_up(end, PAGE_SIZE);
2487         }
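        /*
         * Editor's gloss (not in the original): for ZERO_RANGE the partial
         * head and tail pages were just zeroed in the pagecache by
         * __bch2_truncate_page(), so the extent loop below only needs to
         * cover the whole pages in between; plain preallocation instead
         * rounds outwards so every byte of the requested range gets backed
         * by a reservation.
         */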
2488
2489         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2490                         POS(inode->v.i_ino, block_start >> 9),
2491                         BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2492         end_pos = POS(inode->v.i_ino, block_end >> 9);
2493
2494         while (bkey_cmp(iter->pos, end_pos) < 0) {
2495                 struct disk_reservation disk_res = { 0 };
2496                 struct quota_res quota_res = { 0 };
2497                 struct bkey_i_reservation reservation;
2498                 struct bkey_s_c k;
2499
2500                 k = bch2_btree_iter_peek_slot(iter);
2501                 if ((ret = btree_iter_err(k)))
2502                         goto btree_iter_err;
2503
2504                 /* already reserved */
2505                 if (k.k->type == BCH_RESERVATION &&
2506                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2507                         bch2_btree_iter_next_slot(iter);
2508                         continue;
2509                 }
2510
2511                 if (bkey_extent_is_data(k.k) &&
2512                     !(mode & FALLOC_FL_ZERO_RANGE)) {
2513                         bch2_btree_iter_next_slot(iter);
2514                         continue;
2515                 }
2516
2517                 bkey_reservation_init(&reservation.k_i);
2518                 reservation.k.type      = BCH_RESERVATION;
2519                 reservation.k.p         = k.k->p;
2520                 reservation.k.size      = k.k->size;
2521
2522                 bch2_cut_front(iter->pos, &reservation.k_i);
2523                 bch2_cut_back(end_pos, &reservation.k);
2524
2525                 sectors = reservation.k.size;
2526                 reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);
2527
2528                 if (!bkey_extent_is_allocation(k.k)) {
2529                         ret = bch2_quota_reservation_add(c, inode,
2530                                         &quota_res,
2531                                         sectors, true);
2532                         if (unlikely(ret))
2533                                 goto btree_iter_err;
2534                 }
2535
2536                 if (reservation.v.nr_replicas < replicas ||
2537                     bch2_extent_is_compressed(k)) {
2538                         ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2539                                                         replicas, 0);
2540                         if (unlikely(ret))
2541                                 goto btree_iter_err;
2542
2543                         reservation.v.nr_replicas = disk_res.nr_replicas;
2544                 }
2545
2546                 ret = bch2_extent_update(&trans, inode,
2547                                 &disk_res, &quota_res,
2548                                 iter, &reservation.k_i,
2549                                 0, true, true, NULL);
2550
2551                 bch2_quota_reservation_put(c, inode, &quota_res);
2552                 bch2_disk_reservation_put(c, &disk_res);
2553 btree_iter_err:
2554                 if (ret == -EINTR)
2555                         ret = 0;
2556                 if (ret)
2557                         goto err;
2558         }
2559         bch2_trans_unlock(&trans);
2560
2561         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2562             end > inode->v.i_size) {
2563                 i_size_write(&inode->v, end);
2564
2565                 mutex_lock(&inode->ei_update_lock);
2566                 ret = bch2_write_inode_size(c, inode, inode->v.i_size, 0);
2567                 mutex_unlock(&inode->ei_update_lock);
2568         }
2569
2570         /* blech */
2571         if ((mode & FALLOC_FL_KEEP_SIZE) &&
2572             (mode & FALLOC_FL_ZERO_RANGE) &&
2573             inode->ei_inode.bi_size != inode->v.i_size) {
2574                 /* sync appends.. */
2575                 ret = filemap_write_and_wait_range(mapping,
2576                                         inode->ei_inode.bi_size, S64_MAX);
2577                 if (ret)
2578                         goto err;
2579
2580                 if (inode->ei_inode.bi_size != inode->v.i_size) {
2581                         mutex_lock(&inode->ei_update_lock);
2582                         ret = bch2_write_inode_size(c, inode,
2583                                                     inode->v.i_size, 0);
2584                         mutex_unlock(&inode->ei_update_lock);
2585                 }
2586         }
2587 err:
2588         bch2_trans_exit(&trans);
2589         pagecache_block_put(&mapping->add_lock);
2590         inode_unlock(&inode->v);
2591         return ret;
2592 }
2593
2594 long bch2_fallocate_dispatch(struct file *file, int mode,
2595                              loff_t offset, loff_t len)
2596 {
2597         struct bch_inode_info *inode = file_bch_inode(file);
2598
2599         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2600                 return bch2_fallocate(inode, mode, offset, len);
2601
2602         if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2603                 return bch2_fpunch(inode, offset, len);
2604
2605         if (mode == FALLOC_FL_COLLAPSE_RANGE)
2606                 return bch2_fcollapse(inode, offset, len);
2607
2608         return -EOPNOTSUPP;
2609 }
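/*
 * Editor's note (illustrative, not in the original): from userspace,
 * fallocate(fd, 0, offset, len) and FALLOC_FL_ZERO_RANGE land in
 * bch2_fallocate(), fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE,
 * offset, len) in bch2_fpunch(), and FALLOC_FL_COLLAPSE_RANGE in
 * bch2_fcollapse(); any other mode combination returns -EOPNOTSUPP.
 */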
2610
2611 /* fseek: */
2612
2613 static bool page_is_data(struct page *page)
2614 {
2615         /* XXX: should only have to check PageDirty */
2616         return PagePrivate(page) &&
2617                 (page_state(page)->sectors ||
2618                  page_state(page)->dirty_sectors);
2619 }
2620
2621 static loff_t bch2_next_pagecache_data(struct inode *vinode,
2622                                        loff_t start_offset,
2623                                        loff_t end_offset)
2624 {
2625         struct address_space *mapping = vinode->i_mapping;
2626         struct page *page;
2627         pgoff_t index;
2628
2629         for (index = start_offset >> PAGE_SHIFT;
2630              index < end_offset >> PAGE_SHIFT;
2631              index++) {
2632                 if (find_get_pages(mapping, &index, 1, &page)) {
2633                         lock_page(page);
2634
2635                         if (page_is_data(page))
2636                                 end_offset =
2637                                         min(end_offset,
2638                                         max(start_offset,
2639                                             ((loff_t) index) << PAGE_SHIFT));
2640                         unlock_page(page);
2641                         put_page(page);
2642                 } else {
2643                         break;
2644                 }
2645         }
2646
2647         return end_offset;
2648 }
2649
2650 static loff_t bch2_seek_data(struct file *file, u64 offset)
2651 {
2652         struct bch_inode_info *inode = file_bch_inode(file);
2653         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2654         struct btree_iter iter;
2655         struct bkey_s_c k;
2656         u64 isize, next_data = MAX_LFS_FILESIZE;
2657         int ret;
2658
2659         isize = i_size_read(&inode->v);
2660         if (offset >= isize)
2661                 return -ENXIO;
2662
2663         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2664                            POS(inode->v.i_ino, offset >> 9), 0, k) {
2665                 if (k.k->p.inode != inode->v.i_ino) {
2666                         break;
2667                 } else if (bkey_extent_is_data(k.k)) {
2668                         next_data = max(offset, bkey_start_offset(k.k) << 9);
2669                         break;
2670                 } else if (k.k->p.offset << 9 > isize)
2671                         break;
2672         }
2673
2674         ret = bch2_btree_iter_unlock(&iter);
2675         if (ret)
2676                 return ret;
2677
2678         if (next_data > offset)
2679                 next_data = bch2_next_pagecache_data(&inode->v,
2680                                                      offset, next_data);
2681
2682         if (next_data > isize)
2683                 return -ENXIO;
2684
2685         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
2686 }
2687
2688 static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
2689 {
2690         struct page *page;
2691         bool ret;
2692
2693         page = find_lock_entry(mapping, index);
2694         if (!page || radix_tree_exception(page))
2695                 return false;
2696
2697         ret = page_is_data(page);
2698         unlock_page(page);
        put_page(page);         /* drop the ref taken by find_lock_entry() */
2699
2700         return ret;
2701 }
2702
2703 static loff_t bch2_next_pagecache_hole(struct inode *vinode,
2704                                        loff_t start_offset,
2705                                        loff_t end_offset)
2706 {
2707         struct address_space *mapping = vinode->i_mapping;
2708         pgoff_t index;
2709
2710         for (index = start_offset >> PAGE_SHIFT;
2711              index < end_offset >> PAGE_SHIFT;
2712              index++)
2713                 if (!page_slot_is_data(mapping, index))
2714                         end_offset = max(start_offset,
2715                                          ((loff_t) index) << PAGE_SHIFT);
2716
2717         return end_offset;
2718 }
2719
2720 static loff_t bch2_seek_hole(struct file *file, u64 offset)
2721 {
2722         struct bch_inode_info *inode = file_bch_inode(file);
2723         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2724         struct btree_iter iter;
2725         struct bkey_s_c k;
2726         u64 isize, next_hole = MAX_LFS_FILESIZE;
2727         int ret;
2728
2729         isize = i_size_read(&inode->v);
2730         if (offset >= isize)
2731                 return -ENXIO;
2732
2733         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2734                            POS(inode->v.i_ino, offset >> 9),
2735                            BTREE_ITER_SLOTS, k) {
2736                 if (k.k->p.inode != inode->v.i_ino) {
2737                         next_hole = bch2_next_pagecache_hole(&inode->v,
2738                                         offset, MAX_LFS_FILESIZE);
2739                         break;
2740                 } else if (!bkey_extent_is_data(k.k)) {
2741                         next_hole = bch2_next_pagecache_hole(&inode->v,
2742                                         max(offset, bkey_start_offset(k.k) << 9),
2743                                         k.k->p.offset << 9);
2744
2745                         if (next_hole < k.k->p.offset << 9)
2746                                 break;
2747                 } else {
2748                         offset = max(offset, bkey_start_offset(k.k) << 9);
2749                 }
2750         }
2751
2752         ret = bch2_btree_iter_unlock(&iter);
2753         if (ret)
2754                 return ret;
2755
2756         if (next_hole > isize)
2757                 next_hole = isize;
2758
2759         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
2760 }
2761
2762 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
2763 {
2764         switch (whence) {
2765         case SEEK_SET:
2766         case SEEK_CUR:
2767         case SEEK_END:
2768                 return generic_file_llseek(file, offset, whence);
2769         case SEEK_DATA:
2770                 return bch2_seek_data(file, offset);
2771         case SEEK_HOLE:
2772                 return bch2_seek_hole(file, offset);
2773         }
2774
2775         return -EINVAL;
2776 }
2777
2778 void bch2_fs_fsio_exit(struct bch_fs *c)
2779 {
2780         bioset_exit(&c->dio_write_bioset);
2781         bioset_exit(&c->dio_read_bioset);
2782         bioset_exit(&c->writepage_bioset);
2783 }
2784
2785 int bch2_fs_fsio_init(struct bch_fs *c)
2786 {
2787         int ret = 0;
2788
2789         pr_verbose_init(c->opts, "");
2790
2791         if (bioset_init(&c->writepage_bioset,
2792                         4, offsetof(struct bch_writepage_io, op.op.wbio.bio),
2793                         BIOSET_NEED_BVECS) ||
2794             bioset_init(&c->dio_read_bioset,
2795                         4, offsetof(struct dio_read, rbio.bio),
2796                         BIOSET_NEED_BVECS) ||
2797             bioset_init(&c->dio_write_bioset,
2798                         4, offsetof(struct dio_write, iop.op.wbio.bio),
2799                         BIOSET_NEED_BVECS))
2800                 ret = -ENOMEM;
2801
2802         pr_verbose_init(c->opts, "ret %i", ret);
2803         return ret;
2804 }
2805
2806 #endif /* NO_BCACHEFS_FS */