1 #ifndef NO_BCACHEFS_FS
2
3 #include "bcachefs.h"
4 #include "btree_update.h"
5 #include "buckets.h"
6 #include "clock.h"
7 #include "error.h"
8 #include "fs.h"
9 #include "fs-io.h"
10 #include "fsck.h"
11 #include "inode.h"
12 #include "journal.h"
13 #include "io.h"
14 #include "keylist.h"
15
16 #include <linux/aio.h>
17 #include <linux/backing-dev.h>
18 #include <linux/falloc.h>
19 #include <linux/migrate.h>
20 #include <linux/mmu_context.h>
21 #include <linux/pagevec.h>
22 #include <linux/task_io_accounting_ops.h>
23 #include <linux/uio.h>
24 #include <linux/writeback.h>
25
26 #include <trace/events/bcachefs.h>
27 #include <trace/events/writeback.h>
28
29 struct bio_set *bch2_writepage_bioset;
30 struct bio_set *bch2_dio_read_bioset;
31 struct bio_set *bch2_dio_write_bioset;
32
33 /* pagecache_block must be held */
34 static int write_invalidate_inode_pages_range(struct address_space *mapping,
35                                               loff_t start, loff_t end)
36 {
37         int ret;
38
39         /*
40          * XXX: the way this is currently implemented, we can spin if a process
41          * is continually redirtying a specific page
42          */
43         do {
44                 if (!mapping->nrpages &&
45                     !mapping->nrexceptional)
46                         return 0;
47
48                 ret = filemap_write_and_wait_range(mapping, start, end);
49                 if (ret)
50                         break;
51
52                 if (!mapping->nrpages)
53                         return 0;
54
55                 ret = invalidate_inode_pages2_range(mapping,
56                                 start >> PAGE_SHIFT,
57                                 end >> PAGE_SHIFT);
58         } while (ret == -EBUSY);
59
60         return ret;
61 }
62
63 /* i_size updates: */
64
65 static int inode_set_size(struct bch_inode_info *inode,
66                           struct bch_inode_unpacked *bi,
67                           void *p)
68 {
69         loff_t *new_i_size = p;
70
71         lockdep_assert_held(&inode->ei_update_lock);
72
73         bi->bi_size = *new_i_size;
74
75         if (atomic_long_read(&inode->ei_size_dirty_count))
76                 bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
77         else
78                 bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
79
80         return 0;
81 }
82
83 static int __must_check bch2_write_inode_size(struct bch_fs *c,
84                                               struct bch_inode_info *inode,
85                                               loff_t new_size)
86 {
87         return __bch2_write_inode(c, inode, inode_set_size, &new_size);
88 }
89
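/*
 * ei_size_dirty_count counts operations in flight that may leave the on-disk
 * i_size out of sync with the actual extents; while it's nonzero the inode is
 * written with BCH_INODE_I_SIZE_DIRTY set (see inode_set_size() above), which
 * presumably tells recovery/fsck not to trust the on-disk i_size.
 */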
90 static inline void i_size_dirty_put(struct bch_inode_info *inode)
91 {
92         atomic_long_dec_bug(&inode->ei_size_dirty_count);
93 }
94
95 static inline void i_size_dirty_get(struct bch_inode_info *inode)
96 {
97         lockdep_assert_held(&inode->v.i_rwsem);
98
99         atomic_long_inc(&inode->ei_size_dirty_count);
100 }
101
102 /* i_sectors accounting: */
103
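/*
 * Extent updates change an inode's allocated-sector count. The i_sectors_hook
 * is passed to the btree insert path as an extent_insert_hook: as each part of
 * an insert is committed it accumulates the delta in h->sectors, which
 * i_sectors_dirty_put() then applies to i_blocks/ei_sectors. While updates are
 * in flight the inode is flagged BCH_INODE_I_SECTORS_DIRTY on disk, so an
 * interrupted count can be detected (and, presumably, repaired by fsck).
 */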
104 static enum extent_insert_hook_ret
105 i_sectors_hook_fn(struct extent_insert_hook *hook,
106                   struct bpos committed_pos,
107                   struct bpos next_pos,
108                   struct bkey_s_c k,
109                   const struct bkey_i *insert)
110 {
111         struct i_sectors_hook *h = container_of(hook,
112                                 struct i_sectors_hook, hook);
113         s64 sectors = next_pos.offset - committed_pos.offset;
114         int sign = bkey_extent_is_allocation(&insert->k) -
115                 (k.k && bkey_extent_is_allocation(k.k));
116
117         EBUG_ON(!(h->inode->ei_flags & BCH_INODE_I_SECTORS_DIRTY));
118         EBUG_ON(!atomic_long_read(&h->inode->ei_sectors_dirty_count));
119
120         h->sectors += sectors * sign;
121
122         return BTREE_HOOK_DO_INSERT;
123 }
124
125 static int inode_set_i_sectors_dirty(struct bch_inode_info *inode,
126                                      struct bch_inode_unpacked *bi, void *p)
127 {
128         BUG_ON(bi->bi_flags & BCH_INODE_I_SECTORS_DIRTY);
129
130         bi->bi_flags |= BCH_INODE_I_SECTORS_DIRTY;
131         return 0;
132 }
133
134 static int inode_clear_i_sectors_dirty(struct bch_inode_info *inode,
135                                        struct bch_inode_unpacked *bi,
136                                        void *p)
137 {
138         BUG_ON(!(bi->bi_flags & BCH_INODE_I_SECTORS_DIRTY));
139
140         bi->bi_sectors  = atomic64_read(&inode->ei_sectors);
141         bi->bi_flags    &= ~BCH_INODE_I_SECTORS_DIRTY;
142         return 0;
143 }
144
145 static void i_sectors_dirty_put(struct bch_fs *c,
146                                 struct bch_inode_info *inode,
147                                 struct i_sectors_hook *h)
148 {
149         if (h->sectors) {
150                 spin_lock(&inode->v.i_lock);
151                 inode->v.i_blocks += h->sectors;
152                 spin_unlock(&inode->v.i_lock);
153
154                 atomic64_add(h->sectors, &inode->ei_sectors);
155                 EBUG_ON(atomic64_read(&inode->ei_sectors) < 0);
156         }
157
158         EBUG_ON(atomic_long_read(&inode->ei_sectors_dirty_count) <= 0);
159
160         mutex_lock(&inode->ei_update_lock);
161
162         if (atomic_long_dec_and_test(&inode->ei_sectors_dirty_count)) {
163                 int ret = __bch2_write_inode(c, inode,
164                                           inode_clear_i_sectors_dirty, NULL);
165
166                 ret = ret; /* error deliberately ignored - the inode just stays I_SECTORS_DIRTY */
167         }
168
169         mutex_unlock(&inode->ei_update_lock);
170 }
171
172 static int __must_check i_sectors_dirty_get(struct bch_fs *c,
173                                             struct bch_inode_info *inode,
174                                             struct i_sectors_hook *h)
175 {
176         int ret = 0;
177
178         h->hook.fn      = i_sectors_hook_fn;
179         h->sectors      = 0;
180 #ifdef CONFIG_BCACHEFS_DEBUG
181         h->inode        = inode;
182 #endif
183
184         if (atomic_long_inc_not_zero(&inode->ei_sectors_dirty_count))
185                 return 0;
186
187         mutex_lock(&inode->ei_update_lock);
188
189         if (!(inode->ei_flags & BCH_INODE_I_SECTORS_DIRTY))
190                 ret = __bch2_write_inode(c, inode, inode_set_i_sectors_dirty,
191                                          NULL);
192
193         if (!ret)
194                 atomic_long_inc(&inode->ei_sectors_dirty_count);
195
196         mutex_unlock(&inode->ei_update_lock);
197
198         return ret;
199 }
200
201 struct bchfs_extent_trans_hook {
202         struct bchfs_write_op           *op;
203         struct extent_insert_hook       hook;
204
205         struct bch_inode_unpacked       inode_u;
206         struct bkey_inode_buf           inode_p;
207
208         bool                            need_inode_update;
209 };
210
211 static enum extent_insert_hook_ret
212 bchfs_extent_update_hook(struct extent_insert_hook *hook,
213                          struct bpos committed_pos,
214                          struct bpos next_pos,
215                          struct bkey_s_c k,
216                          const struct bkey_i *insert)
217 {
218         struct bchfs_extent_trans_hook *h = container_of(hook,
219                                 struct bchfs_extent_trans_hook, hook);
220         struct bch_inode_info *inode = h->op->inode;
221         int sign = bkey_extent_is_allocation(&insert->k) -
222                 (k.k && bkey_extent_is_allocation(k.k));
223         s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
224         u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
225         bool do_pack = false;
226
227         BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));
228
229         /* XXX: inode->i_size locking */
230         if (offset > inode->ei_size) {
231                 BUG_ON(inode->ei_flags & BCH_INODE_I_SIZE_DIRTY);
232
233                 if (!h->need_inode_update) {
234                         h->need_inode_update = true;
235                         return BTREE_HOOK_RESTART_TRANS;
236                 }
237
238                 h->inode_u.bi_size = offset;
239                 do_pack = true;
240
241                 inode->ei_size = offset;
242
243                 if (h->op->is_dio)
244                         i_size_write(&inode->v, offset);
245         }
246
247         if (sectors) {
248                 if (!h->need_inode_update) {
249                         h->need_inode_update = true;
250                         return BTREE_HOOK_RESTART_TRANS;
251                 }
252
253                 h->inode_u.bi_sectors += sectors;
254                 do_pack = true;
255
256                 atomic64_add(sectors, &inode->ei_sectors);
257
258                 h->op->sectors_added += sectors;
259
260                 if (h->op->is_dio) {
261                         spin_lock(&inode->v.i_lock);
262                         inode->v.i_blocks += sectors;
263                         spin_unlock(&inode->v.i_lock);
264                 }
265         }
266
267         if (do_pack)
268                 bch2_inode_pack(&h->inode_p, &h->inode_u);
269
270         return BTREE_HOOK_DO_INSERT;
271 }
272
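/*
 * Index update path for buffered and O_DIRECT writes: inserts the extent keys
 * produced by bch2_write(), and - via bchfs_extent_update_hook() above - bumps
 * i_size/i_sectors in the same btree transaction when they change, so the
 * inode and its extents can't go out of sync across a crash.
 */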
273 static int bchfs_write_index_update(struct bch_write_op *wop)
274 {
275         struct bchfs_write_op *op = container_of(wop,
276                                 struct bchfs_write_op, op);
277         struct keylist *keys = &op->op.insert_keys;
278         struct btree_iter extent_iter, inode_iter;
279         struct bchfs_extent_trans_hook hook;
280         struct bkey_i *k = bch2_keylist_front(keys);
281         int ret;
282
283         BUG_ON(k->k.p.inode != op->inode->v.i_ino);
284
285         bch2_btree_iter_init(&extent_iter, wop->c, BTREE_ID_EXTENTS,
286                              bkey_start_pos(&bch2_keylist_front(keys)->k),
287                              BTREE_ITER_INTENT);
288         bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES,
289                              POS(extent_iter.pos.inode, 0),
290                              BTREE_ITER_INTENT);
291
292         hook.op                 = op;
293         hook.hook.fn            = bchfs_extent_update_hook;
294         hook.need_inode_update  = false;
295
296         do {
297                 ret = bch2_btree_iter_traverse(&extent_iter);
298                 if (ret)
299                         goto err;
300
301                 /* XXX: inode->i_size locking */
302                 k = bch2_keylist_front(keys);
303                 if (min(k->k.p.offset << 9, op->new_i_size) > op->inode->ei_size)
304                         hook.need_inode_update = true;
305
306                 if (hook.need_inode_update) {
307                         struct bkey_s_c inode;
308
309                         if (!btree_iter_linked(&inode_iter))
310                                 bch2_btree_iter_link(&extent_iter, &inode_iter);
311
312                         inode = bch2_btree_iter_peek_with_holes(&inode_iter);
313                         if ((ret = btree_iter_err(inode)))
314                                 goto err;
315
316                         if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
317                                       "inode %llu not found when updating",
318                                       extent_iter.pos.inode)) {
319                                 ret = -ENOENT;
320                                 break;
321                         }
322
323                         if (WARN_ONCE(bkey_bytes(inode.k) >
324                                       sizeof(hook.inode_p),
325                                       "inode %llu too big (%zu bytes, buf %zu)",
326                                       extent_iter.pos.inode,
327                                       bkey_bytes(inode.k),
328                                       sizeof(hook.inode_p))) {
329                                 ret = -ENOENT;
330                                 break;
331                         }
332
333                         bkey_reassemble(&hook.inode_p.inode.k_i, inode);
334                         ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
335                                                &hook.inode_u);
336                         if (WARN_ONCE(ret,
337                                       "error %i unpacking inode %llu",
338                                       ret, extent_iter.pos.inode)) {
339                                 ret = -ENOENT;
340                                 break;
341                         }
342
343                         ret = bch2_btree_insert_at(wop->c, &wop->res,
344                                         &hook.hook, op_journal_seq(wop),
345                                         BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
346                                         BTREE_INSERT_ENTRY(&extent_iter, k),
347                                         BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
348                                                         &hook.inode_p.inode.k_i, 2));
349                 } else {
350                         ret = bch2_btree_insert_at(wop->c, &wop->res,
351                                         &hook.hook, op_journal_seq(wop),
352                                         BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
353                                         BTREE_INSERT_ENTRY(&extent_iter, k));
354                 }
355 err:
356                 if (ret == -EINTR)
357                         continue;
358                 if (ret)
359                         break;
360
361                 bch2_keylist_pop_front(keys);
362         } while (!bch2_keylist_empty(keys));
363
364         bch2_btree_iter_unlock(&extent_iter);
365         bch2_btree_iter_unlock(&inode_iter);
366
367         return ret;
368 }
369
370 /* page state: */
371
372 /* stored in page->private: */
373
374 /*
375  * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
376  * almost protect it with the page lock, except that bch2_writepage_io_done has
377  * to update the sector counts (and from interrupt/bottom half context).
378  */
379 struct bch_page_state {
380 union { struct {
381         /*
382          * page is _fully_ written on disk, and not compressed - which means to
383          * write this page we don't have to reserve space (the new write will
384          * never take up more space on disk than what it's overwriting)
385          */
386         unsigned allocated:1;
387
388         /* Owns PAGE_SECTORS sized reservation: */
389         unsigned                reserved:1;
390         unsigned                nr_replicas:4;
391
392         /*
393          * Number of sectors on disk - for i_blocks
394          * Uncompressed size, not compressed size:
395          */
396         u8                      sectors;
397         u8                      dirty_sectors;
398 };
399         /* for cmpxchg: */
400         unsigned long           v;
401 };
402 };
403
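/*
 * Atomically update a page's bch_page_state: reads the current state into
 * _old and _new, runs _expr to modify _new, and loops on cmpxchg until the
 * update lands (or _expr leaves the state unchanged). Evaluates to the old
 * state. For example (sketch - mirrors bch2_writepage_io_done() below):
 *
 *	old = page_state_cmpxchg(page_state(page), new, {
 *		new.sectors		= PAGE_SECTORS;
 *		new.dirty_sectors	= 0;
 *	});
 */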
404 #define page_state_cmpxchg(_ptr, _new, _expr)                           \
405 ({                                                                      \
406         unsigned long _v = READ_ONCE((_ptr)->v);                        \
407         struct bch_page_state _old;                                     \
408                                                                         \
409         do {                                                            \
410                 _old.v = _new.v = _v;                                   \
411                 _expr;                                                  \
412                                                                         \
413                 EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
414         } while (_old.v != _new.v &&                                    \
415                  (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v); \
416                                                                         \
417         _old;                                                           \
418 })
419
420 static inline struct bch_page_state *page_state(struct page *page)
421 {
422         struct bch_page_state *s = (void *) &page->private;
423
424         BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));
425
426         if (!PagePrivate(page))
427                 SetPagePrivate(page);
428
429         return s;
430 }
431
432 static void bch2_put_page_reservation(struct bch_fs *c, struct page *page)
433 {
434         struct disk_reservation res = { .sectors = PAGE_SECTORS };
435         struct bch_page_state s;
436
437         s = page_state_cmpxchg(page_state(page), s, {
438                 if (!s.reserved)
439                         return;
440                 s.reserved = 0;
441         });
442
443         bch2_disk_reservation_put(c, &res);
444 }
445
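/*
 * Make sure we can write out @page without -ENOSPC: unless the page is already
 * fully allocated on disk (and uncompressed) or already holds a reservation,
 * reserve PAGE_SECTORS worth of space now, before the page is dirtied.
 */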
446 static int bch2_get_page_reservation(struct bch_fs *c, struct page *page,
447                                     bool check_enospc)
448 {
449         struct bch_page_state *s = page_state(page), new;
450         struct disk_reservation res;
451         int ret = 0;
452
453         BUG_ON(s->allocated && s->sectors != PAGE_SECTORS);
454
455         if (s->allocated || s->reserved)
456                 return 0;
457
458         ret = bch2_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc
459                                        ? BCH_DISK_RESERVATION_NOFAIL : 0);
460         if (ret)
461                 return ret;
462
463         page_state_cmpxchg(s, new, {
464                 if (new.reserved) {
465                         bch2_disk_reservation_put(c, &res);
466                         return 0;
467                 }
468                 new.reserved    = 1;
469                 new.nr_replicas = res.nr_replicas;
470         });
471
472         return 0;
473 }
474
475 static void bch2_clear_page_bits(struct page *page)
476 {
477         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
478         struct bch_fs *c = inode->v.i_sb->s_fs_info;
479         struct disk_reservation res = { .sectors = PAGE_SECTORS };
480         struct bch_page_state s;
481
482         if (!PagePrivate(page))
483                 return;
484
485         s = xchg(page_state(page), (struct bch_page_state) { .v = 0 });
486         ClearPagePrivate(page);
487
488         if (s.dirty_sectors) {
489                 spin_lock(&inode->v.i_lock);
490                 inode->v.i_blocks -= s.dirty_sectors;
491                 spin_unlock(&inode->v.i_lock);
492         }
493
494         if (s.reserved)
495                 bch2_disk_reservation_put(c, &res);
496 }
497
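/*
 * On dirtying, account every sector of the page that isn't already on disk as
 * dirty, and reflect the change in i_blocks so block counts include pending
 * writes.
 */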
498 int bch2_set_page_dirty(struct page *page)
499 {
500         struct bch_page_state old, new;
501
502         old = page_state_cmpxchg(page_state(page), new,
503                 new.dirty_sectors = PAGE_SECTORS - new.sectors;
504         );
505
506         if (old.dirty_sectors != new.dirty_sectors) {
507                 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
508
509                 spin_lock(&inode->v.i_lock);
510                 inode->v.i_blocks += new.dirty_sectors - old.dirty_sectors;
511                 spin_unlock(&inode->v.i_lock);
512         }
513
514         return __set_page_dirty_nobuffers(page);
515 }
516
517 /* readpages/writepages: */
518
519 static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
520 {
521         sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
522
523         return bio->bi_vcnt < bio->bi_max_vecs &&
524                 bio_end_sector(bio) == offset;
525 }
526
527 static void __bio_add_page(struct bio *bio, struct page *page)
528 {
529         bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
530                 .bv_page = page,
531                 .bv_len = PAGE_SIZE,
532                 .bv_offset = 0,
533         };
534
535         bio->bi_iter.bi_size += PAGE_SIZE;
536 }
537
538 static int bio_add_page_contig(struct bio *bio, struct page *page)
539 {
540         sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
541
542         BUG_ON(!bio->bi_max_vecs);
543
544         if (!bio->bi_vcnt)
545                 bio->bi_iter.bi_sector = offset;
546         else if (!bio_can_add_page_contig(bio, page))
547                 return -1;
548
549         __bio_add_page(bio, page);
550         return 0;
551 }
552
553 static void bch2_readpages_end_io(struct bio *bio)
554 {
555         struct bio_vec *bv;
556         int i;
557
558         bio_for_each_segment_all(bv, bio, i) {
559                 struct page *page = bv->bv_page;
560
561                 if (!bio->bi_error) {
562                         SetPageUptodate(page);
563                 } else {
564                         ClearPageUptodate(page);
565                         SetPageError(page);
566                 }
567                 unlock_page(page);
568         }
569
570         bio_put(bio);
571 }
572
573 struct readpages_iter {
574         struct address_space    *mapping;
575         struct list_head        pages;
576         unsigned                nr_pages;
577 };
578
579 static int readpage_add_page(struct readpages_iter *iter, struct page *page)
580 {
581         struct bch_page_state *s = page_state(page);
582         int ret;
583
584         BUG_ON(s->reserved);
585         s->allocated = 1;
586         s->sectors = 0;
587
588         prefetchw(&page->flags);
589         ret = add_to_page_cache_lru(page, iter->mapping,
590                                     page->index, GFP_NOFS);
591         put_page(page);
592         return ret;
593 }
594
595 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
596 {
597         while (iter->nr_pages) {
598                 struct page *page =
599                         list_last_entry(&iter->pages, struct page, lru);
600
601                 prefetchw(&page->flags);
602                 list_del(&page->lru);
603                 iter->nr_pages--;
604
605                 if (!readpage_add_page(iter, page))
606                         return page;
607         }
608
609         return NULL;
610 }
611
612 #define for_each_readpage_page(_iter, _page)                            \
613         for (;                                                          \
614              ((_page) = readpage_iter_next(&(_iter)));)
615
616 static void bch2_mark_pages_unalloc(struct bio *bio)
617 {
618         struct bvec_iter iter;
619         struct bio_vec bv;
620
621         bio_for_each_segment(bv, bio, iter)
622                 page_state(bv.bv_page)->allocated = 0;
623 }
624
625 static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
626 {
627         struct bvec_iter iter;
628         struct bio_vec bv;
629
630         bio_for_each_segment(bv, bio, iter) {
631                 struct bch_page_state *s = page_state(bv.bv_page);
632
633                 /* sectors in @k from the start of this page: */
634                 unsigned k_sectors = k.k->size - (iter.bi_sector - k.k->p.offset);
635
636                 unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);
637
638                 if (!s->sectors)
639                         s->nr_replicas = bch2_extent_nr_dirty_ptrs(k);
640                 else
641                         s->nr_replicas = min_t(unsigned, s->nr_replicas,
642                                                bch2_extent_nr_dirty_ptrs(k));
643
644                 BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
645                 s->sectors += page_sectors;
646         }
647 }
648
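/*
 * Grow the read bio towards @offset (the end of the current extent) with
 * further pages: first pages handed to us by readahead, then - if @get_more -
 * freshly allocated pages for slots not yet in the page cache, so that e.g. a
 * checksummed or compressed extent can be read in one go.
 */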
649 static void readpage_bio_extend(struct readpages_iter *iter,
650                                 struct bio *bio, u64 offset,
651                                 bool get_more)
652 {
653         struct page *page;
654         pgoff_t page_offset;
655         int ret;
656
657         while (bio_end_sector(bio) < offset &&
658                bio->bi_vcnt < bio->bi_max_vecs) {
659                 page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
660
661                 if (iter->nr_pages) {
662                         page = list_last_entry(&iter->pages, struct page, lru);
663                         if (page->index != page_offset)
664                                 break;
665
666                         list_del(&page->lru);
667                         iter->nr_pages--;
668                 } else if (get_more) {
669                         rcu_read_lock();
670                         page = radix_tree_lookup(&iter->mapping->page_tree, page_offset);
671                         rcu_read_unlock();
672
673                         if (page && !radix_tree_exceptional_entry(page))
674                                 break;
675
676                         page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
677                         if (!page)
678                                 break;
679
680                         page->index = page_offset;
681                         ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
682                 } else {
683                         break;
684                 }
685
686                 ret = readpage_add_page(iter, page);
687                 if (ret)
688                         break;
689
690                 __bio_add_page(bio, page);
691         }
692
693         if (!iter->nr_pages)
694                 SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
695 }
696
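/*
 * Core read loop: walk extents starting at the bio's current sector, split the
 * bio at extent boundaries, and either issue a read for each extent or
 * zero-fill the part of the bio that lands in a hole.
 */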
697 static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
698                        struct bch_read_bio *rbio, u64 inum,
699                        struct readpages_iter *readpages_iter)
700 {
701         struct bio *bio = &rbio->bio;
702         int flags = BCH_READ_RETRY_IF_STALE|
703                 BCH_READ_MAY_PROMOTE;
704
705         while (1) {
706                 struct extent_pick_ptr pick;
707                 BKEY_PADDED(k) tmp;
708                 struct bkey_s_c k;
709                 unsigned bytes;
710                 bool is_last;
711
712                 bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
713
714                 k = bch2_btree_iter_peek_with_holes(iter);
715                 BUG_ON(!k.k);
716
717                 if (IS_ERR(k.k)) {
718                         int ret = bch2_btree_iter_unlock(iter);
719                         BUG_ON(!ret);
720                         bcache_io_error(c, bio, "btree IO error %i", ret);
721                         bio_endio(bio);
722                         return;
723                 }
724
725                 bkey_reassemble(&tmp.k, k);
726                 bch2_btree_iter_unlock(iter);
727                 k = bkey_i_to_s_c(&tmp.k);
728
729                 bch2_extent_pick_ptr(c, k, NULL, &pick);
730                 if (IS_ERR(pick.ca)) {
731                         bcache_io_error(c, bio, "no device to read from");
732                         bio_endio(bio);
733                         return;
734                 }
735
736                 if (readpages_iter)
737                         readpage_bio_extend(readpages_iter,
738                                             bio, k.k->p.offset,
739                                             pick.ca &&
740                                             (pick.crc.csum_type ||
741                                              pick.crc.compression_type));
742
743                 bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
744                          bio->bi_iter.bi_sector) << 9;
745                 is_last = bytes == bio->bi_iter.bi_size;
746                 swap(bio->bi_iter.bi_size, bytes);
747
748                 if (bkey_extent_is_allocation(k.k))
749                         bch2_add_page_sectors(bio, k);
750
751                 if (!bkey_extent_is_allocation(k.k) ||
752                     bkey_extent_is_compressed(k))
753                         bch2_mark_pages_unalloc(bio);
754
755                 if (pick.ca) {
756                         if (!is_last) {
757                                 bio_inc_remaining(&rbio->bio);
758                                 flags |= BCH_READ_MUST_CLONE;
759                                 trace_read_split(&rbio->bio);
760                         }
761
762                         bch2_read_extent(c, rbio, k, &pick, flags);
763                 } else {
764                         zero_fill_bio(bio);
765
766                         if (is_last)
767                                 bio_endio(bio);
768                 }
769
770                 if (is_last)
771                         return;
772
773                 swap(bio->bi_iter.bi_size, bytes);
774                 bio_advance(bio, bytes);
775         }
776 }
777
778 int bch2_readpages(struct file *file, struct address_space *mapping,
779                    struct list_head *pages, unsigned nr_pages)
780 {
781         struct bch_inode_info *inode = to_bch_ei(mapping->host);
782         struct bch_fs *c = inode->v.i_sb->s_fs_info;
783         struct btree_iter iter;
784         struct page *page;
785         struct readpages_iter readpages_iter = {
786                 .mapping = mapping, .nr_pages = nr_pages
787         };
788
789         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
790
791         INIT_LIST_HEAD(&readpages_iter.pages);
792         list_add(&readpages_iter.pages, pages);
793         list_del_init(pages);
794
795         if (current->pagecache_lock != &mapping->add_lock)
796                 pagecache_add_get(&mapping->add_lock);
797
798         while ((page = readpage_iter_next(&readpages_iter))) {
799                 unsigned n = max_t(unsigned,
800                                    min_t(unsigned, readpages_iter.nr_pages + 1,
801                                          BIO_MAX_PAGES),
802                                    c->sb.encoded_extent_max >> PAGE_SECTOR_SHIFT);
803
804                 struct bch_read_bio *rbio =
805                         to_rbio(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read));
806
807                 rbio->bio.bi_end_io = bch2_readpages_end_io;
808                 bio_add_page_contig(&rbio->bio, page);
809                 bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter);
810         }
811
812         if (current->pagecache_lock != &mapping->add_lock)
813                 pagecache_add_put(&mapping->add_lock);
814
815         return 0;
816 }
817
818 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
819                              u64 inum, struct page *page)
820 {
821         struct btree_iter iter;
822
823         /*
824          * Initialize page state:
825          * If a page is partly allocated and partly a hole, we want it to be
826          * marked unallocated - so we initially mark all pages
827          * allocated and then mark them unallocated as we find holes:
828          *
829          * Note that the bio hasn't been split yet - it's the only bio that
830          * points to these pages. As we walk extents and split @bio, that won't
831          * necessarily remain true - the splits won't necessarily be on page
832          * boundaries:
833          */
834         struct bch_page_state *s = page_state(page);
835
836         EBUG_ON(s->reserved);
837         s->allocated = 1;
838         s->sectors = 0;
839
840         bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
841         bio_add_page_contig(&rbio->bio, page);
842
843         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
844         bchfs_read(c, &iter, rbio, inum, NULL);
845 }
846
847 int bch2_readpage(struct file *file, struct page *page)
848 {
849         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
850         struct bch_fs *c = inode->v.i_sb->s_fs_info;
851         struct bch_read_bio *rbio;
852
853         rbio = to_rbio(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read));
854         rbio->bio.bi_end_io = bch2_readpages_end_io;
855
856         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
857         return 0;
858 }
859
860 struct bch_writepage_state {
861         struct bch_writepage_io *io;
862 };
863
864 static void bch2_writepage_io_free(struct closure *cl)
865 {
866         struct bch_writepage_io *io = container_of(cl,
867                                         struct bch_writepage_io, cl);
868
869         bio_put(&io->op.op.wbio.bio);
870 }
871
872 static void bch2_writepage_io_done(struct closure *cl)
873 {
874         struct bch_writepage_io *io = container_of(cl,
875                                         struct bch_writepage_io, cl);
876         struct bch_fs *c = io->op.op.c;
877         struct bio *bio = &io->op.op.wbio.bio;
878         struct bio_vec *bvec;
879         unsigned i;
880
881         atomic_sub(bio->bi_vcnt, &c->writeback_pages);
882         wake_up(&c->writeback_wait);
883
884         bio_for_each_segment_all(bvec, bio, i) {
885                 struct page *page = bvec->bv_page;
886
887                 if (io->op.op.error) {
888                         SetPageError(page);
889                         if (page->mapping)
890                                 set_bit(AS_EIO, &page->mapping->flags);
891                 }
892
893                 if (io->op.op.written >= PAGE_SECTORS) {
894                         struct bch_page_state old, new;
895
896                         old = page_state_cmpxchg(page_state(page), new, {
897                                 new.sectors = PAGE_SECTORS;
898                                 new.dirty_sectors = 0;
899                         });
900
901                         io->op.sectors_added -= old.dirty_sectors;
902                         io->op.op.written -= PAGE_SECTORS;
903                 }
904         }
905
906         /*
907          * racing with fallocate can cause us to add fewer sectors than
908          * expected - but we shouldn't add more sectors than expected:
909          *
910          * (an error halfway through a page - e.g. due to going RO - can screw
911          * that up slightly)
912          */
913         BUG_ON(io->op.sectors_added >= (s64) PAGE_SECTORS);
914
915         /*
916          * PageWriteback is effectively our ref on the inode - fixup i_blocks
917          * before calling end_page_writeback:
918          */
919         if (io->op.sectors_added) {
920                 struct bch_inode_info *inode = io->op.inode;
921
922                 spin_lock(&inode->v.i_lock);
923                 inode->v.i_blocks += io->op.sectors_added;
924                 spin_unlock(&inode->v.i_lock);
925         }
926
927         bio_for_each_segment_all(bvec, bio, i)
928                 end_page_writeback(bvec->bv_page);
929
930         closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
931 }
932
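/*
 * Submit the writepage io we've been building up; c->writeback_pages is bumped
 * here for writeback throttling and dropped again in bch2_writepage_io_done().
 */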
933 static void bch2_writepage_do_io(struct bch_writepage_state *w)
934 {
935         struct bch_writepage_io *io = w->io;
936         struct bio *bio = &io->op.op.wbio.bio;
937
938         w->io = NULL;
939         atomic_add(bio->bi_vcnt, &io->op.op.c->writeback_pages);
940
941         io->op.op.pos.offset = bio->bi_iter.bi_sector;
942
943         closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
944         continue_at(&io->cl, bch2_writepage_io_done, NULL);
945 }
946
947 /*
948  * Get a bch_writepage_io and add @page to it - appending to an existing one if
949  * possible, else allocating a new one:
950  */
951 static void bch2_writepage_io_alloc(struct bch_fs *c,
952                                     struct bch_writepage_state *w,
953                                     struct bch_inode_info *inode,
954                                     struct page *page)
955 {
956         u64 inum = inode->v.i_ino;
957         unsigned nr_replicas = page_state(page)->nr_replicas;
958
959         EBUG_ON(!nr_replicas);
960         /* XXX: disk_reservation->gen isn't plumbed through */
961
962         if (!w->io) {
963 alloc_io:
964                 w->io = container_of(bio_alloc_bioset(GFP_NOFS,
965                                                       BIO_MAX_PAGES,
966                                                       bch2_writepage_bioset),
967                                      struct bch_writepage_io, op.op.wbio.bio);
968
969                 closure_init(&w->io->cl, NULL);
970                 w->io->op.inode         = inode;
971                 w->io->op.sectors_added = 0;
972                 w->io->op.is_dio        = false;
973                 bch2_write_op_init(&w->io->op.op, c,
974                                 (struct disk_reservation) {
975                                         .nr_replicas = c->opts.data_replicas,
976                                 },
977                                 c->fastest_devs,
978                                 inode->ei_last_dirtied,
979                                 POS(inum, 0),
980                                 &inode->ei_journal_seq,
981                                 BCH_WRITE_THROTTLE);
982                 w->io->op.op.index_update_fn = bchfs_write_index_update;
983         }
984
985         if (w->io->op.op.res.nr_replicas != nr_replicas ||
986             bio_add_page_contig(&w->io->op.op.wbio.bio, page)) {
987                 bch2_writepage_do_io(w);
988                 goto alloc_io;
989         }
990
991         /*
992          * We shouldn't ever be handed pages for multiple inodes in a single
993          * pass - right?
994          */
995         BUG_ON(inode != w->io->op.inode);
996 }
997
998 static int __bch2_writepage(struct bch_fs *c, struct page *page,
999                             struct writeback_control *wbc,
1000                             struct bch_writepage_state *w)
1001 {
1002         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1003         struct bch_page_state new, old;
1004         unsigned offset;
1005         loff_t i_size = i_size_read(&inode->v);
1006         pgoff_t end_index = i_size >> PAGE_SHIFT;
1007
1008         EBUG_ON(!PageUptodate(page));
1009
1010         /* Is the page fully inside i_size? */
1011         if (page->index < end_index)
1012                 goto do_io;
1013
1014         /* Is the page fully outside i_size? (truncate in progress) */
1015         offset = i_size & (PAGE_SIZE - 1);
1016         if (page->index > end_index || !offset) {
1017                 unlock_page(page);
1018                 return 0;
1019         }
1020
1021         /*
1022          * The page straddles i_size.  It must be zeroed out on each and every
1023          * writepage invocation because it may be mmapped.  "A file is mapped
1024          * in multiples of the page size.  For a file that is not a multiple of
1025          * the  page size, the remaining memory is zeroed when mapped, and
1026          * writes to that region are not written out to the file."
1027          */
1028         zero_user_segment(page, offset, PAGE_SIZE);
1029 do_io:
1030         bch2_writepage_io_alloc(c, w, inode, page);
1031
1032         /* while page is locked: */
1033         w->io->op.new_i_size = i_size;
1034
1035         if (wbc->sync_mode == WB_SYNC_ALL)
1036                 w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;
1037
1038         /* Before unlocking the page, transfer reservation to w->io: */
1039         old = page_state_cmpxchg(page_state(page), new, {
1040                 EBUG_ON(!new.reserved &&
1041                         (new.sectors != PAGE_SECTORS ||
1042                         !new.allocated));
1043
1044                 if (new.allocated &&
1045                     w->io->op.op.compression_type != BCH_COMPRESSION_NONE)
1046                         new.allocated = 0;
1047                 else if (!new.reserved)
1048                         goto out;
1049                 new.reserved = 0;
1050         });
1051
1052         w->io->op.op.res.sectors += PAGE_SECTORS *
1053                 (old.reserved - new.reserved) *
1054                 old.nr_replicas;
1055 out:
1056         BUG_ON(PageWriteback(page));
1057         set_page_writeback(page);
1058         unlock_page(page);
1059
1060         return 0;
1061 }
1062
1063 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1064 {
1065         struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1066         struct bch_writepage_state w = { NULL };
1067         struct pagecache_iter iter;
1068         struct page *page;
1069         int ret = 0;
1070         int done = 0;
1071         pgoff_t uninitialized_var(writeback_index);
1072         pgoff_t index;
1073         pgoff_t end;            /* Inclusive */
1074         pgoff_t done_index;
1075         int cycled;
1076         int range_whole = 0;
1077         int tag;
1078
1079         if (wbc->range_cyclic) {
1080                 writeback_index = mapping->writeback_index; /* prev offset */
1081                 index = writeback_index;
1082                 if (index == 0)
1083                         cycled = 1;
1084                 else
1085                         cycled = 0;
1086                 end = -1;
1087         } else {
1088                 index = wbc->range_start >> PAGE_SHIFT;
1089                 end = wbc->range_end >> PAGE_SHIFT;
1090                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1091                         range_whole = 1;
1092                 cycled = 1; /* ignore range_cyclic tests */
1093         }
1094         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1095                 tag = PAGECACHE_TAG_TOWRITE;
1096         else
1097                 tag = PAGECACHE_TAG_DIRTY;
1098 retry:
1099         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1100                 tag_pages_for_writeback(mapping, index, end);
1101
1102         done_index = index;
1103 get_pages:
1104         for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
1105                 done_index = page->index;
1106
1107                 if (w.io &&
1108                     !bio_can_add_page_contig(&w.io->op.op.wbio.bio, page))
1109                         bch2_writepage_do_io(&w);
1110
1111                 if (!w.io &&
1112                     atomic_read(&c->writeback_pages) >=
1113                     c->writeback_pages_max) {
1114                         /* don't sleep with pages pinned: */
1115                         pagecache_iter_release(&iter);
1116
1117                         __wait_event(c->writeback_wait,
1118                                      atomic_read(&c->writeback_pages) <
1119                                      c->writeback_pages_max);
1120                         goto get_pages;
1121                 }
1122
1123                 lock_page(page);
1124
1125                 /*
1126                  * Page truncated or invalidated. We can freely skip it
1127                  * then, even for data integrity operations: the page
1128                  * has disappeared concurrently, so there could be no
1129                  * real expectation of this data integrity operation
1130                  * even if there is now a new, dirty page at the same
1131                  * pagecache address.
1132                  */
1133                 if (unlikely(page->mapping != mapping)) {
1134 continue_unlock:
1135                         unlock_page(page);
1136                         continue;
1137                 }
1138
1139                 if (!PageDirty(page)) {
1140                         /* someone wrote it for us */
1141                         goto continue_unlock;
1142                 }
1143
1144                 if (PageWriteback(page)) {
1145                         if (wbc->sync_mode != WB_SYNC_NONE)
1146                                 wait_on_page_writeback(page);
1147                         else
1148                                 goto continue_unlock;
1149                 }
1150
1151                 BUG_ON(PageWriteback(page));
1152                 if (!clear_page_dirty_for_io(page))
1153                         goto continue_unlock;
1154
1155                 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
1156                 ret = __bch2_writepage(c, page, wbc, &w);
1157                 if (unlikely(ret)) {
1158                         if (ret == AOP_WRITEPAGE_ACTIVATE) {
1159                                 unlock_page(page);
1160                                 ret = 0;
1161                         } else {
1162                                 /*
1163                                  * done_index is set past this page,
1164                                  * so media errors will not choke
1165                                  * background writeout for the entire
1166                                  * file. This has consequences for
1167                                  * range_cyclic semantics (ie. it may
1168                                  * not be suitable for data integrity
1169                                  * writeout).
1170                                  */
1171                                 done_index = page->index + 1;
1172                                 done = 1;
1173                                 break;
1174                         }
1175                 }
1176
1177                 /*
1178                  * We stop writing back only if we are not doing
1179                  * integrity sync. In case of integrity sync we have to
1180                  * keep going until we have written all the pages
1181                  * we tagged for writeback prior to entering this loop.
1182                  */
1183                 if (--wbc->nr_to_write <= 0 &&
1184                     wbc->sync_mode == WB_SYNC_NONE) {
1185                         done = 1;
1186                         break;
1187                 }
1188         }
1189         pagecache_iter_release(&iter);
1190
1191         if (w.io)
1192                 bch2_writepage_do_io(&w);
1193
1194         if (!cycled && !done) {
1195                 /*
1196                  * range_cyclic:
1197                  * We hit the last page and there is more work to be done: wrap
1198                  * back to the start of the file
1199                  */
1200                 cycled = 1;
1201                 index = 0;
1202                 end = writeback_index - 1;
1203                 goto retry;
1204         }
1205         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1206                 mapping->writeback_index = done_index;
1207
1208         return ret;
1209 }
1210
1211 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1212 {
1213         struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1214         struct bch_writepage_state w = { NULL };
1215         int ret;
1216
1217         ret = __bch2_writepage(c, page, wbc, &w);
1218         if (w.io)
1219                 bch2_writepage_do_io(&w);
1220
1221         return ret;
1222 }
1223
1224 static void bch2_read_single_page_end_io(struct bio *bio)
1225 {
1226         complete(bio->bi_private);
1227 }
1228
1229 static int bch2_read_single_page(struct page *page,
1230                                  struct address_space *mapping)
1231 {
1232         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1233         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1234         struct bch_read_bio *rbio;
1235         int ret;
1236         DECLARE_COMPLETION_ONSTACK(done);
1237
1238         rbio = to_rbio(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read));
1239         rbio->bio.bi_private = &done;
1240         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1241
1242         __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1243         wait_for_completion(&done);
1244
1245         ret = rbio->bio.bi_error;
1246         bio_put(&rbio->bio);
1247
1248         if (ret < 0)
1249                 return ret;
1250
1251         SetPageUptodate(page);
1252         return 0;
1253 }
1254
1255 int bch2_write_begin(struct file *file, struct address_space *mapping,
1256                      loff_t pos, unsigned len, unsigned flags,
1257                      struct page **pagep, void **fsdata)
1258 {
1259         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1260         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1261         pgoff_t index = pos >> PAGE_SHIFT;
1262         unsigned offset = pos & (PAGE_SIZE - 1);
1263         struct page *page;
1264         int ret = -ENOMEM;
1265
1266         BUG_ON(inode_unhashed(&inode->v));
1267
1268         /* Not strictly necessary - same reason as mkwrite(): */
1269         pagecache_add_get(&mapping->add_lock);
1270
1271         page = grab_cache_page_write_begin(mapping, index, flags);
1272         if (!page)
1273                 goto err_unlock;
1274
1275         if (PageUptodate(page))
1276                 goto out;
1277
1278         /* If we're writing the entire page, we don't need to read it in first: */
1279         if (len == PAGE_SIZE)
1280                 goto out;
1281
1282         if (!offset && pos + len >= inode->v.i_size) {
1283                 zero_user_segment(page, len, PAGE_SIZE);
1284                 flush_dcache_page(page);
1285                 goto out;
1286         }
1287
1288         if (index > inode->v.i_size >> PAGE_SHIFT) {
1289                 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1290                 flush_dcache_page(page);
1291                 goto out;
1292         }
1293 readpage:
1294         ret = bch2_read_single_page(page, mapping);
1295         if (ret)
1296                 goto err;
1297 out:
1298         ret = bch2_get_page_reservation(c, page, true);
1299         if (ret) {
1300                 if (!PageUptodate(page)) {
1301                         /*
1302                          * If the page hasn't been read in, we won't know if we
1303                          * actually need a reservation - we don't actually need
1304                          * to read here, we just need to check if the page is
1305                          * fully backed by uncompressed data:
1306                          */
1307                         goto readpage;
1308                 }
1309
1310                 goto err;
1311         }
1312
1313         *pagep = page;
1314         return 0;
1315 err:
1316         unlock_page(page);
1317         put_page(page);
1318         *pagep = NULL;
1319 err_unlock:
1320         pagecache_add_put(&mapping->add_lock);
1321         return ret;
1322 }
1323
1324 int bch2_write_end(struct file *filp, struct address_space *mapping,
1325                    loff_t pos, unsigned len, unsigned copied,
1326                    struct page *page, void *fsdata)
1327 {
1328         struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1329         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1330
1331         lockdep_assert_held(&inode->v.i_rwsem);
1332
1333         if (unlikely(copied < len && !PageUptodate(page))) {
1334                 /*
1335                  * The page needs to be read in, but that would destroy
1336                  * our partial write - simplest thing is to just force
1337                  * userspace to redo the write:
1338                  */
1339                 zero_user(page, 0, PAGE_SIZE);
1340                 flush_dcache_page(page);
1341                 copied = 0;
1342         }
1343
1344         if (pos + copied > inode->v.i_size)
1345                 i_size_write(&inode->v, pos + copied);
1346
1347         if (copied) {
1348                 if (!PageUptodate(page))
1349                         SetPageUptodate(page);
1350                 if (!PageDirty(page))
1351                         set_page_dirty(page);
1352
1353                 inode->ei_last_dirtied = (unsigned long) current;
1354         } else {
1355                 bch2_put_page_reservation(c, page);
1356         }
1357
1358         unlock_page(page);
1359         put_page(page);
1360         pagecache_add_put(&mapping->add_lock);
1361
1362         return copied;
1363 }
1364
1365 /* O_DIRECT */
1366
1367 static void bch2_dio_read_complete(struct closure *cl)
1368 {
1369         struct dio_read *dio = container_of(cl, struct dio_read, cl);
1370
1371         dio->req->ki_complete(dio->req, dio->ret, 0);
1372         bio_check_pages_dirty(&dio->rbio.bio);  /* transfers ownership */
1373 }
1374
1375 static void bch2_direct_IO_read_endio(struct bio *bio)
1376 {
1377         struct dio_read *dio = bio->bi_private;
1378
1379         if (bio->bi_error)
1380                 dio->ret = bio->bi_error;
1381
1382         closure_put(&dio->cl);
1383 }
1384
1385 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1386 {
1387         bch2_direct_IO_read_endio(bio);
1388         bio_check_pages_dirty(bio);     /* transfers ownership */
1389 }
1390
1391 static int bch2_direct_IO_read(struct bch_fs *c, struct kiocb *req,
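/*
 * O_DIRECT read: pin the user pages into one or more bios and issue them,
 * using a closure to track completion; the remaining-count fiddling below lets
 * the async case complete from the last bio's endio without an extra atomic.
 */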
1392                                struct file *file, struct bch_inode_info *inode,
1393                                struct iov_iter *iter, loff_t offset)
1394 {
1395         struct dio_read *dio;
1396         struct bio *bio;
1397         bool sync = is_sync_kiocb(req);
1398         ssize_t ret;
1399
1400         if ((offset|iter->count) & (block_bytes(c) - 1))
1401                 return -EINVAL;
1402
1403         ret = min_t(loff_t, iter->count,
1404                     max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1405         iov_iter_truncate(iter, round_up(ret, block_bytes(c)));
1406
1407         if (!ret)
1408                 return ret;
1409
1410         bio = bio_alloc_bioset(GFP_KERNEL,
1411                                iov_iter_npages(iter, BIO_MAX_PAGES),
1412                                bch2_dio_read_bioset);
1413
1414         bio->bi_end_io = bch2_direct_IO_read_endio;
1415
1416         dio = container_of(bio, struct dio_read, rbio.bio);
1417         closure_init(&dio->cl, NULL);
1418
1419         /*
1420          * this is a _really_ horrible hack just to avoid an atomic sub at the
1421          * end:
1422          */
1423         if (!sync) {
1424                 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1425                 atomic_set(&dio->cl.remaining,
1426                            CLOSURE_REMAINING_INITIALIZER -
1427                            CLOSURE_RUNNING +
1428                            CLOSURE_DESTRUCTOR);
1429         } else {
1430                 atomic_set(&dio->cl.remaining,
1431                            CLOSURE_REMAINING_INITIALIZER + 1);
1432         }
1433
1434         dio->req        = req;
1435         dio->ret        = ret;
1436
1437         goto start;
1438         while (iter->count) {
1439                 bio = bio_alloc_bioset(GFP_KERNEL,
1440                                        iov_iter_npages(iter, BIO_MAX_PAGES),
1441                                        &c->bio_read);
1442                 bio->bi_end_io          = bch2_direct_IO_read_split_endio;
1443 start:
1444                 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1445                 bio->bi_iter.bi_sector  = offset >> 9;
1446                 bio->bi_private         = dio;
1447
1448                 ret = bio_iov_iter_get_pages(bio, iter);
1449                 if (ret < 0) {
1450                         /* XXX: fault inject this path */
1451                         bio->bi_error = ret;
1452                         bio_endio(bio);
1453                         break;
1454                 }
1455
1456                 offset += bio->bi_iter.bi_size;
1457                 bio_set_pages_dirty(bio);
1458
1459                 if (iter->count)
1460                         closure_get(&dio->cl);
1461
1462                 bch2_read(c, to_rbio(bio), inode->v.i_ino);
1463         }
1464
1465         if (sync) {
1466                 closure_sync(&dio->cl);
1467                 closure_debug_destroy(&dio->cl);
1468                 ret = dio->ret;
1469                 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1470                 return ret;
1471         } else {
1472                 return -EIOCBQUEUED;
1473         }
1474 }
1475
1476 static long __bch2_dio_write_complete(struct dio_write *dio)
1477 {
1478         struct file *file = dio->req->ki_filp;
1479         struct address_space *mapping = file->f_mapping;
1480         struct bch_inode_info *inode = file_bch_inode(file);
1481         long ret = dio->error ?: dio->written;
1482
1483         bch2_disk_reservation_put(dio->c, &dio->res);
1484
1485         __pagecache_block_put(&mapping->add_lock);
1486         inode_dio_end(&inode->v);
1487
1488         if (dio->iovec && dio->iovec != dio->inline_vecs)
1489                 kfree(dio->iovec);
1490
1491         bio_put(&dio->iop.op.wbio.bio);
1492         return ret;
1493 }
1494
1495 static void bch2_dio_write_complete(struct closure *cl)
1496 {
1497         struct dio_write *dio = container_of(cl, struct dio_write, cl);
1498         struct kiocb *req = dio->req;
1499
1500         req->ki_complete(req, __bch2_dio_write_complete(dio), 0);
1501 }
1502
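/*
 * Called after each chunk's write op completes: fold the op's error and
 * sectors written into the dio, drop the page refs taken by
 * bio_iov_iter_get_pages(), and reset the bio if there's more to submit.
 */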
1503 static void bch2_dio_write_done(struct dio_write *dio)
1504 {
1505         struct bio_vec *bv;
1506         int i;
1507
1508         dio->written += dio->iop.op.written << 9;
1509
1510         if (dio->iop.op.error)
1511                 dio->error = dio->iop.op.error;
1512
1513         bio_for_each_segment_all(bv, &dio->iop.op.wbio.bio, i)
1514                 put_page(bv->bv_page);
1515
1516         if (dio->iter.count)
1517                 bio_reset(&dio->iop.op.wbio.bio);
1518 }
1519
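/*
 * Submit the next chunk of a dio write: pin the next batch of user pages
 * into the bio, set up the write op for that chunk (handing it the
 * corresponding slice of the disk reservation), and start bch2_write() as
 * a child of dio->cl:
 */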
1520 static void bch2_do_direct_IO_write(struct dio_write *dio)
1521 {
1522         struct file *file = dio->req->ki_filp;
1523         struct bch_inode_info *inode = file_bch_inode(file);
1524         struct bio *bio = &dio->iop.op.wbio.bio;
1525         unsigned flags = 0;
1526         int ret;
1527
1528         if ((dio->req->ki_flags & IOCB_DSYNC) &&
1529             !dio->c->opts.journal_flush_disabled)
1530                 flags |= BCH_WRITE_FLUSH;
1531
1532         ret = bio_iov_iter_get_pages(bio, &dio->iter);
1533         if (ret < 0) {
1534                 /*
1535                  * these didn't get initialized, but bch2_dio_write_done() will
1536                  * look at them:
1537                  */
1538                 dio->iop.op.error = 0;
1539                 dio->iop.op.written = 0;
1540                 dio->error = ret;
1541                 return;
1542         }
1543
1544         dio->iop.inode          = inode;
1545         dio->iop.sectors_added  = 0;
1546         dio->iop.is_dio         = true;
1547         dio->iop.new_i_size     = U64_MAX;
1548         bch2_write_op_init(&dio->iop.op, dio->c, dio->res,
1549                            dio->c->fastest_devs,
1550                            (unsigned long) dio->task,
1551                            POS(inode->v.i_ino, (dio->offset + dio->written) >> 9),
1552                            &inode->ei_journal_seq,
1553                            flags|BCH_WRITE_THROTTLE);
1554         dio->iop.op.index_update_fn = bchfs_write_index_update;
1555
1556         dio->res.sectors -= bio_sectors(bio);
1557         dio->iop.op.res.sectors = bio_sectors(bio);
1558
1559         task_io_account_write(bio->bi_iter.bi_size);
1560
1561         closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
1562 }
1563
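/*
 * Async continuation, run from the closure when a chunk's write finishes:
 * if there's more of the iter left and no error, submit the next chunk -
 * borrowing the submitting task's mm with use_mm() so its user pages can
 * be pinned from this context - otherwise complete the kiocb:
 */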
1564 static void bch2_dio_write_loop_async(struct closure *cl)
1565 {
1566         struct dio_write *dio =
1567                 container_of(cl, struct dio_write, cl);
1568         struct address_space *mapping = dio->req->ki_filp->f_mapping;
1569
1570         bch2_dio_write_done(dio);
1571
1572         if (dio->iter.count && !dio->error) {
1573                 use_mm(dio->task->mm);
1574                 pagecache_block_get(&mapping->add_lock);
1575
1576                 bch2_do_direct_IO_write(dio);
1577
1578                 pagecache_block_put(&mapping->add_lock);
1579                 unuse_mm(dio->task->mm);
1580
1581                 continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
1582         } else {
1583 #if 0
1584                 closure_return_with_destructor(cl, bch2_dio_write_complete);
1585 #else
1586                 closure_debug_destroy(cl);
1587                 bch2_dio_write_complete(cl);
1588 #endif
1589         }
1590 }
1591
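/*
 * O_DIRECT write: set up struct dio_write, take a disk reservation for the
 * whole request up front, then either drive the chunk loop synchronously or
 * hand off to bch2_dio_write_loop_async(). Writes extending past i_size are
 * forced synchronous, presumably so the i_size update happens while the
 * inode lock is still held:
 */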
1592 static int bch2_direct_IO_write(struct bch_fs *c,
1593                                 struct kiocb *req, struct file *file,
1594                                 struct bch_inode_info *inode,
1595                                 struct iov_iter *iter, loff_t offset)
1596 {
1597         struct address_space *mapping = file->f_mapping;
1598         struct dio_write *dio;
1599         struct bio *bio;
1600         ssize_t ret;
1601         bool sync = is_sync_kiocb(req);
1602
1603         lockdep_assert_held(&inode->v.i_rwsem);
1604
1605         if (unlikely(!iter->count))
1606                 return 0;
1607
1608         if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
1609                 return -EINVAL;
1610
1611         bio = bio_alloc_bioset(GFP_KERNEL,
1612                                iov_iter_npages(iter, BIO_MAX_PAGES),
1613                                bch2_dio_write_bioset);
1614         dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
1615         dio->req        = req;
1616         dio->c          = c;
1617         dio->written    = 0;
1618         dio->error      = 0;
1619         dio->offset     = offset;
1620         dio->iovec      = NULL;
1621         dio->iter       = *iter;
1622         dio->task       = current;
1623         closure_init(&dio->cl, NULL);
1624
1625         if (offset + iter->count > inode->v.i_size)
1626                 sync = true;
1627
1628         /*
1629          * XXX: we shouldn't return -ENOSPC if we're overwriting existing data -
1630          * if getting a reservation fails we should check if we are doing an
1631          * overwrite.
1632          *
1633          * Have to then guard against racing with truncate (deleting data that
1634          * we would have been overwriting)
1635          */
1636         ret = bch2_disk_reservation_get(c, &dio->res, iter->count >> 9, 0);
1637         if (unlikely(ret)) {
1638                 closure_debug_destroy(&dio->cl);
1639                 bio_put(bio);
1640                 return ret;
1641         }
1642
1643         inode_dio_begin(&inode->v);
1644         __pagecache_block_get(&mapping->add_lock);
1645
1646         if (sync) {
1647                 do {
1648                         bch2_do_direct_IO_write(dio);
1649
1650                         closure_sync(&dio->cl);
1651                         bch2_dio_write_done(dio);
1652                 } while (dio->iter.count && !dio->error);
1653
1654                 closure_debug_destroy(&dio->cl);
1655                 return __bch2_dio_write_complete(dio);
1656         } else {
1657                 bch2_do_direct_IO_write(dio);
1658
1659                 if (dio->iter.count && !dio->error) {
1660                         if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
1661                                 dio->iovec = kmalloc(dio->iter.nr_segs *
1662                                                      sizeof(struct iovec),
1663                                                      GFP_KERNEL);
1664                                 if (!dio->iovec)
1665                                         dio->error = -ENOMEM;
1666                         } else {
1667                                 dio->iovec = dio->inline_vecs;
1668                         }
1669
                        /* don't copy through a NULL pointer if the allocation failed: */
1670                         if (dio->iovec) {
1671                                 memcpy(dio->iovec, dio->iter.iov,
1672                                        dio->iter.nr_segs * sizeof(struct iovec));
1673                                 dio->iter.iov = dio->iovec;
                                }
1674                 }
1675
1676                 continue_at_noreturn(&dio->cl, bch2_dio_write_loop_async, NULL);
1677                 return -EIOCBQUEUED;
1678         }
1679 }
1680
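/* dispatch a direct IO request to the read or write implementation: */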
1681 ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
1682 {
1683         struct file *file = req->ki_filp;
1684         struct bch_inode_info *inode = file_bch_inode(file);
1685         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1686         struct blk_plug plug;
1687         ssize_t ret;
1688
1689         blk_start_plug(&plug);
1690         ret = ((iov_iter_rw(iter) == WRITE)
1691                 ? bch2_direct_IO_write
1692                 : bch2_direct_IO_read)(c, req, file, inode, iter, req->ki_pos);
1693         blk_finish_plug(&plug);
1694
1695         return ret;
1696 }
1697
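/*
 * O_DIRECT path of write_iter: write out and invalidate the page cache for
 * the range being written, with the pagecache block held so new pages
 * can't be added while the dio is in flight:
 */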
1698 static ssize_t
1699 bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
1700 {
1701         struct file *file = iocb->ki_filp;
1702         struct bch_inode_info *inode = file_bch_inode(file);
1703         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1704         struct address_space *mapping = file->f_mapping;
1705         loff_t pos = iocb->ki_pos;
1706         ssize_t ret;
1707
1708         pagecache_block_get(&mapping->add_lock);
1709
1710         /* Write and invalidate pagecache range that we're writing to: */
1711         ret = write_invalidate_inode_pages_range(file->f_mapping, pos,
1712                                         pos + iov_iter_count(iter) - 1);
1713         if (unlikely(ret))
1714                 goto err;
1715
1716         ret = bch2_direct_IO_write(c, iocb, file, inode, iter, pos);
1717 err:
1718         pagecache_block_put(&mapping->add_lock);
1719
1720         return ret;
1721 }
1722
1723 static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
1724 {
1725         struct file *file = iocb->ki_filp;
1726         struct bch_inode_info *inode = file_bch_inode(file);
1727         ssize_t ret;
1728
1729         /* We can write back this queue in page reclaim */
1730         current->backing_dev_info = inode_to_bdi(&inode->v);
1731         ret = file_remove_privs(file);
1732         if (ret)
1733                 goto out;
1734
1735         ret = file_update_time(file);
1736         if (ret)
1737                 goto out;
1738
1739         ret = iocb->ki_flags & IOCB_DIRECT
1740                 ? bch2_direct_write(iocb, from)
1741                 : generic_perform_write(file, from, iocb->ki_pos);
1742
1743         if (likely(ret > 0))
1744                 iocb->ki_pos += ret;
1745 out:
1746         current->backing_dev_info = NULL;
1747         return ret;
1748 }
1749
1750 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
1751 {
1752         struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
1753         bool direct = iocb->ki_flags & IOCB_DIRECT;
1754         ssize_t ret;
1755
1756         inode_lock(&inode->v);
1757         ret = generic_write_checks(iocb, from);
1758         if (ret > 0)
1759                 ret = __bch2_write_iter(iocb, from);
1760         inode_unlock(&inode->v);
1761
1762         if (ret > 0 && !direct)
1763                 ret = generic_write_sync(iocb, ret);
1764
1765         return ret;
1766 }
1767
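/*
 * mmap write fault: take a page reservation before allowing the page to be
 * dirtied, so that later writeback can't fail with -ENOSPC:
 */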
1768 int bch2_page_mkwrite(struct vm_fault *vmf)
1769 {
1770         struct page *page = vmf->page;
1771         struct file *file = vmf->vma->vm_file;
1772         struct bch_inode_info *inode = file_bch_inode(file);
1773         struct address_space *mapping = inode->v.i_mapping;
1774         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1775         int ret = VM_FAULT_LOCKED;
1776
1777         sb_start_pagefault(inode->v.i_sb);
1778         file_update_time(file);
1779
1780         /*
1781          * Not strictly necessary, but helps avoid dio writes livelocking in
1782          * write_invalidate_inode_pages_range() - can drop this if/when we get
1783          * a write_invalidate_inode_pages_range() that works without dropping
1784          * page lock before invalidating page
1785          */
1786         if (current->pagecache_lock != &mapping->add_lock)
1787                 pagecache_add_get(&mapping->add_lock);
1788
1789         lock_page(page);
1790         if (page->mapping != mapping ||
1791             page_offset(page) > i_size_read(&inode->v)) {
1792                 unlock_page(page);
1793                 ret = VM_FAULT_NOPAGE;
1794                 goto out;
1795         }
1796
1797         if (bch2_get_page_reservation(c, page, true)) {
1798                 unlock_page(page);
1799                 ret = VM_FAULT_SIGBUS;
1800                 goto out;
1801         }
1802
1803         if (!PageDirty(page))
1804                 set_page_dirty(page);
1805         wait_for_stable_page(page);
1806 out:
1807         if (current->pagecache_lock != &mapping->add_lock)
1808                 pagecache_add_put(&mapping->add_lock);
1809         sb_end_pagefault(inode->v.i_sb);
1810         return ret;
1811 }
1812
1813 void bch2_invalidatepage(struct page *page, unsigned int offset,
1814                          unsigned int length)
1815 {
1816         EBUG_ON(!PageLocked(page));
1817         EBUG_ON(PageWriteback(page));
1818
1819         if (offset || length < PAGE_SIZE)
1820                 return;
1821
1822         bch2_clear_page_bits(page);
1823 }
1824
1825 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
1826 {
1827         EBUG_ON(!PageLocked(page));
1828         EBUG_ON(PageWriteback(page));
1829
1830         if (PageDirty(page))
1831                 return 0;
1832
1833         bch2_clear_page_bits(page);
1834         return 1;
1835 }
1836
1837 #ifdef CONFIG_MIGRATION
1838 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
1839                       struct page *page, enum migrate_mode mode)
1840 {
1841         int ret;
1842
1843         ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
1844         if (ret != MIGRATEPAGE_SUCCESS)
1845                 return ret;
1846
1847         if (PagePrivate(page)) {
1848                 *page_state(newpage) = *page_state(page);
1849                 ClearPagePrivate(page);
1850         }
1851
1852         migrate_page_copy(newpage, page);
1853         return MIGRATEPAGE_SUCCESS;
1854 }
1855 #endif
1856
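/*
 * fsync: flush dirty pages, then flush the journal up to the last journal
 * sequence number that touched this inode:
 */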
1857 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1858 {
1859         struct bch_inode_info *inode = file_bch_inode(file);
1860         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1861         int ret;
1862
1863         ret = filemap_write_and_wait_range(inode->v.i_mapping, start, end);
1864         if (ret)
1865                 return ret;
1866
1867         if (c->opts.journal_flush_disabled)
1868                 return 0;
1869
1870         return bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);
1871 }
1872
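/*
 * Zero out the part of a partially truncated page that lies inside
 * [start, end): if the page isn't cached and has no data on disk there's
 * nothing to do; otherwise read it in if necessary, take a page
 * reservation, zero the range and leave the page dirty:
 */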
1873 static int __bch2_truncate_page(struct address_space *mapping,
1874                                 pgoff_t index, loff_t start, loff_t end)
1875 {
1876         struct bch_inode_info *inode = to_bch_ei(mapping->host);
1877         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1878         unsigned start_offset = start & (PAGE_SIZE - 1);
1879         unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
1880         struct page *page;
1881         int ret = 0;
1882
1883         /* Page boundary? Nothing to do */
1884         if (!((index == start >> PAGE_SHIFT && start_offset) ||
1885               (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
1886                 return 0;
1887
1888         /* Above i_size? */
1889         if (index << PAGE_SHIFT >= inode->v.i_size)
1890                 return 0;
1891
1892         page = find_lock_page(mapping, index);
1893         if (!page) {
1894                 struct btree_iter iter;
1895                 struct bkey_s_c k = bkey_s_c_null;
1896
1897                 /*
1898                  * XXX: we're doing two index lookups when we end up reading the
1899                  * page
1900                  */
1901                 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
1902                                    POS(inode->v.i_ino,
1903                                        index << PAGE_SECTOR_SHIFT), 0, k) {
1904                         if (bkey_cmp(bkey_start_pos(k.k),
1905                                      POS(inode->v.i_ino,
1906                                          (index + 1) << PAGE_SECTOR_SHIFT)) >= 0)
1907                                 break;
1908
1909                         if (k.k->type != KEY_TYPE_DISCARD &&
1910                             k.k->type != BCH_RESERVATION) {
1911                                 bch2_btree_iter_unlock(&iter);
1912                                 goto create;
1913                         }
1914                 }
1915                 bch2_btree_iter_unlock(&iter);
1916                 return 0;
1917 create:
1918                 page = find_or_create_page(mapping, index, GFP_KERNEL);
1919                 if (unlikely(!page)) {
1920                         ret = -ENOMEM;
1921                         goto out;
1922                 }
1923         }
1924
1925         if (!PageUptodate(page)) {
1926                 ret = bch2_read_single_page(page, mapping);
1927                 if (ret)
1928                         goto unlock;
1929         }
1930
1931         /*
1932          * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
1933          *
1934          * XXX: because we aren't currently tracking whether the page has actual
1935          * data in it (vs. just 0s, or only partially written) this is wrong. ick.
1936          */
1937         ret = bch2_get_page_reservation(c, page, false);
1938         BUG_ON(ret);
1939
1940         if (index == start >> PAGE_SHIFT &&
1941             index == end >> PAGE_SHIFT)
1942                 zero_user_segment(page, start_offset, end_offset);
1943         else if (index == start >> PAGE_SHIFT)
1944                 zero_user_segment(page, start_offset, PAGE_SIZE);
1945         else if (index == end >> PAGE_SHIFT)
1946                 zero_user_segment(page, 0, end_offset);
1947
1948         if (!PageDirty(page))
1949                 set_page_dirty(page);
1950 unlock:
1951         unlock_page(page);
1952         put_page(page);
1953 out:
1954         return ret;
1955 }
1956
1957 static int bch2_truncate_page(struct address_space *mapping, loff_t from)
1958 {
1959         return __bch2_truncate_page(mapping, from >> PAGE_SHIFT,
1960                                    from, from + PAGE_SIZE);
1961 }
1962
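/*
 * Truncate: write the new i_size with I_SIZE_DIRTY set, then (when
 * shrinking) zero the new partial last page and delete extents past the
 * new size, then write the inode again with I_SIZE_DIRTY cleared:
 */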
1963 int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
1964 {
1965         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1966         struct address_space *mapping = inode->v.i_mapping;
1967         bool shrink = iattr->ia_size <= inode->v.i_size;
1968         int ret = 0;
1969
1970         inode_dio_wait(&inode->v);
1971         pagecache_block_get(&mapping->add_lock);
1972
1973         truncate_setsize(&inode->v, iattr->ia_size);
1974
1975         /* sync appends.. */
1976         /* XXX what protects inode->i_size? */
1977         if (iattr->ia_size > inode->ei_size)
1978                 ret = filemap_write_and_wait_range(mapping,
1979                                                    inode->ei_size, S64_MAX);
1980         if (ret)
1981                 goto err_put_pagecache;
1982
1983         mutex_lock(&inode->ei_update_lock);
1984         i_size_dirty_get(inode);
1985         ret = bch2_write_inode_size(c, inode, inode->v.i_size);
1986         mutex_unlock(&inode->ei_update_lock);
1987
1988         if (unlikely(ret))
1989                 goto err;
1990
1991         /*
1992          * There might be persistent reservations (from fallocate())
1993          * above i_size, which bch2_inode_truncate() will discard - we're
1994          * only supposed to discard them if we're doing a real truncate
1995          * here (new i_size < current i_size):
1996          */
1997         if (shrink) {
1998                 struct i_sectors_hook i_sectors_hook;
2000
2001                 ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
2002                 if (unlikely(ret))
2003                         goto err;
2004
2005                 ret = bch2_truncate_page(inode->v.i_mapping, iattr->ia_size);
2006                 if (unlikely(ret)) {
2007                         i_sectors_dirty_put(c, inode, &i_sectors_hook);
2008                         goto err;
2009                 }
2010
2011                 ret = bch2_inode_truncate(c, inode->v.i_ino,
2012                                          round_up(iattr->ia_size, PAGE_SIZE) >> 9,
2013                                          &i_sectors_hook.hook,
2014                                          &inode->ei_journal_seq);
2015
2016                 i_sectors_dirty_put(c, inode, &i_sectors_hook);
2017
2018                 if (unlikely(ret))
2019                         goto err;
2020         }
2021
2022         mutex_lock(&inode->ei_update_lock);
2023         setattr_copy(&inode->v, iattr);
2024         inode->v.i_mtime = inode->v.i_ctime = current_fs_time(inode->v.i_sb);
2025 out:
2026         /* clear I_SIZE_DIRTY: */
2027         i_size_dirty_put(inode);
2028         ret = bch2_write_inode_size(c, inode, inode->v.i_size);
2029         mutex_unlock(&inode->ei_update_lock);
2030
2031 err_put_pagecache:
2032         pagecache_block_put(&mapping->add_lock);
2033         return ret;
2034 err:
2035         mutex_lock(&inode->ei_update_lock);
2036         goto out;
2037 }
2038
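/*
 * FALLOC_FL_PUNCH_HOLE: zero the unaligned head and tail pages, drop the
 * page cache for the punched range and delete the fully covered extents
 * from the btree:
 */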
2039 static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2040 {
2041         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2042         struct address_space *mapping = inode->v.i_mapping;
2043         u64 ino = inode->v.i_ino;
2044         u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
2045         u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
2046         int ret = 0;
2047
2048         inode_lock(&inode->v);
2049         inode_dio_wait(&inode->v);
2050         pagecache_block_get(&mapping->add_lock);
2051
2052         ret = __bch2_truncate_page(mapping,
2053                                    offset >> PAGE_SHIFT,
2054                                    offset, offset + len);
2055         if (unlikely(ret))
2056                 goto out;
2057
2058         if (offset >> PAGE_SHIFT !=
2059             (offset + len) >> PAGE_SHIFT) {
2060                 ret = __bch2_truncate_page(mapping,
2061                                            (offset + len) >> PAGE_SHIFT,
2062                                            offset, offset + len);
2063                 if (unlikely(ret))
2064                         goto out;
2065         }
2066
2067         truncate_pagecache_range(&inode->v, offset, offset + len - 1);
2068
2069         if (discard_start < discard_end) {
2070                 struct disk_reservation disk_res;
2071                 struct i_sectors_hook i_sectors_hook;
2073
2074                 BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));
2075
2076                 ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
2077                 if (unlikely(ret))
2078                         goto out;
2079
2080                 ret = bch2_btree_delete_range(c,
2081                                 BTREE_ID_EXTENTS,
2082                                 POS(ino, discard_start),
2083                                 POS(ino, discard_end),
2084                                 ZERO_VERSION,
2085                                 &disk_res,
2086                                 &i_sectors_hook.hook,
2087                                 &inode->ei_journal_seq);
2088
2089                 i_sectors_dirty_put(c, inode, &i_sectors_hook);
2090                 bch2_disk_reservation_put(c, &disk_res);
2091         }
2092 out:
2093         pagecache_block_put(&mapping->add_lock);
2094         inode_unlock(&inode->v);
2095
2096         return ret;
2097 }
2098
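/*
 * FALLOC_FL_COLLAPSE_RANGE: using two linked btree iterators, copy each
 * extent from just past the collapsed range to its new position len bytes
 * earlier, then truncate off the duplicated tail and shrink i_size:
 */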
2099 static long bch2_fcollapse(struct bch_inode_info *inode,
2100                            loff_t offset, loff_t len)
2101 {
2102         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2103         struct address_space *mapping = inode->v.i_mapping;
2104         struct btree_iter src;
2105         struct btree_iter dst;
2106         BKEY_PADDED(k) copy;
2107         struct bkey_s_c k;
2108         struct i_sectors_hook i_sectors_hook;
2109         loff_t new_size;
2110         int ret;
2111
2112         if ((offset | len) & (PAGE_SIZE - 1))
2113                 return -EINVAL;
2114
2115         bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS,
2116                              POS(inode->v.i_ino, offset >> 9),
2117                              BTREE_ITER_INTENT);
2118         /* position will be set from dst iter's position: */
2119         bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN, 0);
2120         bch2_btree_iter_link(&src, &dst);
2121
2122         /*
2123          * We need i_mutex to keep the page cache consistent with the extents
2124          * btree, and the btree consistent with i_size - we don't need outside
2125          * locking for the extents btree itself, because we're using linked
2126          * iterators
2127          */
2128         inode_lock(&inode->v);
2129         inode_dio_wait(&inode->v);
2130         pagecache_block_get(&mapping->add_lock);
2131
2132         ret = -EINVAL;
2133         if (offset + len >= inode->v.i_size)
2134                 goto err;
2135
2136         if (inode->v.i_size < len)
2137                 goto err;
2138
2139         new_size = inode->v.i_size - len;
2140
2141         ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2142         if (ret)
2143                 goto err;
2144
2145         ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
2146         if (ret)
2147                 goto err;
2148
2149         while (bkey_cmp(dst.pos,
2150                         POS(inode->v.i_ino,
2151                             round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
2152                 struct disk_reservation disk_res;
2153
2154                 bch2_btree_iter_set_pos(&src,
2155                         POS(dst.pos.inode, dst.pos.offset + (len >> 9)));
2156
2157                 ret = bch2_btree_iter_traverse(&dst);
2158                 if (ret)
2159                         goto btree_iter_err;
2160
2161                 k = bch2_btree_iter_peek_with_holes(&src);
2162                 if ((ret = btree_iter_err(k)))
2163                         goto btree_iter_err;
2164
2165                 bkey_reassemble(&copy.k, k);
2166
2167                 if (bkey_deleted(&copy.k.k))
2168                         copy.k.k.type = KEY_TYPE_DISCARD;
2169
2170                 bch2_cut_front(src.pos, &copy.k);
2171                 copy.k.k.p.offset -= len >> 9;
2172
2173                 BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));
2174
2175                 ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
2176                                                BCH_DISK_RESERVATION_NOFAIL);
2177                 BUG_ON(ret);
2178
2179                 ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
2180                                            &inode->ei_journal_seq,
2181                                            BTREE_INSERT_ATOMIC|
2182                                            BTREE_INSERT_NOFAIL,
2183                                            BTREE_INSERT_ENTRY(&dst, &copy.k));
2184                 bch2_disk_reservation_put(c, &disk_res);
2185 btree_iter_err:
2186                 if (ret < 0 && ret != -EINTR)
2187                         goto err_unwind;
2188
2189                 bch2_btree_iter_cond_resched(&src);
2190         }
2191
2192         bch2_btree_iter_unlock(&src);
2193         bch2_btree_iter_unlock(&dst);
2194
2195         ret = bch2_inode_truncate(c, inode->v.i_ino,
2196                                  round_up(new_size, PAGE_SIZE) >> 9,
2197                                  &i_sectors_hook.hook,
2198                                  &inode->ei_journal_seq);
2199         if (ret)
2200                 goto err_unwind;
2201
2202         i_sectors_dirty_put(c, inode, &i_sectors_hook);
2203
2204         mutex_lock(&inode->ei_update_lock);
2205         i_size_write(&inode->v, new_size);
2206         ret = bch2_write_inode_size(c, inode, inode->v.i_size);
2207         mutex_unlock(&inode->ei_update_lock);
2208
2209         pagecache_block_put(&mapping->add_lock);
2210         inode_unlock(&inode->v);
2211
2212         return ret;
2213 err_unwind:
2214         /*
2215          * XXX: we've left data with multiple pointers... which isn't a _super_
2216          * serious problem...
2217          */
2218         i_sectors_dirty_put(c, inode, &i_sectors_hook);
2219 err:
2220         bch2_btree_iter_unlock(&src);
2221         bch2_btree_iter_unlock(&dst);
2222         pagecache_block_put(&mapping->add_lock);
2223         inode_unlock(&inode->v);
2224         return ret;
2225 }
2226
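/*
 * fallocate/FALLOC_FL_ZERO_RANGE: walk the range and replace holes - and,
 * for ZERO_RANGE, existing data - with BCH_RESERVATION extents backed by a
 * persistent disk reservation, updating i_size if the mode allows:
 */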
2227 static long bch2_fallocate(struct bch_inode_info *inode, int mode,
2228                            loff_t offset, loff_t len)
2229 {
2230         struct address_space *mapping = inode->v.i_mapping;
2231         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2232         struct i_sectors_hook i_sectors_hook;
2233         struct btree_iter iter;
2234         struct bpos end;
2235         loff_t block_start, block_end;
2236         loff_t new_size = offset + len;
2237         unsigned sectors;
2238         unsigned replicas = READ_ONCE(c->opts.data_replicas);
2239         int ret;
2240
2241         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
2242                              BTREE_ITER_INTENT);
2243
2244         inode_lock(&inode->v);
2245         inode_dio_wait(&inode->v);
2246         pagecache_block_get(&mapping->add_lock);
2247
2248         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2249             new_size > inode->v.i_size) {
2250                 ret = inode_newsize_ok(&inode->v, new_size);
2251                 if (ret)
2252                         goto err;
2253         }
2254
2255         if (mode & FALLOC_FL_ZERO_RANGE) {
2256                 ret = __bch2_truncate_page(mapping,
2257                                            offset >> PAGE_SHIFT,
2258                                            offset, offset + len);
2259
2260                 if (!ret &&
2261                     offset >> PAGE_SHIFT !=
2262                     (offset + len) >> PAGE_SHIFT)
2263                         ret = __bch2_truncate_page(mapping,
2264                                                    (offset + len) >> PAGE_SHIFT,
2265                                                    offset, offset + len);
2266
2267                 if (unlikely(ret))
2268                         goto err;
2269
2270                 truncate_pagecache_range(&inode->v, offset, offset + len - 1);
2271
2272                 block_start     = round_up(offset, PAGE_SIZE);
2273                 block_end       = round_down(offset + len, PAGE_SIZE);
2274         } else {
2275                 block_start     = round_down(offset, PAGE_SIZE);
2276                 block_end       = round_up(offset + len, PAGE_SIZE);
2277         }
2278
2279         bch2_btree_iter_set_pos(&iter, POS(inode->v.i_ino, block_start >> 9));
2280         end = POS(inode->v.i_ino, block_end >> 9);
2281
2282         ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
2283         if (unlikely(ret))
2284                 goto err;
2285
2286         while (bkey_cmp(iter.pos, end) < 0) {
2287                 struct disk_reservation disk_res = { 0 };
2288                 struct bkey_i_reservation reservation;
2289                 struct bkey_s_c k;
2290
2291                 k = bch2_btree_iter_peek_with_holes(&iter);
2292                 if ((ret = btree_iter_err(k)))
2293                         goto btree_iter_err;
2294
2295                 /* already reserved */
2296                 if (k.k->type == BCH_RESERVATION &&
2297                     bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2298                         bch2_btree_iter_advance_pos(&iter);
2299                         continue;
2300                 }
2301
2302                 if (bkey_extent_is_data(k.k)) {
2303                         if (!(mode & FALLOC_FL_ZERO_RANGE)) {
2304                                 bch2_btree_iter_advance_pos(&iter);
2305                                 continue;
2306                         }
2307                 }
2308
2309                 bkey_reservation_init(&reservation.k_i);
2310                 reservation.k.type      = BCH_RESERVATION;
2311                 reservation.k.p         = k.k->p;
2312                 reservation.k.size      = k.k->size;
2313
2314                 bch2_cut_front(iter.pos, &reservation.k_i);
2315                 bch2_cut_back(end, &reservation.k);
2316
2317                 sectors = reservation.k.size;
2318                 reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);
2319
2320                 if (reservation.v.nr_replicas < replicas ||
2321                     bkey_extent_is_compressed(k)) {
2322                         ret = bch2_disk_reservation_get(c, &disk_res,
2323                                                        sectors, 0);
2324                         if (ret)
2325                                 goto err_put_sectors_dirty;
2326
2327                         reservation.v.nr_replicas = disk_res.nr_replicas;
2328                 }
2329
2330                 ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
2331                                           &inode->ei_journal_seq,
2332                                           BTREE_INSERT_ATOMIC|
2333                                           BTREE_INSERT_NOFAIL,
2334                                           BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
2335                 bch2_disk_reservation_put(c, &disk_res);
2336 btree_iter_err:
2337                 if (ret < 0 && ret != -EINTR)
2338                         goto err_put_sectors_dirty;
2339
2340         }
2341         bch2_btree_iter_unlock(&iter);
2342
2343         i_sectors_dirty_put(c, inode, &i_sectors_hook);
2344
2345         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2346             new_size > inode->v.i_size) {
2347                 i_size_write(&inode->v, new_size);
2348
2349                 mutex_lock(&inode->ei_update_lock);
2350                 ret = bch2_write_inode_size(c, inode, inode->v.i_size);
2351                 mutex_unlock(&inode->ei_update_lock);
2352         }
2353
2354         /* blech */
2355         if ((mode & FALLOC_FL_KEEP_SIZE) &&
2356             (mode & FALLOC_FL_ZERO_RANGE) &&
2357             inode->ei_size != inode->v.i_size) {
2358                 /* sync appends.. */
2359                 ret = filemap_write_and_wait_range(mapping,
2360                                         inode->ei_size, S64_MAX);
2361                 if (ret)
2362                         goto err;
2363
2364                 if (inode->ei_size != inode->v.i_size) {
2365                         mutex_lock(&inode->ei_update_lock);
2366                         ret = bch2_write_inode_size(c, inode, inode->v.i_size);
2367                         mutex_unlock(&inode->ei_update_lock);
2368                 }
2369         }
2370
2371         pagecache_block_put(&mapping->add_lock);
2372         inode_unlock(&inode->v);
2373
2374         return 0;
2375 err_put_sectors_dirty:
2376         i_sectors_dirty_put(c, inode, &i_sectors_hook);
2377 err:
2378         bch2_btree_iter_unlock(&iter);
2379         pagecache_block_put(&mapping->add_lock);
2380         inode_unlock(&inode->v);
2381         return ret;
2382 }
2383
2384 long bch2_fallocate_dispatch(struct file *file, int mode,
2385                              loff_t offset, loff_t len)
2386 {
2387         struct bch_inode_info *inode = file_bch_inode(file);
2388
2389         if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2390                 return bch2_fallocate(inode, mode, offset, len);
2391
2392         if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2393                 return bch2_fpunch(inode, offset, len);
2394
2395         if (mode == FALLOC_FL_COLLAPSE_RANGE)
2396                 return bch2_fcollapse(inode, offset, len);
2397
2398         return -EOPNOTSUPP;
2399 }
2400
2401 static bool page_is_data(struct page *page)
2402 {
2403         /* XXX: should only have to check PageDirty */
2404         return PagePrivate(page) &&
2405                 (page_state(page)->sectors ||
2406                  page_state(page)->dirty_sectors);
2407 }
2408
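/*
 * SEEK_DATA/SEEK_HOLE also need to look at the page cache, since there can
 * be dirty data that hasn't been written to the extents btree yet:
 */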
2409 static loff_t bch2_next_pagecache_data(struct inode *vinode,
2410                                        loff_t start_offset,
2411                                        loff_t end_offset)
2412 {
2413         struct address_space *mapping = vinode->i_mapping;
2414         struct page *page;
2415         pgoff_t index;
2416
2417         for (index = start_offset >> PAGE_SHIFT;
2418              index < end_offset >> PAGE_SHIFT;
2419              index++) {
2420                 if (find_get_pages(mapping, index, 1, &page)) {
2421                         lock_page(page);
2422                         index = page->index;
2423
2424                         if (page_is_data(page))
2425                                 end_offset =
2426                                         min(end_offset,
2427                                         max(start_offset,
2428                                             ((loff_t) index) << PAGE_SHIFT));
2429                         unlock_page(page);
2430                         put_page(page);
2431                 } else {
2432                         break;
2433                 }
2434         }
2435
2436         return end_offset;
2437 }
2438
2439 static loff_t bch2_seek_data(struct file *file, u64 offset)
2440 {
2441         struct bch_inode_info *inode = file_bch_inode(file);
2442         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2443         struct btree_iter iter;
2444         struct bkey_s_c k;
2445         u64 isize, next_data = MAX_LFS_FILESIZE;
2446         int ret;
2447
2448         isize = i_size_read(&inode->v);
2449         if (offset >= isize)
2450                 return -ENXIO;
2451
2452         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2453                            POS(inode->v.i_ino, offset >> 9), 0, k) {
2454                 if (k.k->p.inode != inode->v.i_ino) {
2455                         break;
2456                 } else if (bkey_extent_is_data(k.k)) {
2457                         next_data = max(offset, bkey_start_offset(k.k) << 9);
2458                         break;
2459                 } else if (k.k->p.offset << 9 > isize)
2460                         break;
2461         }
2462
2463         ret = bch2_btree_iter_unlock(&iter);
2464         if (ret)
2465                 return ret;
2466
2467         if (next_data > offset)
2468                 next_data = bch2_next_pagecache_data(&inode->v,
2469                                                      offset, next_data);
2470
2471         if (next_data > isize)
2472                 return -ENXIO;
2473
2474         return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
2475 }
2476
2477 static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
2478 {
2479         struct page *page;
2480         bool ret;
2481
2482         page = find_lock_entry(mapping, index);
2483         if (!page || radix_tree_exception(page))
2484                 return false;
2485
2486         ret = page_is_data(page);
2487         unlock_page(page);
2488
2489         return ret;
2490 }
2491
2492 static loff_t bch2_next_pagecache_hole(struct inode *vinode,
2493                                        loff_t start_offset,
2494                                        loff_t end_offset)
2495 {
2496         struct address_space *mapping = vinode->i_mapping;
2497         pgoff_t index;
2498
2499         for (index = start_offset >> PAGE_SHIFT;
2500              index < end_offset >> PAGE_SHIFT;
2501              index++)
2502                 if (!page_slot_is_data(mapping, index))
2503                         end_offset = max(start_offset,
2504                                          ((loff_t) index) << PAGE_SHIFT);
2505
2506         return end_offset;
2507 }
2508
2509 static loff_t bch2_seek_hole(struct file *file, u64 offset)
2510 {
2511         struct bch_inode_info *inode = file_bch_inode(file);
2512         struct bch_fs *c = inode->v.i_sb->s_fs_info;
2513         struct btree_iter iter;
2514         struct bkey_s_c k;
2515         u64 isize, next_hole = MAX_LFS_FILESIZE;
2516         int ret;
2517
2518         isize = i_size_read(&inode->v);
2519         if (offset >= isize)
2520                 return -ENXIO;
2521
2522         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2523                            POS(inode->v.i_ino, offset >> 9),
2524                            BTREE_ITER_WITH_HOLES, k) {
2525                 if (k.k->p.inode != inode->v.i_ino) {
2526                         next_hole = bch2_next_pagecache_hole(&inode->v,
2527                                         offset, MAX_LFS_FILESIZE);
2528                         break;
2529                 } else if (!bkey_extent_is_data(k.k)) {
2530                         next_hole = bch2_next_pagecache_hole(&inode->v,
2531                                         max(offset, bkey_start_offset(k.k) << 9),
2532                                         k.k->p.offset << 9);
2533
2534                         if (next_hole < k.k->p.offset << 9)
2535                                 break;
2536                 } else {
2537                         offset = max(offset, bkey_start_offset(k.k) << 9);
2538                 }
2539         }
2540
2541         ret = bch2_btree_iter_unlock(&iter);
2542         if (ret)
2543                 return ret;
2544
2545         if (next_hole > isize)
2546                 next_hole = isize;
2547
2548         return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
2549 }
2550
2551 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
2552 {
2553         switch (whence) {
2554         case SEEK_SET:
2555         case SEEK_CUR:
2556         case SEEK_END:
2557                 return generic_file_llseek(file, offset, whence);
2558         case SEEK_DATA:
2559                 return bch2_seek_data(file, offset);
2560         case SEEK_HOLE:
2561                 return bch2_seek_hole(file, offset);
2562         }
2563
2564         return -EINVAL;
2565 }
2566
2567 #endif /* NO_BCACHEFS_FS */