// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

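/*
 * Returns true if @bio cannot take another @len bytes: either its biovec
 * array is already full, or adding @len would overflow the 32-bit
 * bi_iter.bi_size counter.
 */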
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

/* readpage(s): */

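/*
 * Read completion: mark each folio uptodate (or errored) and unlock it for
 * waiting readers.
 */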
static void bch2_readpages_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		if (!bio->bi_status) {
			folio_mark_uptodate(fi.folio);
		} else {
			folio_clear_uptodate(fi.folio);
			folio_set_error(fi.folio);
		}
		folio_unlock(fi.folio);
	}

	bio_put(bio);
}

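/*
 * Tracks the batch of folios grabbed for a readahead request; @idx is our
 * current position within @folios.
 */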
struct readpages_iter {
	struct address_space	*mapping;
	unsigned		idx;
	folios			folios;
};

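/*
 * Pin the folios covering the readahead window and attach bch_folio state to
 * each. The folio references taken at lookup are dropped again here: the page
 * cache and the folio locks keep the folios alive until
 * bch2_readpages_end_io() unlocks them.
 */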
static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	struct folio **fi;
	int ret;

	memset(iter, 0, sizeof(*iter));

	iter->mapping = ractl->mapping;

	ret = bch2_filemap_get_contig_folios_d(iter->mapping,
				ractl->_index << PAGE_SHIFT,
				(ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
				0, mapping_gfp_mask(iter->mapping),
				&iter->folios);
	if (ret)
		return ret;

	darray_for_each(iter->folios, fi) {
		ractl->_nr_pages -= 1U << folio_order(*fi);
		__bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
		folio_put(*fi);
		folio_put(*fi);
	}

	return 0;
}

static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
	if (iter->idx >= iter->folios.nr)
		return NULL;
	return iter->folios.data[iter->idx];
}

static inline void readpage_iter_advance(struct readpages_iter *iter)
{
	iter->idx++;
}

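/*
 * If an extent is checksummed or compressed, a partial read forces us to read
 * (and checksum/decompress) the whole extent anyway - so it's worth widening
 * the readahead to cover all of it.
 */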
static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}

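/*
 * Try to grow @bio to cover @sectors_this_extent: first by consuming folios
 * already in the readahead batch, then - if @get_more - by allocating and
 * inserting fresh folios past the end of the batch. Allocations are done with
 * btree locks dropped.
 */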
static int readpage_bio_extend(struct btree_trans *trans,
			       struct readpages_iter *iter,
			       struct bio *bio,
			       unsigned sectors_this_extent,
			       bool get_more)
{
	/* Don't hold btree locks while allocating memory: */
	bch2_trans_unlock(trans);

	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		struct folio *folio = readpage_iter_peek(iter);
		int ret;

		if (folio) {
			readpage_iter_advance(iter);
		} else {
			pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;

			if (!get_more)
				break;

			folio = xa_load(&iter->mapping->i_pages, folio_offset);
			if (folio && !xa_is_value(folio))
				break;

			folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
			if (!folio)
				break;

			if (!__bch2_folio_create(folio, GFP_KERNEL)) {
				folio_put(folio);
				break;
			}

			ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
			if (ret) {
				__bch2_folio_release(folio);
				folio_put(folio);
				break;
			}

			folio_put(folio);
		}

		BUG_ON(folio_sector(folio) != bio_end_sector(bio));

		BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
	}

	return bch2_trans_relock(trans);
}

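/*
 * Core buffered read loop: walk the extents btree from the bio's current
 * sector, issuing a read for each extent until the bio is exhausted, retrying
 * from the top on transaction restart.
 */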
static void bchfs_read(struct btree_trans *trans,
		       struct bch_read_bio *rbio,
		       subvol_inum inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	u32 snapshot;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	bch2_bkey_buf_init(&sk);
retry:
	bch2_trans_begin(trans);
	iter = (struct btree_iter) { NULL };

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		/*
		 * read_extent -> io_time_reset may cause a transaction restart
		 * without returning an error; we need to check for that here:
		 */
		ret = bch2_trans_relock(trans);
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		sectors = min(sectors, k.k->size - offset_into_extent);

		if (readpages_iter) {
			ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
						  extent_partial_reads_expensive(k));
			if (ret)
				break;
		}

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		bch2_bio_page_state_set(&rbio->bio, k);

		bch2_read_extent(trans, rbio, iter.pos,
				 data_btree, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);

		ret = btree_trans_too_many_iters(trans);
		if (ret)
			break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret) {
		bch_err_inum_offset_ratelimited(c,
				iter.pos.inode,
				iter.pos.offset << 9,
				"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}

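/*
 * ->readahead: batch the request's folios into as few bios as possible (up to
 * BIO_MAX_VECS folios per bio) and hand each bio to bchfs_read().
 */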
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct btree_trans *trans = bch2_trans_get(c);
	struct folio *folio;
	struct readpages_iter readpages_iter;
	int ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	ret = readpages_iter_init(&readpages_iter, ractl);
	BUG_ON(ret);

	bch2_pagecache_add_get(inode);

	while ((folio = readpage_iter_peek(&readpages_iter))) {
		unsigned n = min_t(unsigned,
				   readpages_iter.folios.nr -
				   readpages_iter.idx,
				   BIO_MAX_VECS);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
						   GFP_KERNEL, &c->bio_read),
				  opts);

		readpage_iter_advance(&readpages_iter);

		rbio->bio.bi_iter.bi_sector = folio_sector(folio);
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

		bchfs_read(trans, rbio, inode_inum(inode),
			   &readpages_iter);
		bch2_trans_unlock(trans);
	}

	bch2_pagecache_add_put(inode);

	bch2_trans_put(trans);
	darray_exit(&readpages_iter.folios);
}

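/* Issue a read of a single folio; completion is signalled via the bio's end_io: */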
static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum, struct folio *folio)
{
	bch2_folio_create(folio, __GFP_NOFAIL);

	rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
	rbio->bio.bi_iter.bi_sector = folio_sector(folio);
	BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

	bch2_trans_run(c, (bchfs_read(trans, rbio, inum, NULL), 0));
}

static void bch2_read_single_folio_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

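/* Synchronously read one folio, waiting for completion: */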
int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	struct bch_io_opts opts;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
			 opts);
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_folio_end_io;

	__bchfs_readfolio(c, rbio, inode_inum(inode), folio);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	folio_mark_uptodate(folio);
	return 0;
}

int bch2_read_folio(struct file *file, struct folio *folio)
{
	int ret;

	ret = bch2_read_single_folio(folio, folio->mapping);
	folio_unlock(folio);
	return bch2_err_class(ret);
}

/* writepages: */

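/*
 * Writeback state: a bch_writepage_io wraps the write op for one in-flight
 * bio; bch_writepage_state carries it (plus io opts and a scratch copy of
 * per-sector folio state) across __bch2_writepage() calls.
 */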
struct bch_writepage_io {
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
	struct bch_folio_sector	*tmp;
	unsigned		tmp_sectors;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	struct bch_writepage_state ret = { 0 };

	bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
	return ret;
}

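/*
 * Write completion: on error, zero the per-sector replica counts so the
 * sectors get re-reserved if redirtied; fix up i_blocks accounting; then
 * clear the writeback bit on each folio once all writes against it have
 * finished.
 */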
static void bch2_writepage_io_done(struct bch_write_op *op)
{
	struct bch_writepage_io *io =
		container_of(op, struct bch_writepage_io, op);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct folio_iter fi;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			folio_set_error(fi.folio);
			mapping_set_error(fi.folio->mapping, -EIO);

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	WARN_ON_ONCE(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	   BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_folio_all(fi, bio) {
		struct bch_folio *s = __bch2_folio(fi.folio);

		if (atomic_dec_and_test(&s->write_count))
			folio_end_writeback(fi.folio);
	}

	bio_put(&io->op.wbio.bio);
}

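/* Submit the writepage bio we've been building up: */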
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, NULL);
}

/*
 * Get a bch_writepage_io and add @folio to it - appending to an existing one
 * if possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
					      REQ_OP_WRITE,
					      GFP_KERNEL,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	w->io->inode		= inode;
	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->subvol		= inode->ei_subvol;
	op->pos			= POS(inode->v.i_ino, sector);
	op->end_io		= bch2_writepage_io_done;
	op->devs_need_flush	= &inode->ei_devs_need_flush;
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}

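/*
 * Write out one folio: snapshot its per-sector reservation state under the
 * bch_folio lock, mark it under writeback, then carve the dirty ranges into
 * contiguous chunks and append each to a bch_writepage_io - flushing and
 * reallocating the io whenever the current bio can't be extended to cover the
 * next chunk.
 */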
static int __bch2_writepage(struct folio *folio,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_folio *s;
	unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	int ret;

	EBUG_ON(!folio_test_uptodate(folio));

	/* Is the folio fully inside i_size? */
	if (folio_end_pos(folio) <= i_size)
		goto do_io;

	/* Is the folio fully outside i_size? (truncate in progress) */
	if (folio_pos(folio) >= i_size) {
		folio_unlock(folio);
		return 0;
	}

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the folio size.  For a file that is not a multiple of
	 * the folio size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	folio_zero_segment(folio,
			   i_size - folio_pos(folio),
			   folio_size(folio));
do_io:
	f_sectors = folio_sectors(folio);
	s = bch2_folio(folio);

	if (f_sectors > w->tmp_sectors) {
		kfree(w->tmp);
		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), __GFP_NOFAIL);
		w->tmp_sectors = f_sectors;
	}

	/*
	 * Things get really hairy with errors during writeback:
	 */
	ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
	BUG_ON(ret);

	/* Before unlocking the page, get copy of reservations: */
	spin_lock(&s->lock);
	memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		bch2_folio_sector_set(folio, s, i, SECTOR_allocated);
	}
	spin_unlock(&s->lock);

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	folio_unlock(folio);

	offset = 0;
	while (1) {
		unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < f_sectors &&
		       w->tmp[offset].state < SECTOR_dirty)
			offset++;

		if (offset == f_sectors)
			break;

		while (offset + sectors < f_sectors &&
		       w->tmp[offset + sectors].state >= SECTOR_dirty) {
			reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
			dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
			sectors++;
		}
		BUG_ON(!sectors);

		sector = folio_sector(folio) + offset;

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bio_full(&w->io->op.wbio.bio, sectors << 9) ||
		     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
		     (BIO_MAX_VECS * PAGE_SIZE) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			  round_up(i_size, block_bytes(c)) &&
			  !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
			  "writing past i_size: %llu > %llu (unrounded %llu)\n",
			  bio_end_sector(&w->io->op.wbio.bio) << 9,
			  round_up(i_size, block_bytes(c)),
			  i_size);

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		folio_end_writeback(folio);

	return 0;
}

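/*
 * ->writepages: drive __bch2_writepage() via write_cache_pages(), then submit
 * any final partially-built bio:
 */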
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	kfree(w.tmp);
	return bch2_err_class(ret);
}

/* buffered writes: */

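/*
 * ->write_begin: pin and lock the target folio, read it in if the write is
 * partial, and take a disk reservation that's handed to ->write_end via
 * @fsdata:
 */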
int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res;
	struct folio *folio;
	unsigned offset;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_folio_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(inode);

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
				mapping_gfp_mask(mapping));
	if (IS_ERR_OR_NULL(folio))
		goto err_unlock;

	offset = pos - folio_pos(folio);
	len = min_t(size_t, len, folio_end_pos(folio) - pos);

	if (folio_test_uptodate(folio))
		goto out;

	/* If we're writing the entire folio, we don't need to read it in first: */
	if (!offset && len == folio_size(folio))
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		folio_zero_segment(folio, len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}

	if (folio_pos(folio) >= inode->v.i_size) {
		folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}
readpage:
	ret = bch2_read_single_folio(folio, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
	if (ret)
		goto err;

	ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
	if (ret) {
		if (!folio_test_uptodate(folio)) {
			/*
			 * If the folio hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the folio is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = &folio->page;
	return 0;
err:
	folio_unlock(folio);
	folio_put(folio);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(inode);
	kfree(res);
	*fsdata = NULL;
	return bch2_err_class(ret);
}

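/*
 * ->write_end: update i_size, mark the copied range dirty against the
 * reservation taken in bch2_write_begin(), and release whatever's left of it:
 */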
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res = fsdata;
	struct folio *folio = page_folio(page);
	unsigned offset = pos - folio_pos(folio);

	lockdep_assert_held(&inode->v.i_rwsem);
	BUG_ON(offset + copied > folio_size(folio));

	if (unlikely(copied < len && !folio_test_uptodate(folio))) {
		/*
		 * The folio needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!folio_test_uptodate(folio))
			folio_mark_uptodate(folio);

		bch2_set_folio_dirty(c, inode, folio, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	folio_unlock(folio);
	folio_put(folio);
	bch2_pagecache_add_put(inode);

	bch2_folio_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}

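/* Unlock and drop all folios in @fs from @fi onwards: */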
static noinline void folios_trunc(folios *fs, struct folio **fi)
{
	while (fs->data + fs->nr > fi) {
		struct folio *f = darray_pop(fs);

		folio_unlock(f);
		folio_put(f);
	}
}

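/*
 * Multi-folio buffered write: pin the folio range, read in any partially
 * overwritten edge folios, reserve space, copy from userspace, then mark
 * everything dirty. A failed reservation or short copy truncates the folio
 * batch and results in a short write.
 */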
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation res;
	folios fs;
	struct folio **fi, *f;
	unsigned copied = 0, f_offset, f_copied;
	u64 end = pos + len, f_pos, f_len;
	loff_t last_folio_pos = inode->v.i_size;
	int ret = 0;

	BUG_ON(!len);

	bch2_folio_reservation_init(c, inode, &res);
	darray_init(&fs);

	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
				   FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
				   mapping_gfp_mask(mapping),
				   &fs);
	if (ret)
		goto out;

	BUG_ON(!fs.nr);

	f = darray_first(fs);
	if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
		ret = bch2_read_single_folio(f, mapping);
		if (ret)
			goto out;
	}

	f = darray_last(fs);
	end = min(end, folio_end_pos(f));
	last_folio_pos = folio_pos(f);
	if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
		if (end >= inode->v.i_size) {
			folio_zero_range(f, 0, folio_size(f));
		} else {
			ret = bch2_read_single_folio(f, mapping);
			if (ret)
				goto out;
		}
	}

	ret = bch2_folio_set(c, inode_inum(inode), fs.data, fs.nr);
	if (ret)
		goto out;

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;

		/*
		 * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
		 * supposed to write as much as we have disk space for.
		 *
		 * On failure here we should still write out a partial page if
		 * we aren't completely out of disk space - we don't do that
		 * yet:
		 */
		ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
		if (unlikely(ret)) {
			folios_trunc(&fs, fi);
			if (!fs.nr)
				goto out;

			end = min(end, folio_end_pos(darray_last(fs)));
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (mapping_writably_mapped(mapping))
		darray_for_each(fs, fi)
			flush_dcache_folio(*fi);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;
		f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
		if (!f_copied) {
			folios_trunc(&fs, fi);
			break;
		}

		if (!folio_test_uptodate(f) &&
		    f_copied != folio_size(f) &&
		    pos + copied + f_copied < inode->v.i_size) {
			iov_iter_revert(iter, f_copied);
			folio_zero_range(f, 0, folio_size(f));
			folios_trunc(&fs, fi);
			break;
		}

		flush_dcache_folio(f);
		copied += f_copied;

		if (f_copied != f_len) {
			folios_trunc(&fs, fi + 1);
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (!copied)
		goto out;

	end = pos + copied;

	spin_lock(&inode->v.i_lock);
	if (end > inode->v.i_size)
		i_size_write(&inode->v, end);
	spin_unlock(&inode->v.i_lock);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;

		if (!folio_test_uptodate(f))
			folio_mark_uptodate(f);

		bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	inode->ei_last_dirtied = (unsigned long) current;
out:
	darray_for_each(fs, fi) {
		folio_unlock(*fi);
		folio_put(*fi);
	}

	/*
	 * If the last folio added to the mapping starts beyond current EOF, we
	 * performed a short write but left around at least one post-EOF folio.
	 * Clean up the mapping before we return.
	 */
	if (last_folio_pos >= inode->v.i_size)
		truncate_pagecache(&inode->v, inode->v.i_size);

	darray_exit(&fs);
	bch2_folio_reservation_put(c, inode, &res);

	return copied ?: ret;
}

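/*
 * Outer buffered write loop: fault in the userspace source pages up front
 * (the atomic usercopy in __bch2_buffered_write() can't take page faults),
 * feed chunks to __bch2_buffered_write(), and shrink the chunk when no
 * progress is made to avoid livelocking:
 */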
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(inode);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = iov_iter_count(iter);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(inode);

	return written ? written : ret;
}

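/* ->write_iter: dispatch to the O_DIRECT or buffered write path: */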
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = bch2_direct_write(iocb, from);
		goto out;
	}

	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
out:
	return bch2_err_class(ret);
}

void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
{
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
{
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_writepage_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */