// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extent_update.h"
#include "inode.h"
#include "io_write.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"

#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

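/*
 * Congestion accounting: if an IO takes significantly longer than the
 * device's typical latency, bump a per-device congestion counter, which
 * decays again as IOs complete in a reasonable time:
 */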
static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
                                       u64 now, int rw)
{
        u64 latency_capable =
                ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
        /* ideally we'd be taking into account the device's variance here: */
        u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
        s64 latency_over = io_latency - latency_threshold;

        if (latency_threshold && latency_over > 0) {
                /*
                 * bump up congested by approximately latency_over * 4 /
                 * latency_threshold - we don't need much accuracy here so don't
                 * bother with the divide:
                 */
                if (atomic_read(&ca->congested) < CONGESTED_MAX)
                        atomic_add(latency_over >>
                                   max_t(int, ilog2(latency_threshold) - 2, 0),
                                   &ca->congested);

                ca->congested_last = now;
        } else if (atomic_read(&ca->congested) > 0) {
                atomic_dec(&ca->congested);
        }
}

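/*
 * Called on IO completion: update the device's exponentially weighted
 * average latency and its congestion state for the given direction:
 */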
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
        atomic64_t *latency = &ca->cur_latency[rw];
        u64 now = local_clock();
        u64 io_latency = time_after64(now, submit_time)
                ? now - submit_time
                : 0;
        u64 old, new, v = atomic64_read(latency);

        do {
                old = v;

                /*
                 * If the io latency was reasonably close to the current
                 * latency, skip doing the update and atomic operation - most of
                 * the time:
                 */
                if (abs((int) (old - io_latency)) < (old >> 1) &&
                    now & ~(~0U << 5))
                        break;

                new = ewma_add(old, io_latency, 5);
        } while ((v = atomic64_cmpxchg(latency, old, new)) != old);

        bch2_congested_acct(ca, io_latency, now, rw);

        __time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
}

#endif

/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
        struct bvec_iter_all iter;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, bio, iter)
                if (bv->bv_page != ZERO_PAGE(0))
                        mempool_free(bv->bv_page, &c->bio_bounce_pages);
        bio->bi_vcnt = 0;
}

static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
        struct page *page;

        if (likely(!*using_mempool)) {
                page = alloc_page(GFP_NOFS);
                if (unlikely(!page)) {
                        mutex_lock(&c->bio_bounce_pages_lock);
                        *using_mempool = true;
                        goto pool_alloc;
                }
        } else {
pool_alloc:
                page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
        }

        return page;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
                               size_t size)
{
        bool using_mempool = false;

        while (size) {
                struct page *page = __bio_alloc_page_pool(c, &using_mempool);
                unsigned len = min_t(size_t, PAGE_SIZE, size);

                BUG_ON(!bio_add_page(bio, page, len, 0));
                size -= len;
        }

        if (using_mempool)
                mutex_unlock(&c->bio_bounce_pages_lock);
}

/* Extent update path: */

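/*
 * Compute how inserting @new changes disk accounting, by walking the
 * existing extents it will overwrite: @i_sectors_delta is the change to
 * the inode's sector count, @disk_sectors_delta the change in on-disk
 * sectors; @usage_increasing is set if @new needs more space than the
 * extents it overwrites:
 */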
int bch2_sum_sector_overwrites(struct btree_trans *trans,
                               struct btree_iter *extent_iter,
                               struct bkey_i *new,
                               bool *usage_increasing,
                               s64 *i_sectors_delta,
                               s64 *disk_sectors_delta)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c old;
        unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
        bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
        int ret = 0;

        *usage_increasing       = false;
        *i_sectors_delta        = 0;
        *disk_sectors_delta     = 0;

        bch2_trans_copy_iter(&iter, extent_iter);

        for_each_btree_key_upto_continue_norestart(iter,
                                new->k.p, BTREE_ITER_SLOTS, old, ret) {
                s64 sectors = min(new->k.p.offset, old.k->p.offset) -
                        max(bkey_start_offset(&new->k),
                            bkey_start_offset(old.k));

                *i_sectors_delta += sectors *
                        (bkey_extent_is_allocation(&new->k) -
                         bkey_extent_is_allocation(old.k));

                *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
                *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
                        ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
                        : 0;

                if (!*usage_increasing &&
                    (new->k.p.snapshot != old.k->p.snapshot ||
                     new_replicas > bch2_bkey_replicas(c, old) ||
                     (!new_compressed && bch2_bkey_sectors_compressed(old))))
                        *usage_increasing = true;

                if (bkey_ge(old.k->p, new->k.p))
                        break;
        }

        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
                                                    struct btree_iter *extent_iter,
                                                    u64 new_i_size,
                                                    s64 i_sectors_delta)
{
        struct btree_iter iter;
        struct bkey_i *k;
        struct bkey_i_inode_v3 *inode;
        /*
         * Crazy performance optimization:
         * Every extent update needs to also update the inode: the inode trigger
         * will set bi->journal_seq to the journal sequence number of this
         * transaction - for fsync.
         *
         * But if that's the only reason we're updating the inode (we're not
         * updating bi_size or bi_sectors), then we don't need the inode update
         * to be journalled - if we crash, the bi_journal_seq update will be
         * lost, but that's fine.
         */
        unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
        int ret;

        k = bch2_bkey_get_mut_noupdate(trans, &iter, BTREE_ID_inodes,
                              SPOS(0,
                                   extent_iter->pos.inode,
                                   extent_iter->snapshot),
                              BTREE_ITER_CACHED);
        ret = PTR_ERR_OR_ZERO(k);
        if (unlikely(ret))
                return ret;

        if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
                k = bch2_inode_to_v3(trans, k);
                ret = PTR_ERR_OR_ZERO(k);
                if (unlikely(ret))
                        goto err;
        }

        inode = bkey_i_to_inode_v3(k);

        if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
            new_i_size > le64_to_cpu(inode->v.bi_size)) {
                inode->v.bi_size = cpu_to_le64(new_i_size);
                inode_update_flags = 0;
        }

        if (i_sectors_delta) {
                le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
                inode_update_flags = 0;
        }

        if (inode->k.p.snapshot != iter.snapshot) {
                inode->k.p.snapshot = iter.snapshot;
                inode_update_flags = 0;
        }

        ret = bch2_trans_update(trans, &iter, &inode->k_i,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
                                inode_update_flags);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

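/*
 * Insert a single extent: trim @k to what can be committed atomically,
 * top up the disk reservation if the overwrite needs more space, update
 * the inode's i_size/i_sectors, and commit; on success @iter is
 * advanced past the inserted extent:
 */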
int bch2_extent_update(struct btree_trans *trans,
                       subvol_inum inum,
                       struct btree_iter *iter,
                       struct bkey_i *k,
                       struct disk_reservation *disk_res,
                       u64 new_i_size,
                       s64 *i_sectors_delta_total,
                       bool check_enospc)
{
        struct bpos next_pos;
        bool usage_increasing;
        s64 i_sectors_delta = 0, disk_sectors_delta = 0;
        int ret;

        /*
         * This traverses the iterator without changing iter->path->pos to
         * search_key() (which is pos + 1 for extents): we want there to be a
         * path already traversed at iter->pos because
         * bch2_trans_extent_update() will use it to attempt extent merging
         */
        ret = __bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;

        ret = bch2_extent_trim_atomic(trans, iter, k);
        if (ret)
                return ret;

        next_pos = k->k.p;

        ret = bch2_sum_sector_overwrites(trans, iter, k,
                        &usage_increasing,
                        &i_sectors_delta,
                        &disk_sectors_delta);
        if (ret)
                return ret;

        if (disk_res &&
            disk_sectors_delta > (s64) disk_res->sectors) {
                ret = bch2_disk_reservation_add(trans->c, disk_res,
                                        disk_sectors_delta - disk_res->sectors,
                                        !check_enospc || !usage_increasing
                                        ? BCH_DISK_RESERVATION_NOFAIL : 0);
                if (ret)
                        return ret;
        }

        /*
         * Note:
         * We always have to do an inode update - even when i_size/i_sectors
         * aren't changing - for fsync to work properly; fsync relies on
         * inode->bi_journal_seq which is updated by the trigger code:
         */
        ret =   bch2_extent_update_i_size_sectors(trans, iter,
                                                  min(k->k.p.offset << 9, new_i_size),
                                                  i_sectors_delta) ?:
                bch2_trans_update(trans, iter, k, 0) ?:
                bch2_trans_commit(trans, disk_res, NULL,
                                BCH_TRANS_COMMIT_no_check_rw|
                                BCH_TRANS_COMMIT_no_enospc);
        if (unlikely(ret))
                return ret;

        if (i_sectors_delta_total)
                *i_sectors_delta_total += i_sectors_delta;
        bch2_btree_iter_set_pos(iter, next_pos);
        return 0;
}

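/*
 * Index update for normal (non-move) writes: insert each key in
 * op->insert_keys into the extents btree, retrying on transaction
 * restart and resuming partially applied inserts where they left off:
 */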
static int bch2_write_index_default(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct bkey_buf sk;
        struct keylist *keys = &op->insert_keys;
        struct bkey_i *k = bch2_keylist_front(keys);
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        subvol_inum inum = {
                .subvol = op->subvol,
                .inum   = k->k.p.inode,
        };
        int ret;

        BUG_ON(!inum.subvol);

        bch2_bkey_buf_init(&sk);

        do {
                bch2_trans_begin(trans);

                k = bch2_keylist_front(keys);
                bch2_bkey_buf_copy(&sk, c, k);

                ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
                                                  &sk.k->k.p.snapshot);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
                if (ret)
                        break;

                bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                                     bkey_start_pos(&sk.k->k),
                                     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

                ret =   bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
                        bch2_extent_update(trans, inum, &iter, sk.k,
                                        &op->res,
                                        op->new_i_size, &op->i_sectors_delta,
                                        op->flags & BCH_WRITE_CHECK_ENOSPC);
                bch2_trans_iter_exit(trans, &iter);

                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
                if (ret)
                        break;

                if (bkey_ge(iter.pos, k->k.p))
                        bch2_keylist_pop_front(&op->insert_keys);
                else
                        bch2_cut_front(iter.pos, k);
        } while (!bch2_keylist_empty(keys));

        bch2_trans_put(trans);
        bch2_bkey_buf_exit(&sk, c);

        return ret;
}

/* Writes */

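/*
 * Submit the write to every device @k points to, cloning @wbio for all
 * but the last pointer; replicas on devices we can't get an ioref for
 * complete immediately with BLK_STS_REMOVED:
 */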
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
                               enum bch_data_type type,
                               const struct bkey_i *k,
                               bool nocow)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
        struct bch_write_bio *n;

        BUG_ON(c->opts.nochanges);

        bkey_for_each_ptr(ptrs, ptr) {
                BUG_ON(!bch2_dev_exists2(c, ptr->dev));

                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

                if (to_entry(ptr + 1) < ptrs.end) {
                        n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
                                                GFP_NOFS, &ca->replica_set));

                        n->bio.bi_end_io        = wbio->bio.bi_end_io;
                        n->bio.bi_private       = wbio->bio.bi_private;
                        n->parent               = wbio;
                        n->split                = true;
                        n->bounce               = false;
                        n->put_bio              = true;
                        n->bio.bi_opf           = wbio->bio.bi_opf;
                        bio_inc_remaining(&wbio->bio);
                } else {
                        n = wbio;
                        n->split                = false;
                }

                n->c                    = c;
                n->dev                  = ptr->dev;
                n->have_ioref           = nocow || bch2_dev_get_ioref(ca,
                                        type == BCH_DATA_btree ? READ : WRITE);
                n->nocow                = nocow;
                n->submit_time          = local_clock();
                n->inode_offset         = bkey_start_offset(&k->k);
                n->bio.bi_iter.bi_sector = ptr->offset;

                if (likely(n->have_ioref)) {
                        this_cpu_add(ca->io_done->sectors[WRITE][type],
                                     bio_sectors(&n->bio));

                        bio_set_dev(&n->bio, ca->disk_sb.bdev);

                        if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
                                bio_endio(&n->bio);
                                continue;
                        }

                        submit_bio(&n->bio);
                } else {
                        n->bio.bi_status        = BLK_STS_REMOVED;
                        bio_endio(&n->bio);
                }
        }
}

static void __bch2_write(struct bch_write_op *);

static void bch2_write_done(struct closure *cl)
{
        struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
        struct bch_fs *c = op->c;

        EBUG_ON(op->open_buckets.nr);

        time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
        bch2_disk_reservation_put(c, &op->res);

        if (!(op->flags & BCH_WRITE_MOVE))
                bch2_write_ref_put(c, BCH_WRITE_REF_write);
        bch2_keylist_free(&op->insert_keys, op->inline_keys);

        EBUG_ON(cl->parent);
        closure_debug_destroy(cl);
        if (op->end_io)
                op->end_io(op);
}

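/*
 * Drop extent pointers to the devices that failed: if a key ends up
 * with no pointers left, the whole write has failed:
 */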
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
        struct keylist *keys = &op->insert_keys;
        struct bch_extent_ptr *ptr;
        struct bkey_i *src, *dst = keys->keys, *n;

        for (src = keys->keys; src != keys->top; src = n) {
                n = bkey_next(src);

                if (bkey_extent_is_direct_data(&src->k)) {
                        bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
                                            test_bit(ptr->dev, op->failed.d));

                        if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
                                return -EIO;
                }

                if (dst != src)
                        memmove_u64s_down(dst, src, src->k.u64s);
                dst = bkey_next(dst);
        }

        keys->top = dst;
        return 0;
}

/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:         bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct keylist *keys = &op->insert_keys;
        unsigned dev;
        int ret = 0;

        if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
                ret = bch2_write_drop_io_error_ptrs(op);
                if (ret)
                        goto err;
        }

        if (!bch2_keylist_empty(keys)) {
                u64 sectors_start = keylist_sectors(keys);

                ret = !(op->flags & BCH_WRITE_MOVE)
                        ? bch2_write_index_default(op)
                        : bch2_data_update_index_update(op);

                BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
                BUG_ON(keylist_sectors(keys) && !ret);

                op->written += sectors_start - keylist_sectors(keys);

                if (ret && !bch2_err_matches(ret, EROFS)) {
                        struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

                        bch_err_inum_offset_ratelimited(c,
                                insert->k.p.inode, insert->k.p.offset << 9,
                                "write error while doing btree update: %s",
                                bch2_err_str(ret));
                }

                if (ret)
                        goto err;
        }
out:
        /* If a bucket wasn't written, we can't erasure code it: */
        for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
                bch2_open_bucket_write_error(c, &op->open_buckets, dev);

        bch2_open_buckets_put(c, &op->open_buckets);
        return;
err:
        keys->top = keys->keys;
        op->error = ret;
        op->flags |= BCH_WRITE_DONE;
        goto out;
}

static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
        if (state != wp->state) {
                u64 now = ktime_get_ns();

                if (wp->last_state_change &&
                    time_after64(now, wp->last_state_change))
                        wp->time[wp->state] += now - wp->last_state_change;
                wp->state = state;
                wp->last_state_change = now;
        }
}

static inline void wp_update_state(struct write_point *wp, bool running)
{
        enum write_point_state state;

        state = running                  ? WRITE_POINT_running :
                !list_empty(&wp->writes) ? WRITE_POINT_waiting_io
                                         : WRITE_POINT_stopped;

        __wp_update_state(wp, state);
}

static CLOSURE_CALLBACK(bch2_write_index)
{
        closure_type(op, struct bch_write_op, cl);
        struct write_point *wp = op->wp;
        struct workqueue_struct *wq = index_update_wq(op);
        unsigned long flags;

        if ((op->flags & BCH_WRITE_DONE) &&
            (op->flags & BCH_WRITE_MOVE))
                bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

        spin_lock_irqsave(&wp->writes_lock, flags);
        if (wp->state == WRITE_POINT_waiting_io)
                __wp_update_state(wp, WRITE_POINT_waiting_work);
        list_add_tail(&op->wp_list, &wp->writes);
        spin_unlock_irqrestore(&wp->writes_lock, flags);

        queue_work(wq, &wp->index_update_work);
}

static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
        op->wp = wp;

        if (wp->state == WRITE_POINT_stopped) {
                spin_lock_irq(&wp->writes_lock);
                __wp_update_state(wp, WRITE_POINT_waiting_io);
                spin_unlock_irq(&wp->writes_lock);
        }
}

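/* Workqueue function: drain and process a write point's queued write ops: */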
void bch2_write_point_do_index_updates(struct work_struct *work)
{
        struct write_point *wp =
                container_of(work, struct write_point, index_update_work);
        struct bch_write_op *op;

        while (1) {
                spin_lock_irq(&wp->writes_lock);
                op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
                if (op)
                        list_del(&op->wp_list);
                wp_update_state(wp, op != NULL);
                spin_unlock_irq(&wp->writes_lock);

                if (!op)
                        break;

                op->flags |= BCH_WRITE_IN_WORKER;

                __bch2_write_index(op);

                if (!(op->flags & BCH_WRITE_DONE))
                        __bch2_write(op);
                else
                        bch2_write_done(&op->cl);
        }
}

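/*
 * Per-replica write completion: record errors and latency, drop the
 * device ioref and any bounce pages, then complete the parent bio (for
 * a split) or release our ref on the write op's closure:
 */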
static void bch2_write_endio(struct bio *bio)
{
        struct closure *cl              = bio->bi_private;
        struct bch_write_op *op         = container_of(cl, struct bch_write_op, cl);
        struct bch_write_bio *wbio      = to_wbio(bio);
        struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
        struct bch_fs *c                = wbio->c;
        struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);

        if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
                                    op->pos.inode,
                                    wbio->inode_offset << 9,
                                    "data write error: %s",
                                    bch2_blk_status_to_str(bio->bi_status))) {
                set_bit(wbio->dev, op->failed.d);
                op->flags |= BCH_WRITE_IO_ERROR;
        }

        if (wbio->nocow)
                set_bit(wbio->dev, op->devs_need_flush->d);

        if (wbio->have_ioref) {
                bch2_latency_acct(ca, wbio->submit_time, WRITE);
                percpu_ref_put(&ca->io_ref);
        }

        if (wbio->bounce)
                bch2_bio_free_pages_pool(c, bio);

        if (wbio->put_bio)
                bio_put(bio);

        if (parent)
                bio_endio(&parent->bio);
        else
                closure_put(cl);
}

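/*
 * Add a key for the extent just written to op->insert_keys, advancing
 * op->pos and taking pointers from the write point; it'll be inserted
 * into the btree once the data write completes:
 */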
static void init_append_extent(struct bch_write_op *op,
                               struct write_point *wp,
                               struct bversion version,
                               struct bch_extent_crc_unpacked crc)
{
        struct bkey_i_extent *e;

        op->pos.offset += crc.uncompressed_size;

        e = bkey_extent_init(op->insert_keys.top);
        e->k.p          = op->pos;
        e->k.size       = crc.uncompressed_size;
        e->k.version    = version;

        if (crc.csum_type ||
            crc.compression_type ||
            crc.nonce)
                bch2_extent_crc_append(&e->k_i, crc);

        bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
                                       op->flags & BCH_WRITE_CACHED);

        bch2_keylist_push(&op->insert_keys);
}

static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
                                        struct write_point *wp,
                                        struct bio *src,
                                        bool *page_alloc_failed,
                                        void *buf)
{
        struct bch_write_bio *wbio;
        struct bio *bio;
        unsigned output_available =
                min(wp->sectors_free << 9, src->bi_iter.bi_size);
        unsigned pages = DIV_ROUND_UP(output_available +
                                      (buf
                                       ? ((unsigned long) buf & (PAGE_SIZE - 1))
                                       : 0), PAGE_SIZE);

        pages = min(pages, BIO_MAX_VECS);

        bio = bio_alloc_bioset(NULL, pages, 0,
                               GFP_NOFS, &c->bio_write);
        wbio                    = wbio_init(bio);
        wbio->put_bio           = true;
        /* copy WRITE_SYNC flag */
        wbio->bio.bi_opf        = src->bi_opf;

        if (buf) {
                bch2_bio_map(bio, buf, output_available);
                return bio;
        }

        wbio->bounce            = true;

        /*
         * We can't use mempool for more than c->sb.encoded_extent_max
         * worth of pages, but we'd like to allocate more if we can:
         */
        bch2_bio_alloc_pages_pool(c, bio,
                                  min_t(unsigned, output_available,
                                        c->opts.encoded_extent_max));

        if (bio->bi_iter.bi_size < output_available)
                *page_alloc_failed =
                        bch2_bio_alloc_pages(bio,
                                             output_available -
                                             bio->bi_iter.bi_size,
                                             GFP_NOFS) != 0;

        return bio;
}

static int bch2_write_rechecksum(struct bch_fs *c,
                                 struct bch_write_op *op,
                                 unsigned new_csum_type)
{
        struct bio *bio = &op->wbio.bio;
        struct bch_extent_crc_unpacked new_crc;
        int ret;

        /* bch2_rechecksum_bio() can't encrypt or decrypt data: */

        if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
            bch2_csum_type_is_encryption(new_csum_type))
                new_csum_type = op->crc.csum_type;

        ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
                                  NULL, &new_crc,
                                  op->crc.offset, op->crc.live_size,
                                  new_csum_type);
        if (ret)
                return ret;

        bio_advance(bio, op->crc.offset << 9);
        bio->bi_iter.bi_size = op->crc.live_size << 9;
        op->crc = new_crc;
        return 0;
}

static int bch2_write_decrypt(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct nonce nonce = extent_nonce(op->version, op->crc);
        struct bch_csum csum;
        int ret;

        if (!bch2_csum_type_is_encryption(op->crc.csum_type))
                return 0;

        /*
         * If we need to decrypt data in the write path, we'll no longer be able
         * to verify the existing checksum (poly1305 mac, in this case) after
         * it's decrypted - this is the last point we'll be able to reverify the
         * checksum:
         */
        csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
        if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
                return -EIO;

        ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
        op->crc.csum_type = 0;
        op->crc.csum = (struct bch_csum) { 0, 0 };
        return ret;
}

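/*
 * Handle writes of data that is already encoded (checksummed and/or
 * compressed, i.e. BCH_WRITE_DATA_ENCODED): decide whether the extent
 * can be written out unchanged, and if not decompress/decrypt and
 * rechecksum it so the normal write path can take over:
 */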
static enum prep_encoded_ret {
        PREP_ENCODED_OK,
        PREP_ENCODED_ERR,
        PREP_ENCODED_CHECKSUM_ERR,
        PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
        struct bch_fs *c = op->c;
        struct bio *bio = &op->wbio.bio;

        if (!(op->flags & BCH_WRITE_DATA_ENCODED))
                return PREP_ENCODED_OK;

        BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

        /* Can we just write the entire extent as is? */
        if (op->crc.uncompressed_size == op->crc.live_size &&
            op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
            op->crc.compressed_size <= wp->sectors_free &&
            (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
             op->incompressible)) {
                if (!crc_is_compressed(op->crc) &&
                    op->csum_type != op->crc.csum_type &&
                    bch2_write_rechecksum(c, op, op->csum_type) &&
                    !c->opts.no_data_io)
                        return PREP_ENCODED_CHECKSUM_ERR;

                return PREP_ENCODED_DO_WRITE;
        }

        /*
         * If the data is compressed and we couldn't write the entire extent as
         * is, we have to decompress it:
         */
        if (crc_is_compressed(op->crc)) {
                struct bch_csum csum;

                if (bch2_write_decrypt(op))
                        return PREP_ENCODED_CHECKSUM_ERR;

                /* Last point we can still verify checksum: */
                csum = bch2_checksum_bio(c, op->crc.csum_type,
                                         extent_nonce(op->version, op->crc),
                                         bio);
                if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
                        return PREP_ENCODED_CHECKSUM_ERR;

                if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
                        return PREP_ENCODED_ERR;
        }

        /*
         * No longer have compressed data after this point - data might be
         * encrypted:
         */

        /*
         * If the data is checksummed and we're only writing a subset,
         * rechecksum and adjust bio to point to currently live data:
         */
        if ((op->crc.live_size != op->crc.uncompressed_size ||
             op->crc.csum_type != op->csum_type) &&
            bch2_write_rechecksum(c, op, op->csum_type) &&
            !c->opts.no_data_io)
                return PREP_ENCODED_CHECKSUM_ERR;

        /*
         * If we want to compress the data, it has to be decrypted:
         */
        if ((op->compression_opt ||
             bch2_csum_type_is_encryption(op->crc.csum_type) !=
             bch2_csum_type_is_encryption(op->csum_type)) &&
            bch2_write_decrypt(op))
                return PREP_ENCODED_CHECKSUM_ERR;

        return PREP_ENCODED_OK;
}

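/*
 * Chew off as much of the source bio as will fit in @wp, compressing,
 * encrypting and checksumming it (bouncing to a new bio if needed) and
 * generating the keys to insert; returns 1 if there's more data to
 * write, 0 if done, or a negative error code:
 */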
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
                             struct bio **_dst)
{
        struct bch_fs *c = op->c;
        struct bio *src = &op->wbio.bio, *dst = src;
        struct bvec_iter saved_iter;
        void *ec_buf;
        unsigned total_output = 0, total_input = 0;
        bool bounce = false;
        bool page_alloc_failed = false;
        int ret, more = 0;

        BUG_ON(!bio_sectors(src));

        ec_buf = bch2_writepoint_ec_buf(c, wp);

        switch (bch2_write_prep_encoded_data(op, wp)) {
        case PREP_ENCODED_OK:
                break;
        case PREP_ENCODED_ERR:
                ret = -EIO;
                goto err;
        case PREP_ENCODED_CHECKSUM_ERR:
                goto csum_err;
        case PREP_ENCODED_DO_WRITE:
                /* XXX look for bug here */
                if (ec_buf) {
                        dst = bch2_write_bio_alloc(c, wp, src,
                                                   &page_alloc_failed,
                                                   ec_buf);
                        bio_copy_data(dst, src);
                        bounce = true;
                }
                init_append_extent(op, wp, op->version, op->crc);
                goto do_write;
        }

        if (ec_buf ||
            op->compression_opt ||
            (op->csum_type &&
             !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
            (bch2_csum_type_is_encryption(op->csum_type) &&
             !(op->flags & BCH_WRITE_PAGES_OWNED))) {
                dst = bch2_write_bio_alloc(c, wp, src,
                                           &page_alloc_failed,
                                           ec_buf);
                bounce = true;
        }

        saved_iter = dst->bi_iter;

        do {
                struct bch_extent_crc_unpacked crc = { 0 };
                struct bversion version = op->version;
                size_t dst_len = 0, src_len = 0;

                if (page_alloc_failed &&
                    dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
                    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
                        break;

                BUG_ON(op->compression_opt &&
                       (op->flags & BCH_WRITE_DATA_ENCODED) &&
                       bch2_csum_type_is_encryption(op->crc.csum_type));
                BUG_ON(op->compression_opt && !bounce);

                crc.compression_type = op->incompressible
                        ? BCH_COMPRESSION_TYPE_incompressible
                        : op->compression_opt
                        ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
                                            op->compression_opt)
                        : 0;
                if (!crc_is_compressed(crc)) {
                        dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
                        dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

                        if (op->csum_type)
                                dst_len = min_t(unsigned, dst_len,
                                                c->opts.encoded_extent_max);

                        if (bounce) {
                                swap(dst->bi_iter.bi_size, dst_len);
                                bio_copy_data(dst, src);
                                swap(dst->bi_iter.bi_size, dst_len);
                        }

                        src_len = dst_len;
                }

                BUG_ON(!src_len || !dst_len);

                if (bch2_csum_type_is_encryption(op->csum_type)) {
                        if (bversion_zero(version)) {
                                version.lo = atomic64_inc_return(&c->key_version);
                        } else {
                                crc.nonce = op->nonce;
                                op->nonce += src_len >> 9;
                        }
                }

                if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
                    !crc_is_compressed(crc) &&
                    bch2_csum_type_is_encryption(op->crc.csum_type) ==
                    bch2_csum_type_is_encryption(op->csum_type)) {
                        u8 compression_type = crc.compression_type;
                        u16 nonce = crc.nonce;
                        /*
                         * Note: when we're using rechecksum(), we need to be
                         * checksumming @src because it has all the data our
                         * existing checksum covers - if we bounced (because we
                         * were trying to compress), @dst will only have the
                         * part of the data the new checksum will cover.
                         *
                         * But normally we want to be checksumming post bounce,
                         * because part of the reason for bouncing is so the
                         * data can't be modified (by userspace) while it's in
                         * flight.
                         */
                        if (bch2_rechecksum_bio(c, src, version, op->crc,
                                        &crc, &op->crc,
                                        src_len >> 9,
                                        bio_sectors(src) - (src_len >> 9),
                                        op->csum_type))
                                goto csum_err;
                        /*
                         * bch2_rechecksum_bio() sets compression_type on crc
                         * from op->crc - this isn't always correct, as
                         * sometimes we're changing an extent from uncompressed
                         * to incompressible.
                         */
                        crc.compression_type = compression_type;
                        crc.nonce = nonce;
                } else {
                        if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
                            bch2_rechecksum_bio(c, src, version, op->crc,
                                        NULL, &op->crc,
                                        src_len >> 9,
                                        bio_sectors(src) - (src_len >> 9),
                                        op->crc.csum_type))
                                goto csum_err;

                        crc.compressed_size     = dst_len >> 9;
                        crc.uncompressed_size   = src_len >> 9;
                        crc.live_size           = src_len >> 9;

                        swap(dst->bi_iter.bi_size, dst_len);
                        ret = bch2_encrypt_bio(c, op->csum_type,
                                               extent_nonce(version, crc), dst);
                        if (ret)
                                goto err;

                        crc.csum = bch2_checksum_bio(c, op->csum_type,
                                         extent_nonce(version, crc), dst);
                        crc.csum_type = op->csum_type;
                        swap(dst->bi_iter.bi_size, dst_len);
                }

                init_append_extent(op, wp, version, crc);

                if (dst != src)
                        bio_advance(dst, dst_len);
                bio_advance(src, src_len);
                total_output    += dst_len;
                total_input     += src_len;
        } while (dst->bi_iter.bi_size &&
                 src->bi_iter.bi_size &&
                 wp->sectors_free &&
                 !bch2_keylist_realloc(&op->insert_keys,
                                      op->inline_keys,
                                      ARRAY_SIZE(op->inline_keys),
                                      BKEY_EXTENT_U64s_MAX));

        more = src->bi_iter.bi_size != 0;

        dst->bi_iter = saved_iter;

        if (dst == src && more) {
                BUG_ON(total_output != total_input);

                dst = bio_split(src, total_input >> 9,
                                GFP_NOFS, &c->bio_write);
                wbio_init(dst)->put_bio = true;
                /* copy WRITE_SYNC flag */
                dst->bi_opf             = src->bi_opf;
        }

        dst->bi_iter.bi_size = total_output;
do_write:
        *_dst = dst;
        return more;
csum_err:
        bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
        ret = -EIO;
err:
        if (to_wbio(dst)->bounce)
                bch2_bio_free_pages_pool(c, dst);
        if (to_wbio(dst)->put_bio)
                bio_put(dst);

        return ret;
}

static bool bch2_extent_is_writeable(struct bch_write_op *op,
                                     struct bkey_s_c k)
{
        struct bch_fs *c = op->c;
        struct bkey_s_c_extent e;
        struct extent_ptr_decoded p;
        const union bch_extent_entry *entry;
        unsigned replicas = 0;

        if (k.k->type != KEY_TYPE_extent)
                return false;

        e = bkey_s_c_to_extent(k);
        extent_for_each_ptr_decode(e, p, entry) {
                if (crc_is_encoded(p.crc) || p.has_ec)
                        return false;

                replicas += bch2_extent_ptr_durability(c, &p);
        }

        return replicas >= op->opts.data_replicas;
}

static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;

        for_each_keylist_key(&op->insert_keys, k) {
                struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));

                bkey_for_each_ptr(ptrs, ptr)
                        bch2_bucket_nocow_unlock(&c->nocow_locks,
                                                 PTR_BUCKET_POS(c, ptr),
                                                 BUCKET_NOCOW_LOCK_UPDATE);
        }
}

static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
                                                  struct btree_iter *iter,
                                                  struct bkey_i *orig,
                                                  struct bkey_s_c k,
                                                  u64 new_i_size)
{
        if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
                /* trace this */
                return 0;
        }

        struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
        int ret = PTR_ERR_OR_ZERO(new);
        if (ret)
                return ret;

        bch2_cut_front(bkey_start_pos(&orig->k), new);
        bch2_cut_back(orig->k.p, new);

        struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
        bkey_for_each_ptr(ptrs, ptr)
                ptr->unwritten = 0;

        /*
         * Note that we're not calling bch2_subvol_get_snapshot() in this path -
         * that was done when we kicked off the write, and here it's important
         * that we update the extent that we wrote to - even if a snapshot has
         * since been created. The write is still outstanding, so we're ok
         * w.r.t. snapshot atomicity:
         */
        return  bch2_extent_update_i_size_sectors(trans, iter,
                                        min(new->k.p.offset << 9, new_i_size), 0) ?:
                bch2_trans_update(trans, iter, new,
                                  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
}

static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct btree_trans *trans = bch2_trans_get(c);

        for_each_keylist_key(&op->insert_keys, orig) {
                int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
                                     bkey_start_pos(&orig->k), orig->k.p,
                                     BTREE_ITER_INTENT, k,
                                     NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
                        bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
                }));

                if (ret && !bch2_err_matches(ret, EROFS)) {
                        struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

                        bch_err_inum_offset_ratelimited(c,
                                insert->k.p.inode, insert->k.p.offset << 9,
                                "write error while doing btree update: %s",
                                bch2_err_str(ret));
                }

                if (ret) {
                        op->error = ret;
                        break;
                }
        }

        bch2_trans_put(trans);
}

static void __bch2_nocow_write_done(struct bch_write_op *op)
{
        bch2_nocow_write_unlock(op);

        if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
                op->error = -EIO;
        } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
                bch2_nocow_write_convert_unwritten(op);
}

static CLOSURE_CALLBACK(bch2_nocow_write_done)
{
        closure_type(op, struct bch_write_op, cl);

        __bch2_nocow_write_done(op);
        bch2_write_done(cl);
}

struct bucket_to_lock {
        struct bpos             b;
        unsigned                gen;
        struct nocow_lock_bucket *l;
};

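/*
 * Nocow write path: attempt to write in place, to the buckets the
 * extents at op->pos already point to, taking nocow locks so the
 * buckets can't be reused or have their data moved out from under us;
 * falls back to the normal COW path if the extents aren't writeable in
 * place or a bucket gen is stale:
 */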
static void bch2_nocow_write(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct btree_trans *trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
        u32 snapshot;
        struct bucket_to_lock *stale_at;
        int ret;

        if (op->flags & BCH_WRITE_MOVE)
                return;

        darray_init(&buckets);
        trans = bch2_trans_get(c);
retry:
        bch2_trans_begin(trans);

        ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
        if (unlikely(ret))
                goto err;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                             SPOS(op->pos.inode, op->pos.offset, snapshot),
                             BTREE_ITER_SLOTS);
        while (1) {
                struct bio *bio = &op->wbio.bio;

                buckets.nr = 0;

                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        break;

                /* fall back to normal cow write path? */
                if (unlikely(k.k->p.snapshot != snapshot ||
                             !bch2_extent_is_writeable(op, k)))
                        break;

                if (bch2_keylist_realloc(&op->insert_keys,
                                         op->inline_keys,
                                         ARRAY_SIZE(op->inline_keys),
                                         k.k->u64s))
                        break;

                /* Get iorefs before dropping btree locks: */
                struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
                bkey_for_each_ptr(ptrs, ptr) {
                        struct bpos b = PTR_BUCKET_POS(c, ptr);
                        struct nocow_lock_bucket *l =
                                bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
                        prefetch(l);

                        if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
                                goto err_get_ioref;

                        /* XXX allocating memory with btree locks held - rare */
                        darray_push_gfp(&buckets, ((struct bucket_to_lock) {
                                                   .b = b, .gen = ptr->gen, .l = l,
                                                   }), GFP_KERNEL|__GFP_NOFAIL);

                        if (ptr->unwritten)
                                op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
                }

                /* Unlock before taking nocow locks, doing IO: */
                bkey_reassemble(op->insert_keys.top, k);
                bch2_trans_unlock(trans);

                bch2_cut_front(op->pos, op->insert_keys.top);
                if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
                        bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);

                darray_for_each(buckets, i) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, i->b.inode);

                        __bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
                                                 bucket_to_u64(i->b),
                                                 BUCKET_NOCOW_LOCK_UPDATE);

                        rcu_read_lock();
                        bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen);
                        rcu_read_unlock();

                        if (unlikely(stale)) {
                                stale_at = i;
                                goto err_bucket_stale;
                        }
                }

                bio = &op->wbio.bio;
                if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
                        bio = bio_split(bio, k.k->p.offset - op->pos.offset,
                                        GFP_KERNEL, &c->bio_write);
                        wbio_init(bio)->put_bio = true;
                        bio->bi_opf = op->wbio.bio.bi_opf;
                } else {
                        op->flags |= BCH_WRITE_DONE;
                }

                op->pos.offset += bio_sectors(bio);
                op->written += bio_sectors(bio);

                bio->bi_end_io  = bch2_write_endio;
                bio->bi_private = &op->cl;
                bio->bi_opf |= REQ_OP_WRITE;
                closure_get(&op->cl);
                bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
                                          op->insert_keys.top, true);

                bch2_keylist_push(&op->insert_keys);
                if (op->flags & BCH_WRITE_DONE)
                        break;
                bch2_btree_iter_advance(&iter);
        }
out:
        bch2_trans_iter_exit(trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        if (ret) {
                bch_err_inum_offset_ratelimited(c,
                        op->pos.inode, op->pos.offset << 9,
                        "%s: btree lookup error %s", __func__, bch2_err_str(ret));
                op->error = ret;
                op->flags |= BCH_WRITE_DONE;
        }

        bch2_trans_put(trans);
        darray_exit(&buckets);

        /* fallback to cow write path? */
        if (!(op->flags & BCH_WRITE_DONE)) {
                closure_sync(&op->cl);
                __bch2_nocow_write_done(op);
                op->insert_keys.top = op->insert_keys.keys;
        } else if (op->flags & BCH_WRITE_SYNC) {
                closure_sync(&op->cl);
                bch2_nocow_write_done(&op->cl.work);
        } else {
                /*
                 * XXX
                 * needs to run out of process context because ei_quota_lock is
                 * a mutex
                 */
                continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
        }
        return;
err_get_ioref:
        darray_for_each(buckets, i)
                percpu_ref_put(&bch_dev_bkey_exists(c, i->b.inode)->io_ref);

        /* Fall back to COW path: */
        goto out;
err_bucket_stale:
        darray_for_each(buckets, i) {
                bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
                if (i == stale_at)
                        break;
        }

        /* We can retry this: */
        ret = -BCH_ERR_transaction_restart;
        goto err_get_ioref;
}

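/*
 * Main write path: repeatedly allocate space at a write point and write
 * out as much as was allocated, until the whole op is done or we'd have
 * to block (in which case the op continues asynchronously):
 */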
1378 static void __bch2_write(struct bch_write_op *op)
1379 {
1380         struct bch_fs *c = op->c;
1381         struct write_point *wp = NULL;
1382         struct bio *bio = NULL;
1383         unsigned nofs_flags;
1384         int ret;
1385
1386         nofs_flags = memalloc_nofs_save();
1387
1388         if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
1389                 bch2_nocow_write(op);
1390                 if (op->flags & BCH_WRITE_DONE)
1391                         goto out_nofs_restore;
1392         }
1393 again:
1394         memset(&op->failed, 0, sizeof(op->failed));
1395
1396         do {
1397                 struct bkey_i *key_to_write;
1398                 unsigned key_to_write_offset = op->insert_keys.top_p -
1399                         op->insert_keys.keys_p;
1400
1401                 /* +1 for possible cache device: */
1402                 if (op->open_buckets.nr + op->nr_replicas + 1 >
1403                     ARRAY_SIZE(op->open_buckets.v))
1404                         break;
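                /*
                 * Worked example (illustrative numbers): with nr_replicas = 2
                 * and, say, a 16-entry open_buckets array, we stop batching
                 * once 14 buckets are pinned (14 + 2 + 1 > 16); the index
                 * update that follows releases them before we loop again.
                 */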
1405
1406                 if (bch2_keylist_realloc(&op->insert_keys,
1407                                         op->inline_keys,
1408                                         ARRAY_SIZE(op->inline_keys),
1409                                         BKEY_EXTENT_U64s_MAX))
1410                         break;
1411
1412                 /*
1413                  * The copygc thread is now global, which means it's no longer
1414                  * freeing up space on specific disks; allocations targeting
1415                  * specific disks may therefore hang arbitrarily long:
1416                  */
1417                 ret = bch2_trans_do(c, NULL, NULL, 0,
1418                         bch2_alloc_sectors_start_trans(trans,
1419                                 op->target,
1420                                 op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
1421                                 op->write_point,
1422                                 &op->devs_have,
1423                                 op->nr_replicas,
1424                                 op->nr_replicas_required,
1425                                 op->watermark,
1426                                 op->flags,
1427                                 (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
1428                                               BCH_WRITE_ONLY_SPECIFIED_DEVS))
1429                                 ? NULL : &op->cl, &wp));
1430                 if (unlikely(ret)) {
1431                         if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
1432                                 break;
1433
1434                         goto err;
1435                 }
1436
1437                 EBUG_ON(!wp);
1438
1439                 bch2_open_bucket_get(c, wp, &op->open_buckets);
1440                 ret = bch2_write_extent(op, wp, &bio);
1441
1442                 bch2_alloc_sectors_done_inlined(c, wp);
1443 err:
1444                 if (ret <= 0) {
1445                         op->flags |= BCH_WRITE_DONE;
1446
1447                         if (ret < 0) {
1448                                 if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
1449                                         bch_err_inum_offset_ratelimited(c,
1450                                                 op->pos.inode,
1451                                                 op->pos.offset << 9,
1452                                                 "%s(): error: %s", __func__, bch2_err_str(ret));
1453                                 op->error = ret;
1454                                 break;
1455                         }
1456                 }
1457
1458                 bio->bi_end_io  = bch2_write_endio;
1459                 bio->bi_private = &op->cl;
1460                 bio->bi_opf |= REQ_OP_WRITE;
1461
1462                 closure_get(bio->bi_private);
1463
1464                 key_to_write = (void *) (op->insert_keys.keys_p +
1465                                          key_to_write_offset);
1466
1467                 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1468                                           key_to_write, false);
1469         } while (ret);
1470
1471         /*
1472          * Sync or no?
1473          *
1474          * If we're running asynchronously, we may still want to block
1475          * synchronously here if we weren't able to submit all of the IO at
1476          * once, as that signals backpressure to the caller.
1477          */
1478         if ((op->flags & BCH_WRITE_SYNC) ||
1479             (!(op->flags & BCH_WRITE_DONE) &&
1480              !(op->flags & BCH_WRITE_IN_WORKER))) {
1481                 closure_sync(&op->cl);
1482                 __bch2_write_index(op);
1483
1484                 if (!(op->flags & BCH_WRITE_DONE))
1485                         goto again;
1486                 bch2_write_done(&op->cl);
1487         } else {
1488                 bch2_write_queue(op, wp);
1489                 continue_at(&op->cl, bch2_write_index, NULL);
1490         }
1491 out_nofs_restore:
1492         memalloc_nofs_restore(nofs_flags);
1493 }
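/*
 * Summary of the sync-or-async decision at the tail of __bch2_write()
 * (a sketch, restating the code above):
 *
 *	BCH_WRITE_SYNC set:	wait for the IO and do the index update(s)
 *				here, looping until BCH_WRITE_DONE.
 *	!DONE && !IN_WORKER:	also wait here, even for an async op -
 *				running out of open buckets mid-write is
 *				how backpressure reaches the caller.
 *	otherwise:		queue the op and finish up asynchronously
 *				in bch2_write_index().
 */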
1494
1495 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
1496 {
1497         struct bio *bio = &op->wbio.bio;
1498         struct bvec_iter iter;
1499         struct bkey_i_inline_data *id;
1500         unsigned sectors;
1501         int ret;
1502
1503         op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
1504         op->flags |= BCH_WRITE_DONE;
1505
1506         bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
1507
1508         ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
1509                                    ARRAY_SIZE(op->inline_keys),
1510                                    BKEY_U64s + DIV_ROUND_UP(data_len, 8));
1511         if (ret) {
1512                 op->error = ret;
1513                 goto err;
1514         }
1515
1516         sectors = bio_sectors(bio);
1517         op->pos.offset += sectors;
1518
1519         id = bkey_inline_data_init(op->insert_keys.top);
1520         id->k.p         = op->pos;
1521         id->k.version   = op->version;
1522         id->k.size      = sectors;
1523
1524         iter = bio->bi_iter;
1525         iter.bi_size = data_len;
1526         memcpy_from_bio(id->v.data, bio, iter);
1527
1528         while (data_len & 7)
1529                 id->v.data[data_len++] = '\0';
1530         set_bkey_val_bytes(&id->k, data_len);
1531         bch2_keylist_push(&op->insert_keys);
1532
1533         __bch2_write_index(op);
1534 err:
1535         bch2_write_done(&op->cl);
1536 }
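/*
 * Worked example for the padding loop above (illustrative): an inline
 * write of data_len = 11 is zero-padded to 16 bytes, so
 * set_bkey_val_bytes() always sees a multiple of 8 and the value
 * occupies a whole number of u64s (here 2).
 */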
1537
1538 /**
1539  * bch2_write() - handle a write to a cache device or flash-only volume
1540  * @cl:         &bch_write_op->cl
1541  *
1542  * This is the starting point for any data to end up in a cache device; it could
1543  * be from a normal write, or a writeback write, or a write to a flash-only
1544  * volume - it's also used by the moving garbage collector to compact data in
1545  * mostly empty buckets.
1546  *
1547  * It first writes the data to the cache, creating a list of keys to be inserted
1548  * (if the data won't fit in a single open bucket, there will be multiple keys);
1549  * after the data is written it calls bch_journal, and after the keys have been
1550  * added to the next journal write they're inserted into the btree.
1551  *
1552  * If op->discard is true, instead of inserting the data it invalidates the
1553  * region of the cache represented by op->bio and op->inode.
1554  */
1555 CLOSURE_CALLBACK(bch2_write)
1556 {
1557         closure_type(op, struct bch_write_op, cl);
1558         struct bio *bio = &op->wbio.bio;
1559         struct bch_fs *c = op->c;
1560         unsigned data_len;
1561
1562         EBUG_ON(op->cl.parent);
1563         BUG_ON(!op->nr_replicas);
1564         BUG_ON(!op->write_point.v);
1565         BUG_ON(bkey_eq(op->pos, POS_MAX));
1566
1567         op->start_time = local_clock();
1568         bch2_keylist_init(&op->insert_keys, op->inline_keys);
1569         wbio_init(bio)->put_bio = false;
1570
1571         if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
1572                 bch_err_inum_offset_ratelimited(c,
1573                         op->pos.inode,
1574                         op->pos.offset << 9,
1575                         "misaligned write");
1576                 op->error = -EIO;
1577                 goto err;
1578         }
1579
1580         if (c->opts.nochanges) {
1581                 op->error = -BCH_ERR_erofs_no_writes;
1582                 goto err;
1583         }
1584
1585         if (!(op->flags & BCH_WRITE_MOVE) &&
1586             !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
1587                 op->error = -BCH_ERR_erofs_no_writes;
1588                 goto err;
1589         }
1590
1591         this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
1592         bch2_increment_clock(c, bio_sectors(bio), WRITE);
1593
1594         data_len = min_t(u64, bio->bi_iter.bi_size,
1595                          op->new_i_size - (op->pos.offset << 9));
1596
1597         if (c->opts.inline_data &&
1598             data_len <= min(block_bytes(c) / 2, 1024U)) {
1599                 bch2_write_data_inline(op, data_len);
1600                 return;
1601         }
1602
1603         __bch2_write(op);
1604         return;
1605 err:
1606         bch2_disk_reservation_put(c, &op->res);
1607
1608         closure_debug_destroy(&op->cl);
1609         if (op->end_io)
1610                 op->end_io(op);
1611 }
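/*
 * Sketch of a typical caller (illustrative; assumes a caller-supplied
 * buffer and completion callback, and elides subvolume/reservation
 * setup and error handling):
 *
 *	struct bch_write_op *op = ...;
 *
 *	bch2_write_op_init(op, c, io_opts);
 *	op->pos		= POS(inum, sector);
 *	op->write_point	= writepoint_hashed((unsigned long) current);
 *	op->nr_replicas	= io_opts.data_replicas;
 *	op->end_io	= my_write_done;	// hypothetical completion hook
 *	bch2_bio_map(&op->wbio.bio, buf, len);
 *
 *	closure_call(&op->cl, bch2_write, NULL, NULL);
 */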
1612
1613 static const char * const bch2_write_flags[] = {
1614 #define x(f)    #f,
1615         BCH_WRITE_FLAGS()
1616 #undef x
1617         NULL
1618 };
1619
1620 void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
1621 {
1622         prt_str(out, "pos: ");
1623         bch2_bpos_to_text(out, op->pos);
1624         prt_newline(out);
1625         printbuf_indent_add(out, 2);
1626
1627         prt_str(out, "started: ");
1628         bch2_pr_time_units(out, local_clock() - op->start_time);
1629         prt_newline(out);
1630
1631         prt_str(out, "flags: ");
1632         prt_bitflags(out, bch2_write_flags, op->flags);
1633         prt_newline(out);
1634
1635         prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
1636         prt_newline(out);
1637
1638         printbuf_indent_sub(out, 2);
1639 }
1640
1641 void bch2_fs_io_write_exit(struct bch_fs *c)
1642 {
1643         mempool_exit(&c->bio_bounce_pages);
1644         bioset_exit(&c->bio_write);
1645 }
1646
1647 int bch2_fs_io_write_init(struct bch_fs *c)
1648 {
1649         if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
1650                         BIOSET_NEED_BVECS))
1651                 return -BCH_ERR_ENOMEM_bio_write_init;
1652
1653         if (mempool_init_page_pool(&c->bio_bounce_pages,
1654                                    max_t(unsigned,
1655                                          c->opts.btree_node_size,
1656                                          c->opts.encoded_extent_max) /
1657                                    PAGE_SIZE, 0))
1658                 return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
1659
1660         return 0;
1661 }
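/*
 * Example sizing for the bounce pool above (illustrative): with
 * btree_node_size = 256k and encoded_extent_max = 128k on a machine
 * with 4k pages, the mempool reserves max(256k, 128k) / 4k = 64
 * single-page allocations.
 */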