1 /*
2  * Some low level IO code, and hacks for various block layer limitations
3  *
4  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc_foreground.h"
10 #include "bset.h"
11 #include "btree_update.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "compress.h"
15 #include "clock.h"
16 #include "debug.h"
17 #include "disk_groups.h"
18 #include "ec.h"
19 #include "error.h"
20 #include "extents.h"
21 #include "io.h"
22 #include "journal.h"
23 #include "keylist.h"
24 #include "move.h"
25 #include "rebalance.h"
26 #include "super.h"
27 #include "super-io.h"
28
29 #include <linux/blkdev.h>
30 #include <linux/random.h>
31
32 #include <trace/events/bcachefs.h>
33
34 static bool bch2_target_congested(struct bch_fs *c, u16 target)
35 {
36         const struct bch_devs_mask *devs;
37         unsigned d, nr = 0, total = 0;
38         u64 now = local_clock(), last;
39         s64 congested;
40         struct bch_dev *ca;
41
42         if (!target)
43                 return false;
44
45         rcu_read_lock();
46         devs = bch2_target_to_mask(c, target);
47         for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
48                 ca = rcu_dereference(c->devs[d]);
49                 if (!ca)
50                         continue;
51
52                 congested = atomic_read(&ca->congested);
53                 last = READ_ONCE(ca->congested_last);
54                 if (time_after64(now, last))
55                         congested -= (now - last) >> 12;
56
57                 total += max(congested, 0LL);
58                 nr++;
59         }
60         rcu_read_unlock();
61
62         return bch2_rand_range(nr * CONGESTED_MAX) < total;
63 }
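/*
 * Rough model of the decision above (illustrative only; assumes
 * local_clock() returns nanoseconds and bch2_rand_range() is uniform):
 *
 *	score(dev)   = max(dev->congested - ((now - congested_last) >> 12), 0)
 *	P(congested) = sum(score) / (nr_devs * CONGESTED_MAX)
 *
 * i.e. each device's congestion score decays by about one unit per ~4us
 * since it was last bumped, and the target is reported congested with a
 * probability proportional to the average remaining score.
 */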
64
65 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
66                                        u64 now, int rw)
67 {
68         u64 latency_capable =
69                 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
70         /* ideally we'd be taking into account the device's variance here: */
71         u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
72         s64 latency_over = io_latency - latency_threshold;
73
74         if (latency_threshold && latency_over > 0) {
75                 /*
76                  * bump up congested by approximately latency_over * 4 /
77                  * latency_threshold - we don't need much accuracy here so don't
78                  * bother with the divide:
79                  */
80                 if (atomic_read(&ca->congested) < CONGESTED_MAX)
81                         atomic_add(latency_over >>
82                                    max_t(int, ilog2(latency_threshold) - 2, 0),
83                                    &ca->congested);
84
85                 ca->congested_last = now;
86         } else if (atomic_read(&ca->congested) > 0) {
87                 atomic_dec(&ca->congested);
88         }
89 }
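/*
 * Worked example for the shift approximation above (illustrative numbers
 * only): with latency_threshold = 1024 and latency_over = 2048,
 * ilog2(1024) - 2 = 8, so we add 2048 >> 8 = 8 to the congestion count,
 * the same as the exact latency_over * 4 / latency_threshold = 8, but
 * without a division.  For thresholds that aren't powers of two the result
 * is only approximate, which is fine for a heuristic like this.
 */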
90
91 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
92 {
93         atomic64_t *latency = &ca->cur_latency[rw];
94         u64 now = local_clock();
95         u64 io_latency = time_after64(now, submit_time)
96                 ? now - submit_time
97                 : 0;
98         u64 old, new, v = atomic64_read(latency);
99
100         do {
101                 old = v;
102
103                 /*
104                  * If the IO latency was reasonably close to the current
105                  * latency, skip the update (and the atomic cmpxchg) most of
106                  * the time:
107                  */
108                 if (abs((int) (old - io_latency)) < (old >> 1) &&
109                     now & ~(~0 << 5))
110                         break;
111
112                 new = ewma_add(old, io_latency, 5);
113         } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
114
115         bch2_congested_acct(ca, io_latency, now, rw);
116
117         __bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
118 }
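/*
 * Note on the update above: assuming ewma_add() is the usual shift-based
 * exponentially weighted moving average, a weight of 5 gives each new
 * sample a weight of roughly 1/32, i.e. new ~= (old * 31 + io_latency) / 32.
 * The early break skips the cmpxchg when the sample is within ~50% of the
 * current average, except roughly one call in 32 (when the low 5 bits of
 * the clock happen to be zero), so the average still tracks slow drift.
 */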
119
120 /* Allocate, free from mempool: */
121
122 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
123 {
124         struct bio_vec *bv;
125         unsigned i;
126
127         bio_for_each_segment_all(bv, bio, i)
128                 if (bv->bv_page != ZERO_PAGE(0))
129                         mempool_free(bv->bv_page, &c->bio_bounce_pages);
130         bio->bi_vcnt = 0;
131 }
132
133 static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
134                                     bool *using_mempool)
135 {
136         struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
137
138         if (likely(!*using_mempool)) {
139                 bv->bv_page = alloc_page(GFP_NOIO);
140                 if (unlikely(!bv->bv_page)) {
141                         mutex_lock(&c->bio_bounce_pages_lock);
142                         *using_mempool = true;
143                         goto pool_alloc;
144
145                 }
146         } else {
147 pool_alloc:
148                 bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
149         }
150
151         bv->bv_len = PAGE_SIZE;
152         bv->bv_offset = 0;
153 }
154
155 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
156                                size_t bytes)
157 {
158         bool using_mempool = false;
159
160         BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);
161
162         bio->bi_iter.bi_size = bytes;
163
164         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
165                 bch2_bio_alloc_page_pool(c, bio, &using_mempool);
166
167         if (using_mempool)
168                 mutex_unlock(&c->bio_bounce_pages_lock);
169 }
170
171 void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
172                                     size_t bytes)
173 {
174         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
175                 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
176
177                 BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
178
179                 bv->bv_page = alloc_page(GFP_NOIO);
180                 if (!bv->bv_page) {
181                         /*
182                          * We already allocated from the mempool; we can't allocate from it
183                          * again without freeing the pages we already allocated, or else we
184                          * could deadlock:
185                          */
186                         bch2_bio_free_pages_pool(c, bio);
187                         bch2_bio_alloc_pages_pool(c, bio, bytes);
188                         return;
189                 }
190
191                 bv->bv_len = PAGE_SIZE;
192                 bv->bv_offset = 0;
193                 bio->bi_vcnt++;
194         }
195
196         bio->bi_iter.bi_size = bytes;
197 }
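/*
 * Note on the helpers above: bounce pages come from alloc_page() when
 * possible and only fall back to the bio_bounce_pages mempool (with
 * bio_bounce_pages_lock held across the mempool allocations) under memory
 * pressure.  When growing a bio whose pages may already have come from the
 * mempool, we free the whole bio and reallocate via
 * bch2_bio_alloc_pages_pool() rather than drawing more pages from the
 * mempool, to avoid the deadlock described in the comment above.
 */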
198
199 /* Writes */
200
201 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
202                                enum bch_data_type type,
203                                const struct bkey_i *k)
204 {
205         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
206         const struct bch_extent_ptr *ptr;
207         struct bch_write_bio *n;
208         struct bch_dev *ca;
209
210         BUG_ON(c->opts.nochanges);
211
212         bkey_for_each_ptr(ptrs, ptr) {
213                 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
214                        !c->devs[ptr->dev]);
215
216                 ca = bch_dev_bkey_exists(c, ptr->dev);
217
218                 if (to_entry(ptr + 1) < ptrs.end) {
219                         n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
220                                                    &ca->replica_set));
221
222                         n->bio.bi_end_io        = wbio->bio.bi_end_io;
223                         n->bio.bi_private       = wbio->bio.bi_private;
224                         n->parent               = wbio;
225                         n->split                = true;
226                         n->bounce               = false;
227                         n->put_bio              = true;
228                         n->bio.bi_opf           = wbio->bio.bi_opf;
229                         bio_inc_remaining(&wbio->bio);
230                 } else {
231                         n = wbio;
232                         n->split                = false;
233                 }
234
235                 n->c                    = c;
236                 n->dev                  = ptr->dev;
237                 n->have_ioref           = bch2_dev_get_ioref(ca, WRITE);
238                 n->submit_time          = local_clock();
239                 n->bio.bi_iter.bi_sector = ptr->offset;
240
241                 if (!journal_flushes_device(ca))
242                         n->bio.bi_opf |= REQ_FUA;
243
244                 if (likely(n->have_ioref)) {
245                         this_cpu_add(ca->io_done->sectors[WRITE][type],
246                                      bio_sectors(&n->bio));
247
248                         bio_set_dev(&n->bio, ca->disk_sb.bdev);
249                         submit_bio(&n->bio);
250                 } else {
251                         n->bio.bi_status        = BLK_STS_REMOVED;
252                         bio_endio(&n->bio);
253                 }
254         }
255 }
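/*
 * Note on the fan-out above: we submit one bio per extent pointer.  All but
 * the last pointer get a clone of the original write bio, with
 * bio_inc_remaining() keeping the parent's completion pending; the last
 * pointer reuses the original bio itself.  Pointers to devices we couldn't
 * get an io ref for complete immediately with BLK_STS_REMOVED, so the endio
 * handler sees them as failed writes.
 */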
256
257 static void __bch2_write(struct closure *);
258
259 static void bch2_write_done(struct closure *cl)
260 {
261         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
262         struct bch_fs *c = op->c;
263
264         if (!op->error && (op->flags & BCH_WRITE_FLUSH))
265                 op->error = bch2_journal_error(&c->journal);
266
267         if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
268                 bch2_disk_reservation_put(c, &op->res);
269         percpu_ref_put(&c->writes);
270         bch2_keylist_free(&op->insert_keys, op->inline_keys);
271
272         bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
273
274         closure_return(cl);
275 }
276
277 int bch2_write_index_default(struct bch_write_op *op)
278 {
279         struct bch_fs *c = op->c;
280         struct btree_trans trans;
281         struct btree_iter *iter;
282         struct keylist *keys = &op->insert_keys;
283         int ret;
284
285         BUG_ON(bch2_keylist_empty(keys));
286         bch2_verify_keylist_sorted(keys);
287
288         bch2_trans_init(&trans, c);
289
290         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
291                                    bkey_start_pos(&bch2_keylist_front(keys)->k),
292                                    BTREE_ITER_INTENT);
293
294         do {
295                 BKEY_PADDED(k) split;
296
297                 bkey_copy(&split.k, bch2_keylist_front(keys));
298
299                 bch2_extent_trim_atomic(&split.k, iter);
300
301                 bch2_trans_update(&trans,
302                                   BTREE_INSERT_ENTRY(iter, &split.k));
303
304                 ret = bch2_trans_commit(&trans, &op->res, op_journal_seq(op),
305                                         BTREE_INSERT_NOFAIL|
306                                         BTREE_INSERT_USE_RESERVE);
307                 if (ret)
308                         break;
309
310                 if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
311                         bch2_cut_front(iter->pos, bch2_keylist_front(keys));
312                 else
313                         bch2_keylist_pop_front(keys);
314         } while (!bch2_keylist_empty(keys));
315
316         bch2_trans_exit(&trans);
317
318         return ret;
319 }
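/*
 * Note on the default index update above: each key on the keylist is
 * trimmed to an extent that can be inserted atomically
 * (bch2_extent_trim_atomic()) and committed; if only part of the key was
 * inserted we cut the committed portion off the front and retry, otherwise
 * we pop it, until the keylist is empty or a commit fails.
 */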
320
321 /**
322  * bch2_write_index - after a write, update the index to point to the new data
323  */
324 static void __bch2_write_index(struct bch_write_op *op)
325 {
326         struct bch_fs *c = op->c;
327         struct keylist *keys = &op->insert_keys;
328         struct bch_extent_ptr *ptr;
329         struct bkey_i *src, *dst = keys->keys, *n, *k;
330         unsigned dev;
331         int ret;
332
333         for (src = keys->keys; src != keys->top; src = n) {
334                 n = bkey_next(src);
335                 bkey_copy(dst, src);
336
337                 bch2_bkey_drop_ptrs(bkey_i_to_s(dst), ptr,
338                         test_bit(ptr->dev, op->failed.d));
339
340                 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(dst))) {
341                         ret = -EIO;
342                         goto err;
343                 }
344
345                 dst = bkey_next(dst);
346         }
347
348         keys->top = dst;
349
350         /*
351          * probably not the ideal place to hook this in, but I don't
352          * particularly want to plumb io_opts all the way through the btree
353          * update stack right now
354          */
355         for_each_keylist_key(keys, k)
356                 bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
357
358         if (!bch2_keylist_empty(keys)) {
359                 u64 sectors_start = keylist_sectors(keys);
360                 int ret = op->index_update_fn(op);
361
362                 BUG_ON(keylist_sectors(keys) && !ret);
363
364                 op->written += sectors_start - keylist_sectors(keys);
365
366                 if (ret) {
367                         __bcache_io_error(c, "btree IO error %i", ret);
368                         op->error = ret;
369                 }
370         }
371 out:
372          /* If a bucket wasn't written, we can't erasure code it: */
373         for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
374                 bch2_open_bucket_write_error(c, &op->open_buckets, dev);
375
376         bch2_open_buckets_put(c, &op->open_buckets);
377         return;
378 err:
379         keys->top = keys->keys;
380         op->error = ret;
381         goto out;
382 }
383
384 static void bch2_write_index(struct closure *cl)
385 {
386         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
387         struct bch_fs *c = op->c;
388
389         __bch2_write_index(op);
390
391         if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
392                 bch2_journal_flush_seq_async(&c->journal,
393                                              *op_journal_seq(op),
394                                              cl);
395                 continue_at(cl, bch2_write_done, index_update_wq(op));
396         } else {
397                 continue_at_nobarrier(cl, bch2_write_done, NULL);
398         }
399 }
400
401 static void bch2_write_endio(struct bio *bio)
402 {
403         struct closure *cl              = bio->bi_private;
404         struct bch_write_op *op         = container_of(cl, struct bch_write_op, cl);
405         struct bch_write_bio *wbio      = to_wbio(bio);
406         struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
407         struct bch_fs *c                = wbio->c;
408         struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);
409
410         if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
411                 set_bit(wbio->dev, op->failed.d);
412
413         if (wbio->have_ioref) {
414                 bch2_latency_acct(ca, wbio->submit_time, WRITE);
415                 percpu_ref_put(&ca->io_ref);
416         }
417
418         if (wbio->bounce)
419                 bch2_bio_free_pages_pool(c, bio);
420
421         if (wbio->put_bio)
422                 bio_put(bio);
423
424         if (parent)
425                 bio_endio(&parent->bio);
426         else
427                 closure_put(cl);
428 }
429
430 static void init_append_extent(struct bch_write_op *op,
431                                struct write_point *wp,
432                                struct bversion version,
433                                struct bch_extent_crc_unpacked crc)
434 {
435         struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);
436         struct bch_extent_ptr *ptr;
437
438         op->pos.offset += crc.uncompressed_size;
439         e->k.p = op->pos;
440         e->k.size = crc.uncompressed_size;
441         e->k.version = version;
442
443         bch2_extent_crc_append(e, crc);
444         bch2_alloc_sectors_append_ptrs(op->c, wp, &e->k_i,
445                                        crc.compressed_size);
446
447         if (op->flags & BCH_WRITE_CACHED)
448                 extent_for_each_ptr(extent_i_to_s(e), ptr)
449                         ptr->cached = true;
450
451         bch2_keylist_push(&op->insert_keys);
452 }
453
454 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
455                                         struct write_point *wp,
456                                         struct bio *src,
457                                         bool *page_alloc_failed,
458                                         void *buf)
459 {
460         struct bch_write_bio *wbio;
461         struct bio *bio;
462         unsigned output_available =
463                 min(wp->sectors_free << 9, src->bi_iter.bi_size);
464         unsigned pages = DIV_ROUND_UP(output_available, PAGE_SIZE);
465
466         bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
467         wbio                    = wbio_init(bio);
468         wbio->put_bio           = true;
469         /* copy WRITE_SYNC flag */
470         wbio->bio.bi_opf        = src->bi_opf;
471
472         if (buf) {
473                 bio->bi_iter.bi_size = output_available;
474                 bch2_bio_map(bio, buf);
475                 return bio;
476         }
477
478         wbio->bounce            = true;
479
480         /*
481          * We can't use mempool for more than c->sb.encoded_extent_max
482          * worth of pages, but we'd like to allocate more if we can:
483          */
484         while (bio->bi_iter.bi_size < output_available) {
485                 unsigned len = min_t(unsigned, PAGE_SIZE,
486                                      output_available - bio->bi_iter.bi_size);
487                 struct page *p;
488
489                 p = alloc_page(GFP_NOIO);
490                 if (!p) {
491                         unsigned pool_max =
492                                 min_t(unsigned, output_available,
493                                       c->sb.encoded_extent_max << 9);
494
495                         if (bio_sectors(bio) < pool_max)
496                                 bch2_bio_alloc_pages_pool(c, bio, pool_max);
497                         break;
498                 }
499
500                 bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
501                         .bv_page        = p,
502                         .bv_len         = len,
503                         .bv_offset      = 0,
504                 };
505                 bio->bi_iter.bi_size += len;
506         }
507
508         *page_alloc_failed = bio->bi_vcnt < pages;
509         return bio;
510 }
511
512 static int bch2_write_rechecksum(struct bch_fs *c,
513                                  struct bch_write_op *op,
514                                  unsigned new_csum_type)
515 {
516         struct bio *bio = &op->wbio.bio;
517         struct bch_extent_crc_unpacked new_crc;
518         int ret;
519
520         /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
521
522         if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
523             bch2_csum_type_is_encryption(new_csum_type))
524                 new_csum_type = op->crc.csum_type;
525
526         ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
527                                   NULL, &new_crc,
528                                   op->crc.offset, op->crc.live_size,
529                                   new_csum_type);
530         if (ret)
531                 return ret;
532
533         bio_advance(bio, op->crc.offset << 9);
534         bio->bi_iter.bi_size = op->crc.live_size << 9;
535         op->crc = new_crc;
536         return 0;
537 }
538
539 static int bch2_write_decrypt(struct bch_write_op *op)
540 {
541         struct bch_fs *c = op->c;
542         struct nonce nonce = extent_nonce(op->version, op->crc);
543         struct bch_csum csum;
544
545         if (!bch2_csum_type_is_encryption(op->crc.csum_type))
546                 return 0;
547
548         /*
549          * If we need to decrypt data in the write path, we'll no longer be able
550          * to verify the existing checksum (poly1305 mac, in this case) after
551          * it's decrypted - this is the last point we'll be able to reverify the
552          * checksum:
553          */
554         csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
555         if (bch2_crc_cmp(op->crc.csum, csum))
556                 return -EIO;
557
558         bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
559         op->crc.csum_type = 0;
560         op->crc.csum = (struct bch_csum) { 0, 0 };
561         return 0;
562 }
563
564 static enum prep_encoded_ret {
565         PREP_ENCODED_OK,
566         PREP_ENCODED_ERR,
567         PREP_ENCODED_CHECKSUM_ERR,
568         PREP_ENCODED_DO_WRITE,
569 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
570 {
571         struct bch_fs *c = op->c;
572         struct bio *bio = &op->wbio.bio;
573
574         if (!(op->flags & BCH_WRITE_DATA_ENCODED))
575                 return PREP_ENCODED_OK;
576
577         BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
578
579         /* Can we just write the entire extent as is? */
580         if (op->crc.uncompressed_size == op->crc.live_size &&
581             op->crc.compressed_size <= wp->sectors_free &&
582             op->crc.compression_type == op->compression_type) {
583                 if (!op->crc.compression_type &&
584                     op->csum_type != op->crc.csum_type &&
585                     bch2_write_rechecksum(c, op, op->csum_type))
586                         return PREP_ENCODED_CHECKSUM_ERR;
587
588                 return PREP_ENCODED_DO_WRITE;
589         }
590
591         /*
592          * If the data is compressed and we couldn't write the entire extent as
593          * is, we have to decompress it:
594          */
595         if (op->crc.compression_type) {
596                 struct bch_csum csum;
597
598                 if (bch2_write_decrypt(op))
599                         return PREP_ENCODED_CHECKSUM_ERR;
600
601                 /* Last point we can still verify checksum: */
602                 csum = bch2_checksum_bio(c, op->crc.csum_type,
603                                          extent_nonce(op->version, op->crc),
604                                          bio);
605                 if (bch2_crc_cmp(op->crc.csum, csum))
606                         return PREP_ENCODED_CHECKSUM_ERR;
607
608                 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
609                         return PREP_ENCODED_ERR;
610         }
611
612         /*
613          * No longer have compressed data after this point - data might be
614          * encrypted:
615          */
616
617         /*
618          * If the data is checksummed and we're only writing a subset,
619          * rechecksum and adjust bio to point to currently live data:
620          */
621         if ((op->crc.live_size != op->crc.uncompressed_size ||
622              op->crc.csum_type != op->csum_type) &&
623             bch2_write_rechecksum(c, op, op->csum_type))
624                 return PREP_ENCODED_CHECKSUM_ERR;
625
626         /*
627          * If we want to compress the data, it has to be decrypted:
628          */
629         if ((op->compression_type ||
630              bch2_csum_type_is_encryption(op->crc.csum_type) !=
631              bch2_csum_type_is_encryption(op->csum_type)) &&
632             bch2_write_decrypt(op))
633                 return PREP_ENCODED_CHECKSUM_ERR;
634
635         return PREP_ENCODED_OK;
636 }
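/*
 * Summary of the cases above: data that arrives already encoded
 * (BCH_WRITE_DATA_ENCODED) is written out unchanged when the whole extent
 * is live, fits in the write point and already has the desired compression
 * type (PREP_ENCODED_DO_WRITE, possibly after rechecksumming).  Otherwise,
 * compressed data is decrypted, verified and decompressed in place; partial
 * or differently-checksummed data is rechecksummed; and data that is about
 * to be compressed, or whose encryption status is changing, is decrypted
 * first.  A checksum mismatch anywhere in that path returns
 * PREP_ENCODED_CHECKSUM_ERR.
 */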
637
638 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
639 {
640         struct bch_fs *c = op->c;
641         struct bio *src = &op->wbio.bio, *dst = src;
642         struct bvec_iter saved_iter;
643         struct bkey_i *key_to_write;
644         void *ec_buf;
645         unsigned key_to_write_offset = op->insert_keys.top_p -
646                 op->insert_keys.keys_p;
647         unsigned total_output = 0, total_input = 0;
648         bool bounce = false;
649         bool page_alloc_failed = false;
650         int ret, more = 0;
651
652         BUG_ON(!bio_sectors(src));
653
654         ec_buf = bch2_writepoint_ec_buf(c, wp);
655
656         switch (bch2_write_prep_encoded_data(op, wp)) {
657         case PREP_ENCODED_OK:
658                 break;
659         case PREP_ENCODED_ERR:
660                 ret = -EIO;
661                 goto err;
662         case PREP_ENCODED_CHECKSUM_ERR:
663                 goto csum_err;
664         case PREP_ENCODED_DO_WRITE:
665                 if (ec_buf) {
666                         dst = bch2_write_bio_alloc(c, wp, src,
667                                                    &page_alloc_failed,
668                                                    ec_buf);
669                         bio_copy_data(dst, src);
670                         bounce = true;
671                 }
672                 init_append_extent(op, wp, op->version, op->crc);
673                 goto do_write;
674         }
675
676         if (ec_buf ||
677             op->compression_type ||
678             (op->csum_type &&
679              !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
680             (bch2_csum_type_is_encryption(op->csum_type) &&
681              !(op->flags & BCH_WRITE_PAGES_OWNED))) {
682                 dst = bch2_write_bio_alloc(c, wp, src,
683                                            &page_alloc_failed,
684                                            ec_buf);
685                 bounce = true;
686         }
687
688         saved_iter = dst->bi_iter;
689
690         do {
691                 struct bch_extent_crc_unpacked crc =
692                         (struct bch_extent_crc_unpacked) { 0 };
693                 struct bversion version = op->version;
694                 size_t dst_len, src_len;
695
696                 if (page_alloc_failed &&
697                     bio_sectors(dst) < wp->sectors_free &&
698                     bio_sectors(dst) < c->sb.encoded_extent_max)
699                         break;
700
701                 BUG_ON(op->compression_type &&
702                        (op->flags & BCH_WRITE_DATA_ENCODED) &&
703                        bch2_csum_type_is_encryption(op->crc.csum_type));
704                 BUG_ON(op->compression_type && !bounce);
705
706                 crc.compression_type = op->compression_type
707                         ?  bch2_bio_compress(c, dst, &dst_len, src, &src_len,
708                                              op->compression_type)
709                         : 0;
710                 if (!crc.compression_type) {
711                         dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
712                         dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
713
714                         if (op->csum_type)
715                                 dst_len = min_t(unsigned, dst_len,
716                                                 c->sb.encoded_extent_max << 9);
717
718                         if (bounce) {
719                                 swap(dst->bi_iter.bi_size, dst_len);
720                                 bio_copy_data(dst, src);
721                                 swap(dst->bi_iter.bi_size, dst_len);
722                         }
723
724                         src_len = dst_len;
725                 }
726
727                 BUG_ON(!src_len || !dst_len);
728
729                 if (bch2_csum_type_is_encryption(op->csum_type)) {
730                         if (bversion_zero(version)) {
731                                 version.lo = atomic64_inc_return(&c->key_version) + 1;
732                         } else {
733                                 crc.nonce = op->nonce;
734                                 op->nonce += src_len >> 9;
735                         }
736                 }
737
738                 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
739                     !crc.compression_type &&
740                     bch2_csum_type_is_encryption(op->crc.csum_type) ==
741                     bch2_csum_type_is_encryption(op->csum_type)) {
742                         /*
743                          * Note: when we're using rechecksum(), we need to be
744                          * checksumming @src because it has all the data our
745                          * existing checksum covers - if we bounced (because we
746                          * were trying to compress), @dst will only have the
747                          * part of the data the new checksum will cover.
748                          *
749                          * But normally we want to be checksumming post bounce,
750                          * because part of the reason for bouncing is so the
751                          * data can't be modified (by userspace) while it's in
752                          * flight.
753                          */
754                         if (bch2_rechecksum_bio(c, src, version, op->crc,
755                                         &crc, &op->crc,
756                                         src_len >> 9,
757                                         bio_sectors(src) - (src_len >> 9),
758                                         op->csum_type))
759                                 goto csum_err;
760                 } else {
761                         if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
762                             bch2_rechecksum_bio(c, src, version, op->crc,
763                                         NULL, &op->crc,
764                                         src_len >> 9,
765                                         bio_sectors(src) - (src_len >> 9),
766                                         op->crc.csum_type))
767                                 goto csum_err;
768
769                         crc.compressed_size     = dst_len >> 9;
770                         crc.uncompressed_size   = src_len >> 9;
771                         crc.live_size           = src_len >> 9;
772
773                         swap(dst->bi_iter.bi_size, dst_len);
774                         bch2_encrypt_bio(c, op->csum_type,
775                                          extent_nonce(version, crc), dst);
776                         crc.csum = bch2_checksum_bio(c, op->csum_type,
777                                          extent_nonce(version, crc), dst);
778                         crc.csum_type = op->csum_type;
779                         swap(dst->bi_iter.bi_size, dst_len);
780                 }
781
782                 init_append_extent(op, wp, version, crc);
783
784                 if (dst != src)
785                         bio_advance(dst, dst_len);
786                 bio_advance(src, src_len);
787                 total_output    += dst_len;
788                 total_input     += src_len;
789         } while (dst->bi_iter.bi_size &&
790                  src->bi_iter.bi_size &&
791                  wp->sectors_free &&
792                  !bch2_keylist_realloc(&op->insert_keys,
793                                       op->inline_keys,
794                                       ARRAY_SIZE(op->inline_keys),
795                                       BKEY_EXTENT_U64s_MAX));
796
797         more = src->bi_iter.bi_size != 0;
798
799         dst->bi_iter = saved_iter;
800
801         if (dst == src && more) {
802                 BUG_ON(total_output != total_input);
803
804                 dst = bio_split(src, total_input >> 9,
805                                 GFP_NOIO, &c->bio_write);
806                 wbio_init(dst)->put_bio = true;
807                 /* copy WRITE_SYNC flag */
808                 dst->bi_opf             = src->bi_opf;
809         }
810
811         dst->bi_iter.bi_size = total_output;
812
813         /* Free unneeded pages after compressing: */
814         if (to_wbio(dst)->bounce)
815                 while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
816                         mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
817                                      &c->bio_bounce_pages);
818 do_write:
819         /* might have done a realloc... */
820
821         key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);
822
823         bch2_ec_add_backpointer(c, wp,
824                                 bkey_start_pos(&key_to_write->k),
825                                 total_input >> 9);
826
827         dst->bi_end_io  = bch2_write_endio;
828         dst->bi_private = &op->cl;
829         bio_set_op_attrs(dst, REQ_OP_WRITE, 0);
830
831         closure_get(dst->bi_private);
832
833         bch2_submit_wbio_replicas(to_wbio(dst), c, BCH_DATA_USER,
834                                   key_to_write);
835         return more;
836 csum_err:
837         bch_err(c, "error verifying existing checksum while "
838                 "rewriting existing data (memory corruption?)");
839         ret = -EIO;
840 err:
841         if (to_wbio(dst)->bounce)
842                 bch2_bio_free_pages_pool(c, dst);
843         if (to_wbio(dst)->put_bio)
844                 bio_put(dst);
845
846         return ret;
847 }
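/*
 * Rough sketch of the flow above: the source bio is carved into fragments
 * bounded by the write point's free space and, for checksummed data, by
 * encoded_extent_max; each fragment is (optionally) compressed, encrypted
 * and checksummed into dst, and an extent key for it is appended with
 * init_append_extent().  The return value ("more") is nonzero when source
 * data remains, in which case __bch2_write() loops and allocates another
 * write point for the rest.
 */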
848
849 static void __bch2_write(struct closure *cl)
850 {
851         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
852         struct bch_fs *c = op->c;
853         struct write_point *wp;
854         int ret;
855 again:
856         memset(&op->failed, 0, sizeof(op->failed));
857
858         do {
859                 /* +1 for possible cache device: */
860                 if (op->open_buckets.nr + op->nr_replicas + 1 >
861                     ARRAY_SIZE(op->open_buckets.v))
862                         goto flush_io;
863
864                 if (bch2_keylist_realloc(&op->insert_keys,
865                                         op->inline_keys,
866                                         ARRAY_SIZE(op->inline_keys),
867                                         BKEY_EXTENT_U64s_MAX))
868                         goto flush_io;
869
870                 wp = bch2_alloc_sectors_start(c,
871                         op->target,
872                         op->opts.erasure_code,
873                         op->write_point,
874                         &op->devs_have,
875                         op->nr_replicas,
876                         op->nr_replicas_required,
877                         op->alloc_reserve,
878                         op->flags,
879                         (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
880                 EBUG_ON(!wp);
881
882                 if (unlikely(IS_ERR(wp))) {
883                         if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
884                                 ret = PTR_ERR(wp);
885                                 goto err;
886                         }
887
888                         goto flush_io;
889                 }
890
891                 ret = bch2_write_extent(op, wp);
892
893                 bch2_open_bucket_get(c, wp, &op->open_buckets);
894                 bch2_alloc_sectors_done(c, wp);
895
896                 if (ret < 0)
897                         goto err;
898         } while (ret);
899
900         continue_at(cl, bch2_write_index, index_update_wq(op));
901         return;
902 err:
903         op->error = ret;
904
905         continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
906                     ? bch2_write_index
907                     : bch2_write_done, index_update_wq(op));
908         return;
909 flush_io:
910         closure_sync(cl);
911
912         if (!bch2_keylist_empty(&op->insert_keys)) {
913                 __bch2_write_index(op);
914
915                 if (op->error) {
916                         continue_at_nobarrier(cl, bch2_write_done, NULL);
917                         return;
918                 }
919         }
920
921         goto again;
922 }
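/*
 * Note on the loop above: we keep allocating write points and writing
 * extents until bch2_write_extent() reports no more data.  Whenever we'd
 * have to block, because open_buckets or the keylist are full or sector
 * allocation returned -EAGAIN, we sync the closure, flush any completed
 * keys to the index via __bch2_write_index(), and retry from "again"
 * (finishing early if that index update failed).
 */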
923
924 /**
925  * bch2_write - handle a write to a cache device or flash only volume
926  *
927  * This is the starting point for any data to end up in a cache device; it could
928  * be from a normal write, or a writeback write, or a write to a flash only
929  * volume - it's also used by the moving garbage collector to compact data in
930  * mostly empty buckets.
931  *
932  * It first writes the data to the cache, creating a list of keys to be inserted
933  * (if the data won't fit in a single open bucket, there will be multiple keys);
934  * after the data is written it calls bch_journal, and after the keys have been
935  * added to the next journal write they're inserted into the btree.
936  *
937  * If op->discard is true, instead of inserting the data it invalidates the
938  * region of the cache represented by op->bio and op->inode.
939  */
940 void bch2_write(struct closure *cl)
941 {
942         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
943         struct bch_fs *c = op->c;
944
945         BUG_ON(!op->nr_replicas);
946         BUG_ON(!op->write_point.v);
947         BUG_ON(!bkey_cmp(op->pos, POS_MAX));
948         BUG_ON(bio_sectors(&op->wbio.bio) > U16_MAX);
949
950         op->start_time = local_clock();
951
952         bch2_keylist_init(&op->insert_keys, op->inline_keys);
953         wbio_init(&op->wbio.bio)->put_bio = false;
954
955         if (c->opts.nochanges ||
956             !percpu_ref_tryget(&c->writes)) {
957                 __bcache_io_error(c, "read only");
958                 op->error = -EROFS;
959                 if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
960                         bch2_disk_reservation_put(c, &op->res);
961                 closure_return(cl);
962                 return;
963         }
964
965         bch2_increment_clock(c, bio_sectors(&op->wbio.bio), WRITE);
966
967         continue_at_nobarrier(cl, __bch2_write, NULL);
968 }
969
970 /* Cache promotion on read */
971
972 struct promote_op {
973         struct closure          cl;
974         struct rcu_head         rcu;
975         u64                     start_time;
976
977         struct rhash_head       hash;
978         struct bpos             pos;
979
980         struct migrate_write    write;
981         struct bio_vec          bi_inline_vecs[0]; /* must be last */
982 };
983
984 static const struct rhashtable_params bch_promote_params = {
985         .head_offset    = offsetof(struct promote_op, hash),
986         .key_offset     = offsetof(struct promote_op, pos),
987         .key_len        = sizeof(struct bpos),
988 };
989
990 static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
991                                   struct bpos pos,
992                                   struct bch_io_opts opts,
993                                   unsigned flags)
994 {
995         if (!opts.promote_target)
996                 return false;
997
998         if (!(flags & BCH_READ_MAY_PROMOTE))
999                 return false;
1000
1001         if (percpu_ref_is_dying(&c->writes))
1002                 return false;
1003
1004         if (!bkey_extent_is_data(k.k))
1005                 return false;
1006
1007         if (bch2_extent_has_target(c, bkey_s_c_to_extent(k), opts.promote_target))
1008                 return false;
1009
1010         if (bch2_target_congested(c, opts.promote_target))
1011                 return false;
1012
1013         if (rhashtable_lookup_fast(&c->promote_table, &pos,
1014                                    bch_promote_params))
1015                 return false;
1016
1017         return true;
1018 }
1019
1020 static void promote_free(struct bch_fs *c, struct promote_op *op)
1021 {
1022         int ret;
1023
1024         ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
1025                                      bch_promote_params);
1026         BUG_ON(ret);
1027         percpu_ref_put(&c->writes);
1028         kfree_rcu(op, rcu);
1029 }
1030
1031 static void promote_done(struct closure *cl)
1032 {
1033         struct promote_op *op =
1034                 container_of(cl, struct promote_op, cl);
1035         struct bch_fs *c = op->write.op.c;
1036
1037         bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
1038                                op->start_time);
1039
1040         bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
1041         promote_free(c, op);
1042 }
1043
1044 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
1045 {
1046         struct bch_fs *c = rbio->c;
1047         struct closure *cl = &op->cl;
1048         struct bio *bio = &op->write.op.wbio.bio;
1049
1050         trace_promote(&rbio->bio);
1051
1052         /* we now own pages: */
1053         BUG_ON(!rbio->bounce);
1054         BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
1055
1056         memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
1057                sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1058         swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
1059
1060         bch2_migrate_read_done(&op->write, rbio);
1061
1062         closure_init(cl, NULL);
1063         closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
1064         closure_return_with_destructor(cl, promote_done);
1065 }
1066
1067 noinline
1068 static struct promote_op *__promote_alloc(struct bch_fs *c,
1069                                           struct bpos pos,
1070                                           struct extent_ptr_decoded *pick,
1071                                           struct bch_io_opts opts,
1072                                           unsigned rbio_sectors,
1073                                           struct bch_read_bio **rbio)
1074 {
1075         struct promote_op *op = NULL;
1076         struct bio *bio;
1077         unsigned rbio_pages = DIV_ROUND_UP(rbio_sectors, PAGE_SECTORS);
1078         /* data might have to be decompressed in the write path: */
1079         unsigned wbio_pages = DIV_ROUND_UP(pick->crc.uncompressed_size,
1080                                            PAGE_SECTORS);
1081         int ret;
1082
1083         if (!percpu_ref_tryget(&c->writes))
1084                 return NULL;
1085
1086         op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * wbio_pages,
1087                      GFP_NOIO);
1088         if (!op)
1089                 goto err;
1090
1091         op->start_time = local_clock();
1092         op->pos = pos;
1093
1094         /*
1095          * promotes require bouncing, but if the extent isn't
1096          * checksummed/compressed it might be too big for the mempool:
1097          */
1098         if (rbio_sectors > c->sb.encoded_extent_max) {
1099                 *rbio = kzalloc(sizeof(struct bch_read_bio) +
1100                                 sizeof(struct bio_vec) * rbio_pages,
1101                                 GFP_NOIO);
1102                 if (!*rbio)
1103                         goto err;
1104
1105                 rbio_init(&(*rbio)->bio, opts);
1106                 bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs,
1107                          rbio_pages);
1108
1109                 (*rbio)->bio.bi_iter.bi_size = rbio_sectors << 9;
1110                 bch2_bio_map(&(*rbio)->bio, NULL);
1111
1112                 if (bch2_bio_alloc_pages(&(*rbio)->bio, GFP_NOIO))
1113                         goto err;
1114
1115                 (*rbio)->bounce         = true;
1116                 (*rbio)->split          = true;
1117                 (*rbio)->kmalloc        = true;
1118         }
1119
1120         if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
1121                                           bch_promote_params))
1122                 goto err;
1123
1124         bio = &op->write.op.wbio.bio;
1125         bio_init(bio, bio->bi_inline_vecs, wbio_pages);
1126
1127         ret = bch2_migrate_write_init(c, &op->write,
1128                         writepoint_hashed((unsigned long) current),
1129                         opts,
1130                         DATA_PROMOTE,
1131                         (struct data_opts) {
1132                                 .target = opts.promote_target
1133                         },
1134                         bkey_s_c_null);
1135         BUG_ON(ret);
1136
1137         return op;
1138 err:
1139         if (*rbio)
1140                 bio_free_pages(&(*rbio)->bio);
1141         kfree(*rbio);
1142         *rbio = NULL;
1143         kfree(op);
1144         percpu_ref_put(&c->writes);
1145         return NULL;
1146 }
1147
1148 static inline struct promote_op *promote_alloc(struct bch_fs *c,
1149                                                struct bvec_iter iter,
1150                                                struct bkey_s_c k,
1151                                                struct extent_ptr_decoded *pick,
1152                                                struct bch_io_opts opts,
1153                                                unsigned flags,
1154                                                struct bch_read_bio **rbio,
1155                                                bool *bounce,
1156                                                bool *read_full)
1157 {
1158         bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
1159         unsigned sectors = promote_full
1160                 ? pick->crc.compressed_size
1161                 : bvec_iter_sectors(iter);
1162         struct bpos pos = promote_full
1163                 ? bkey_start_pos(k.k)
1164                 : POS(k.k->p.inode, iter.bi_sector);
1165         struct promote_op *promote;
1166
1167         if (!should_promote(c, k, pos, opts, flags))
1168                 return NULL;
1169
1170         promote = __promote_alloc(c, pos, pick, opts, sectors, rbio);
1171         if (!promote)
1172                 return NULL;
1173
1174         *bounce         = true;
1175         *read_full      = promote_full;
1176         return promote;
1177 }
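/*
 * Note on promote_alloc(): we either promote the whole extent (when the
 * read already covers it, or promote_whole_extents is set) or just the
 * range being read.  Either way the read has to be bounced, and read_full
 * is forced to match, so the data landing in the bounce buffer is exactly
 * what the promote will write out.
 */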
1178
1179 /* Read */
1180
1181 #define READ_RETRY_AVOID        1
1182 #define READ_RETRY              2
1183 #define READ_ERR                3
1184
1185 enum rbio_context {
1186         RBIO_CONTEXT_NULL,
1187         RBIO_CONTEXT_HIGHPRI,
1188         RBIO_CONTEXT_UNBOUND,
1189 };
1190
1191 static inline struct bch_read_bio *
1192 bch2_rbio_parent(struct bch_read_bio *rbio)
1193 {
1194         return rbio->split ? rbio->parent : rbio;
1195 }
1196
1197 __always_inline
1198 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
1199                            enum rbio_context context,
1200                            struct workqueue_struct *wq)
1201 {
1202         if (context <= rbio->context) {
1203                 fn(&rbio->work);
1204         } else {
1205                 rbio->work.func         = fn;
1206                 rbio->context           = context;
1207                 queue_work(wq, &rbio->work);
1208         }
1209 }
1210
1211 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
1212 {
1213         BUG_ON(rbio->bounce && !rbio->split);
1214
1215         if (rbio->promote)
1216                 promote_free(rbio->c, rbio->promote);
1217         rbio->promote = NULL;
1218
1219         if (rbio->bounce)
1220                 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
1221
1222         if (rbio->split) {
1223                 struct bch_read_bio *parent = rbio->parent;
1224
1225                 if (rbio->kmalloc)
1226                         kfree(rbio);
1227                 else
1228                         bio_put(&rbio->bio);
1229
1230                 rbio = parent;
1231         }
1232
1233         return rbio;
1234 }
1235
1236 static void bch2_rbio_done(struct bch_read_bio *rbio)
1237 {
1238         bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
1239                                rbio->start_time);
1240         bio_endio(&rbio->bio);
1241 }
1242
1243 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
1244                                      struct bvec_iter bvec_iter, u64 inode,
1245                                      struct bch_io_failures *failed,
1246                                      unsigned flags)
1247 {
1248         struct btree_trans trans;
1249         struct btree_iter *iter;
1250         BKEY_PADDED(k) tmp;
1251         struct bkey_s_c k;
1252         int ret;
1253
1254         flags &= ~BCH_READ_LAST_FRAGMENT;
1255
1256         bch2_trans_init(&trans, c);
1257
1258         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
1259                                    rbio->pos, BTREE_ITER_SLOTS);
1260 retry:
1261         rbio->bio.bi_status = 0;
1262
1263         k = bch2_btree_iter_peek_slot(iter);
1264         if (bkey_err(k))
1265                 goto err;
1266
1267         bkey_reassemble(&tmp.k, k);
1268         k = bkey_i_to_s_c(&tmp.k);
1269         bch2_trans_unlock(&trans);
1270
1271         if (!bkey_extent_is_data(k.k) ||
1272             !bch2_extent_matches_ptr(c, bkey_i_to_s_c_extent(&tmp.k),
1273                                      rbio->pick.ptr,
1274                                      rbio->pos.offset -
1275                                      rbio->pick.crc.offset)) {
1276                 /* extent we wanted to read no longer exists: */
1277                 rbio->hole = true;
1278                 goto out;
1279         }
1280
1281         ret = __bch2_read_extent(c, rbio, bvec_iter, k, failed, flags);
1282         if (ret == READ_RETRY)
1283                 goto retry;
1284         if (ret)
1285                 goto err;
1286 out:
1287         bch2_rbio_done(rbio);
1288         bch2_trans_exit(&trans);
1289         return;
1290 err:
1291         rbio->bio.bi_status = BLK_STS_IOERR;
1292         goto out;
1293 }
1294
1295 static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
1296                             struct bvec_iter bvec_iter, u64 inode,
1297                             struct bch_io_failures *failed, unsigned flags)
1298 {
1299         struct btree_trans trans;
1300         struct btree_iter *iter;
1301         struct bkey_s_c k;
1302         int ret;
1303
1304         bch2_trans_init(&trans, c);
1305
1306         flags &= ~BCH_READ_LAST_FRAGMENT;
1307         flags |= BCH_READ_MUST_CLONE;
1308 retry:
1309         for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
1310                            POS(inode, bvec_iter.bi_sector),
1311                            BTREE_ITER_SLOTS, k, ret) {
1312                 BKEY_PADDED(k) tmp;
1313                 unsigned bytes;
1314
1315                 bkey_reassemble(&tmp.k, k);
1316                 k = bkey_i_to_s_c(&tmp.k);
1317                 bch2_btree_trans_unlock(&trans);
1318
1319                 bytes = min_t(unsigned, bvec_iter.bi_size,
1320                               (k.k->p.offset - bvec_iter.bi_sector) << 9);
1321                 swap(bvec_iter.bi_size, bytes);
1322
1323                 ret = __bch2_read_extent(c, rbio, bvec_iter, k, failed, flags);
1324                 switch (ret) {
1325                 case READ_RETRY:
1326                         goto retry;
1327                 case READ_ERR:
1328                         goto err;
1329                 };
1330
1331                 if (bytes == bvec_iter.bi_size)
1332                         goto out;
1333
1334                 swap(bvec_iter.bi_size, bytes);
1335                 bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
1336         }
1337
1338         /*
1339          * If we get here, it better have been because there was an error
1340          * reading a btree node
1341          */
1342         BUG_ON(!ret);
1343         __bcache_io_error(c, "btree IO error: %i", ret);
1344 err:
1345         rbio->bio.bi_status = BLK_STS_IOERR;
1346 out:
1347         bch2_trans_exit(&trans);
1348         bch2_rbio_done(rbio);
1349 }
1350
1351 static void bch2_rbio_retry(struct work_struct *work)
1352 {
1353         struct bch_read_bio *rbio =
1354                 container_of(work, struct bch_read_bio, work);
1355         struct bch_fs *c        = rbio->c;
1356         struct bvec_iter iter   = rbio->bvec_iter;
1357         unsigned flags          = rbio->flags;
1358         u64 inode               = rbio->pos.inode;
1359         struct bch_io_failures failed = { .nr = 0 };
1360
1361         trace_read_retry(&rbio->bio);
1362
1363         if (rbio->retry == READ_RETRY_AVOID)
1364                 bch2_mark_io_failure(&failed, &rbio->pick);
1365
1366         rbio->bio.bi_status = 0;
1367
1368         rbio = bch2_rbio_free(rbio);
1369
1370         flags |= BCH_READ_IN_RETRY;
1371         flags &= ~BCH_READ_MAY_PROMOTE;
1372
1373         if (flags & BCH_READ_NODECODE)
1374                 bch2_read_retry_nodecode(c, rbio, iter, inode, &failed, flags);
1375         else
1376                 bch2_read_retry(c, rbio, iter, inode, &failed, flags);
1377 }
1378
1379 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
1380                             blk_status_t error)
1381 {
1382         rbio->retry = retry;
1383
1384         if (rbio->flags & BCH_READ_IN_RETRY)
1385                 return;
1386
1387         if (retry == READ_ERR) {
1388                 rbio = bch2_rbio_free(rbio);
1389
1390                 rbio->bio.bi_status = error;
1391                 bch2_rbio_done(rbio);
1392         } else {
1393                 bch2_rbio_punt(rbio, bch2_rbio_retry,
1394                                RBIO_CONTEXT_UNBOUND, system_unbound_wq);
1395         }
1396 }
1397
1398 static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
1399 {
1400         struct bch_fs *c = rbio->c;
1401         struct btree_trans trans;
1402         struct btree_iter *iter;
1403         struct bkey_s_c k;
1404         struct bkey_i_extent *e;
1405         BKEY_PADDED(k) new;
1406         struct bch_extent_crc_unpacked new_crc;
1407         unsigned offset;
1408         int ret;
1409
1410         if (rbio->pick.crc.compression_type)
1411                 return;
1412
1413         bch2_trans_init(&trans, c);
1414 retry:
1415         bch2_trans_begin(&trans);
1416
1417         iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, rbio->pos,
1418                                    BTREE_ITER_INTENT);
1419         k = bch2_btree_iter_peek(iter);
1420         if (IS_ERR_OR_NULL(k.k))
1421                 goto out;
1422
1423         if (!bkey_extent_is_data(k.k))
1424                 goto out;
1425
1426         bkey_reassemble(&new.k, k);
1427         e = bkey_i_to_extent(&new.k);
1428
1429         if (!bch2_extent_matches_ptr(c, extent_i_to_s_c(e),
1430                                      rbio->pick.ptr,
1431                                      rbio->pos.offset -
1432                                      rbio->pick.crc.offset) ||
1433             bversion_cmp(e->k.version, rbio->version))
1434                 goto out;
1435
1436         /* Extent was merged? */
1437         if (bkey_start_offset(&e->k) < rbio->pos.offset ||
1438             e->k.p.offset > rbio->pos.offset + rbio->pick.crc.uncompressed_size)
1439                 goto out;
1440
1441         /* The extent might have been partially overwritten since we read it: */
1442         offset = rbio->pick.crc.offset + (bkey_start_offset(&e->k) - rbio->pos.offset);
1443
1444         if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
1445                                 rbio->pick.crc, NULL, &new_crc,
1446                                 offset, e->k.size,
1447                                 rbio->pick.crc.csum_type)) {
1448                 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
1449                 goto out;
1450         }
1451
1452         if (!bch2_extent_narrow_crcs(e, new_crc))
1453                 goto out;
1454
1455         bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &e->k_i));
1456         ret = bch2_trans_commit(&trans, NULL, NULL,
1457                                 BTREE_INSERT_ATOMIC|
1458                                 BTREE_INSERT_NOFAIL|
1459                                 BTREE_INSERT_NOWAIT);
1460         if (ret == -EINTR)
1461                 goto retry;
1462 out:
1463         bch2_trans_exit(&trans);
1464 }
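/*
 * Note on crc narrowing: after a successful read we may hold a checksum
 * for a smaller region than the one stored in the extent (e.g. when the
 * extent has been partially overwritten).  The function above recomputes a
 * crc for just the live portion with bch2_rechecksum_bio() and, if the
 * extent still matches what we read, commits the narrowed crc, presumably
 * so later reads don't have to read and checksum data that's no longer
 * live.
 */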
1465
1466 static bool should_narrow_crcs(struct bkey_s_c k,
1467                                struct extent_ptr_decoded *pick,
1468                                unsigned flags)
1469 {
1470         return !(flags & BCH_READ_IN_RETRY) &&
1471                 bkey_extent_is_data(k.k) &&
1472                 bch2_can_narrow_extent_crcs(bkey_s_c_to_extent(k), pick->crc);
1473 }
1474
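/*
 * Heavy half of read completion: verify the checksum over the (possibly
 * bounced) data, optionally narrow the extent's crcs, then decrypt and
 * decompress into the destination bio, or copy out of the bounce buffer in
 * the uncompressed case.  A checksum failure on a non-bounced user-mapped
 * read is retried with bouncing forced, since userspace may have scribbled
 * on the buffer while the read was in flight.
 */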
1475 /* Inner part that may run in process context */
1476 static void __bch2_read_endio(struct work_struct *work)
1477 {
1478         struct bch_read_bio *rbio =
1479                 container_of(work, struct bch_read_bio, work);
1480         struct bch_fs *c        = rbio->c;
1481         struct bch_dev *ca      = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1482         struct bio *src         = &rbio->bio;
1483         struct bio *dst         = &bch2_rbio_parent(rbio)->bio;
1484         struct bvec_iter dst_iter = rbio->bvec_iter;
1485         struct bch_extent_crc_unpacked crc = rbio->pick.crc;
1486         struct nonce nonce = extent_nonce(rbio->version, crc);
1487         struct bch_csum csum;
1488
1489         /* Reset iterator for checksumming and copying bounced data: */
1490         if (rbio->bounce) {
1491                 src->bi_iter.bi_size            = crc.compressed_size << 9;
1492                 src->bi_iter.bi_idx             = 0;
1493                 src->bi_iter.bi_bvec_done       = 0;
1494         } else {
1495                 src->bi_iter                    = rbio->bvec_iter;
1496         }
1497
1498         csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
1499         if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
1500                 goto csum_err;
1501
1502         if (unlikely(rbio->narrow_crcs))
1503                 bch2_rbio_narrow_crcs(rbio);
1504
1505         if (rbio->flags & BCH_READ_NODECODE)
1506                 goto nodecode;
1507
1508         /* Adjust crc to point to the subset of data we want: */
1509         crc.offset     += rbio->bvec_iter.bi_sector - rbio->pos.offset;
1510         crc.live_size   = bvec_iter_sectors(rbio->bvec_iter);
1511
1512         if (crc.compression_type != BCH_COMPRESSION_NONE) {
1513                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1514                 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
1515                         goto decompression_err;
1516         } else {
1517                 /* we don't need to decrypt the entire bio, just the part we're reading: */
1518                 nonce = nonce_add(nonce, crc.offset << 9);
1519                 bio_advance(src, crc.offset << 9);
1520
1521                 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
1522                 src->bi_iter.bi_size = dst_iter.bi_size;
1523
1524                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1525
1526                 if (rbio->bounce) {
1527                         struct bvec_iter src_iter = src->bi_iter;
1528                         bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1529                 }
1530         }
1531
1532         if (rbio->promote) {
1533                 /*
1534                  * Re-encrypt the data we decrypted, so it's consistent with
1535                  * rbio->crc:
1536                  */
1537                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1538                 promote_start(rbio->promote, rbio);
1539                 rbio->promote = NULL;
1540         }
1541 nodecode:
1542         if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
1543                 rbio = bch2_rbio_free(rbio);
1544                 bch2_rbio_done(rbio);
1545         }
1546         return;
1547 csum_err:
1548         /*
1549          * Checksum error: if the bio wasn't bounced, we may have been
1550          * reading into buffers owned by userspace (that userspace can
1551          * scribble over) - retry the read, bouncing it this time:
1552          */
1553         if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1554                 rbio->flags |= BCH_READ_MUST_BOUNCE;
1555                 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
1556                 return;
1557         }
1558
1559         bch2_dev_io_error(ca,
1560                 "data checksum error, inode %llu offset %llu: expected %0llx:%0llx got %0llx:%0llx (type %u)",
1561                 rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
1562                 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
1563                 csum.hi, csum.lo, crc.csum_type);
1564         bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1565         return;
1566 decompression_err:
1567         __bcache_io_error(c, "decompression error, inode %llu offset %llu",
1568                           rbio->pos.inode,
1569                           (u64) rbio->bvec_iter.bi_sector);
1570         bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1571         return;
1572 }
1573
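/*
 * bio completion callback: this may be called in interrupt context, so it
 * only does the cheap work itself (latency accounting, IO error handling,
 * detecting stale cached pointers) and punts to __bch2_read_endio() on a
 * workqueue whenever checksumming, decryption, decompression or crc
 * narrowing is needed - system_unbound_wq for the expensive cases,
 * system_highpri_wq when only a checksum has to be verified.  wq and context
 * are left NULL when none of that work is required.
 */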
1574 static void bch2_read_endio(struct bio *bio)
1575 {
1576         struct bch_read_bio *rbio =
1577                 container_of(bio, struct bch_read_bio, bio);
1578         struct bch_fs *c        = rbio->c;
1579         struct bch_dev *ca      = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1580         struct workqueue_struct *wq = NULL;
1581         enum rbio_context context = RBIO_CONTEXT_NULL;
1582
1583         if (rbio->have_ioref) {
1584                 bch2_latency_acct(ca, rbio->submit_time, READ);
1585                 percpu_ref_put(&ca->io_ref);
1586         }
1587
1588         if (!rbio->split)
1589                 rbio->bio.bi_end_io = rbio->end_io;
1590
1591         if (bch2_dev_io_err_on(bio->bi_status, ca, "data read")) {
1592                 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
1593                 return;
1594         }
1595
1596         if (rbio->pick.ptr.cached &&
1597             (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1598              ptr_stale(ca, &rbio->pick.ptr))) {
1599                 atomic_long_inc(&c->read_realloc_races);
1600
1601                 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1602                         bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
1603                 else
1604                         bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
1605                 return;
1606         }
1607
1608         if (rbio->narrow_crcs ||
1609             rbio->pick.crc.compression_type ||
1610             bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
1611                 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
1612         else if (rbio->pick.crc.csum_type)
1613                 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
1614
1615         bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
1616 }
1617
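/*
 * Read a single extent: pick a device to read from, decide whether the read
 * must be bounced (compressed, encrypted user-mapped, or partial checksummed
 * reads) and/or cloned, optionally set up a promote to a faster target, then
 * fill in and submit the bch_read_bio.
 *
 * Hypothetical caller, for illustration only (the bch2_read_extent() wrapper
 * used by bch2_read() below presumably supplies the bio's own iterator and a
 * NULL failure list):
 *
 *      ret = __bch2_read_extent(c, rbio, rbio->bio.bi_iter, k, NULL, flags);
 *
 * Without BCH_READ_IN_RETRY this returns 0 and completes asynchronously via
 * the bio; with it, the read is synchronous and the return value is one of
 * the READ_* codes (READ_RETRY_AVOID is converted to READ_RETRY after the
 * failed device is recorded in *failed).
 */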
1618 int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
1619                        struct bvec_iter iter, struct bkey_s_c k,
1620                        struct bch_io_failures *failed, unsigned flags)
1621 {
1622         struct extent_ptr_decoded pick;
1623         struct bch_read_bio *rbio = NULL;
1624         struct bch_dev *ca;
1625         struct promote_op *promote = NULL;
1626         bool bounce = false, read_full = false, narrow_crcs = false;
1627         struct bpos pos = bkey_start_pos(k.k);
1628         int pick_ret;
1629
1630         pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
1631
1632         /* hole or reservation - just zero fill: */
1633         if (!pick_ret)
1634                 goto hole;
1635
1636         if (pick_ret < 0) {
1637                 __bcache_io_error(c, "no device to read from");
1638                 goto err;
1639         }
1640
1641         if (pick_ret > 0)
1642                 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1643
1644         if (flags & BCH_READ_NODECODE) {
1645                 /*
1646                  * This can happen if we retry, and the extent we were going
1647                  * to read has been merged in the meantime:
1648                  */
1649                 if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
1650                         goto hole;
1651
1652                 iter.bi_sector  = pos.offset;
1653                 iter.bi_size    = pick.crc.compressed_size << 9;
1654                 goto noclone;
1655         }
1656
1657         if (!(flags & BCH_READ_LAST_FRAGMENT) ||
1658             bio_flagged(&orig->bio, BIO_CHAIN))
1659                 flags |= BCH_READ_MUST_CLONE;
1660
1661         narrow_crcs = should_narrow_crcs(k, &pick, flags);
1662
1663         if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
1664                 flags |= BCH_READ_MUST_BOUNCE;
1665
1666         EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
1667                 k.k->p.offset < bvec_iter_end_sector(iter));
1668
1669         if (pick.crc.compression_type != BCH_COMPRESSION_NONE ||
1670             (pick.crc.csum_type != BCH_CSUM_NONE &&
1671              (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
1672               (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
1673                (flags & BCH_READ_USER_MAPPED)) ||
1674               (flags & BCH_READ_MUST_BOUNCE)))) {
1675                 read_full = true;
1676                 bounce = true;
1677         }
1678
1679         promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
1680                                 &rbio, &bounce, &read_full);
1681
1682         if (!read_full) {
1683                 EBUG_ON(pick.crc.compression_type);
1684                 EBUG_ON(pick.crc.csum_type &&
1685                         (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
1686                          bvec_iter_sectors(iter) != pick.crc.live_size ||
1687                          pick.crc.offset ||
1688                          iter.bi_sector != pos.offset));
1689
1690                 pick.ptr.offset += pick.crc.offset +
1691                         (iter.bi_sector - pos.offset);
1692                 pick.crc.compressed_size        = bvec_iter_sectors(iter);
1693                 pick.crc.uncompressed_size      = bvec_iter_sectors(iter);
1694                 pick.crc.offset                 = 0;
1695                 pick.crc.live_size              = bvec_iter_sectors(iter);
1696                 pos.offset                      = iter.bi_sector;
1697         }
1698
1699         if (rbio) {
1700                 /* promote_alloc() already allocated a bounce rbio for us */
1701         } else if (bounce) {
1702                 unsigned sectors = pick.crc.compressed_size;
1703
1704                 rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
1705                                                   DIV_ROUND_UP(sectors, PAGE_SECTORS),
1706                                                   &c->bio_read_split),
1707                                  orig->opts);
1708
1709                 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
1710                 rbio->bounce    = true;
1711                 rbio->split     = true;
1712         } else if (flags & BCH_READ_MUST_CLONE) {
1713                 /*
1714                  * Have to clone if there were any splits, due to error
1715                  * reporting issues: if a split errored and retrying didn't
1716                  * work, then when it reports the error to its parent (us) we
1717                  * can't tell whether the error came from our part of the bio
1718                  * (in which case we should retry) or from the whole bio (in
1719                  * which case retrying would lose the error).
1720                  */
1721                 rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
1722                                                 &c->bio_read_split),
1723                                  orig->opts);
1724                 rbio->bio.bi_iter = iter;
1725                 rbio->split     = true;
1726         } else {
1727 noclone:
1728                 rbio = orig;
1729                 rbio->bio.bi_iter = iter;
1730                 BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
1731         }
1732
1733         BUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
1734
1735         rbio->c                 = c;
1736         rbio->submit_time       = local_clock();
1737         if (rbio->split)
1738                 rbio->parent    = orig;
1739         else
1740                 rbio->end_io    = orig->bio.bi_end_io;
1741         rbio->bvec_iter         = iter;
1742         rbio->flags             = flags;
1743         rbio->have_ioref        = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
1744         rbio->narrow_crcs       = narrow_crcs;
1745         rbio->hole              = 0;
1746         rbio->retry             = 0;
1747         rbio->context           = 0;
1748         rbio->devs_have         = bch2_bkey_devs(k);
1749         rbio->pick              = pick;
1750         rbio->pos               = pos;
1751         rbio->version           = k.k->version;
1752         rbio->promote           = promote;
1753         INIT_WORK(&rbio->work, NULL);
1754
1755         rbio->bio.bi_opf        = orig->bio.bi_opf;
1756         rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
1757         rbio->bio.bi_end_io     = bch2_read_endio;
1758
1759         if (rbio->bounce)
1760                 trace_read_bounce(&rbio->bio);
1761
1762         bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
1763
1764         percpu_down_read_preempt_disable(&c->mark_lock);
1765         bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
1766         percpu_up_read_preempt_enable(&c->mark_lock);
1767
1768         if (likely(!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT)))) {
1769                 bio_inc_remaining(&orig->bio);
1770                 trace_read_split(&orig->bio);
1771         }
1772
1773         if (!rbio->pick.idx) {
1774                 if (!rbio->have_ioref) {
1775                         __bcache_io_error(c, "no device to read from");
1776                         bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1777                         goto out;
1778                 }
1779
1780                 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_USER],
1781                              bio_sectors(&rbio->bio));
1782                 bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
1783
1784                 if (likely(!(flags & BCH_READ_IN_RETRY)))
1785                         submit_bio(&rbio->bio);
1786                 else
1787                         submit_bio_wait(&rbio->bio);
1788         } else {
1789                 /* Attempting a reconstruct read (erasure coding): */
1790                 if (bch2_ec_read_extent(c, rbio)) {
1791                         bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1792                         goto out;
1793                 }
1794
1795                 if (likely(!(flags & BCH_READ_IN_RETRY)))
1796                         bio_endio(&rbio->bio);
1797         }
1798 out:
1799         if (likely(!(flags & BCH_READ_IN_RETRY))) {
1800                 return 0;
1801         } else {
1802                 int ret;
1803
1804                 rbio->context = RBIO_CONTEXT_UNBOUND;
1805                 bch2_read_endio(&rbio->bio);
1806
1807                 ret = rbio->retry;
1808                 rbio = bch2_rbio_free(rbio);
1809
1810                 if (ret == READ_RETRY_AVOID) {
1811                         bch2_mark_io_failure(failed, &pick);
1812                         ret = READ_RETRY;
1813                 }
1814
1815                 return ret;
1816         }
1817
1818 err:
1819         if (flags & BCH_READ_IN_RETRY)
1820                 return READ_ERR;
1821
1822         orig->bio.bi_status = BLK_STS_IOERR;
1823         goto out_read_done;
1824
1825 hole:
1826         /*
1827          * This won't normally happen in the BCH_READ_NODECODE
1828          * (bch2_move_extent()) path, but if we retry and the extent we wanted
1829          * to read no longer exists, we have to signal that:
1830          */
1831         if (flags & BCH_READ_NODECODE)
1832                 orig->hole = true;
1833
1834         zero_fill_bio_iter(&orig->bio, iter);
1835 out_read_done:
1836         if (flags & BCH_READ_LAST_FRAGMENT)
1837                 bch2_rbio_done(orig);
1838         return 0;
1839 }
1840
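/*
 * Entry point for a full (possibly multi-extent) read: walk the extents btree
 * from the starting sector, clip the bio against each extent, and hand each
 * fragment to bch2_read_extent(); the last fragment gets
 * BCH_READ_LAST_FRAGMENT so completion is signalled exactly once.
 *
 * Usage sketch (hypothetical caller, not from this file - the real callers
 * are in the fs IO paths and embed the bch_read_bio in their own bio):
 *
 *      struct bch_read_bio *rbio = rbio_init(bio, io_opts);
 *
 *      bch2_read(c, rbio, inum);
 *
 * where bio, io_opts and inum are the caller's bio, per-inode IO options and
 * inode number respectively.
 */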
1841 void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
1842 {
1843         struct btree_trans trans;
1844         struct btree_iter *iter;
1845         struct bkey_s_c k;
1846         unsigned flags = BCH_READ_RETRY_IF_STALE|
1847                 BCH_READ_MAY_PROMOTE|
1848                 BCH_READ_USER_MAPPED;
1849         int ret;
1850
1851         bch2_trans_init(&trans, c);
1852
1853         BUG_ON(rbio->_state);
1854         BUG_ON(flags & BCH_READ_NODECODE);
1855         BUG_ON(flags & BCH_READ_IN_RETRY);
1856
1857         rbio->c = c;
1858         rbio->start_time = local_clock();
1859
1860         for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
1861                            POS(inode, rbio->bio.bi_iter.bi_sector),
1862                            BTREE_ITER_SLOTS, k, ret) {
1863                 BKEY_PADDED(k) tmp;
1864                 unsigned bytes;
1865
1866                 /*
1867                  * Copy the key, then unlock the iterator (while the btree
1868                  * node is still in cache) before doing the IO:
1869                  */
1870                 bkey_reassemble(&tmp.k, k);
1871                 k = bkey_i_to_s_c(&tmp.k);
1872                 bch2_btree_trans_unlock(&trans);
1873
1874                 bytes = min_t(unsigned, rbio->bio.bi_iter.bi_size,
1875                               (k.k->p.offset - rbio->bio.bi_iter.bi_sector) << 9);
1876                 swap(rbio->bio.bi_iter.bi_size, bytes);
1877
1878                 if (rbio->bio.bi_iter.bi_size == bytes)
1879                         flags |= BCH_READ_LAST_FRAGMENT;
1880
1881                 bch2_read_extent(c, rbio, k, flags);
1882
1883                 if (flags & BCH_READ_LAST_FRAGMENT)
1884                         return;
1885
1886                 swap(rbio->bio.bi_iter.bi_size, bytes);
1887                 bio_advance(&rbio->bio, bytes);
1888         }
1889
1890         /*
1891          * If we get here, it had better have been because there was an
1892          * error reading a btree node
1893          */
1894         BUG_ON(!ret);
1895         bcache_io_error(c, &rbio->bio, "btree IO error: %i", ret);
1896
1897         bch2_trans_exit(&trans);
1898         bch2_rbio_done(rbio);
1899 }
1900
1901 void bch2_fs_io_exit(struct bch_fs *c)
1902 {
1903         if (c->promote_table.tbl)
1904                 rhashtable_destroy(&c->promote_table);
1905         mempool_exit(&c->bio_bounce_pages);
1906         bioset_exit(&c->bio_write);
1907         bioset_exit(&c->bio_read_split);
1908         bioset_exit(&c->bio_read);
1909 }
1910
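/*
 * Allocate the biosets used by the read and write paths, the bounce-page
 * mempool (sized for the larger of a btree node and the maximum encoded
 * extent), and the promote table; bch2_fs_io_exit() above tears these down
 * in the reverse order.  A minimal sketch of a caller (hypothetical - the
 * real call site is in the filesystem startup path):
 *
 *      if (bch2_fs_io_init(c)) {
 *              ret = -ENOMEM;
 *              goto err;
 *      }
 */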
1911 int bch2_fs_io_init(struct bch_fs *c)
1912 {
1913         if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
1914                         BIOSET_NEED_BVECS) ||
1915             bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
1916                         BIOSET_NEED_BVECS) ||
1917             bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
1918                         BIOSET_NEED_BVECS) ||
1919             mempool_init_page_pool(&c->bio_bounce_pages,
1920                                    max_t(unsigned,
1921                                          c->opts.btree_node_size,
1922                                          c->sb.encoded_extent_max) /
1923                                    PAGE_SECTORS, 0) ||
1924             rhashtable_init(&c->promote_table, &bch_promote_params))
1925                 return -ENOMEM;
1926
1927         return 0;
1928 }