/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "btree_update.h"

#include <linux/blkdev.h>
#include <linux/random.h>

#include <trace/events/bcachefs.h>
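/*
 * Open-coded bio_inc_remaining(): mark the bio chained and bump
 * __bi_remaining so that bio_endio() won't complete it until every chained
 * clone has completed as well.
 */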
static inline void __bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	bio_for_each_segment_all(bv, bio, i)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
}
static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
				     bool *using_mempool)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];

	if (likely(!*using_mempool)) {
		bv->bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bv->bv_page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;
		}
	} else {
pool_alloc:
		bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
	}

	bv->bv_len = PAGE_SIZE;
	bv->bv_offset = 0;
}
void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t bytes)
{
	bool using_mempool = false;

	bio->bi_iter.bi_size = bytes;

	while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
		bch2_bio_alloc_page_pool(c, bio, &using_mempool);

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}
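/*
 * Illustrative pairing (not from the original file): a bounce buffer is
 * filled from the pool before submission, and the pages go back in the
 * completion path:
 *
 *	bch2_bio_alloc_pages_pool(c, bio, bytes);
 *	...submit the bio; then, once it completes...
 *	bch2_bio_free_pages_pool(c, bio);
 */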
/* Bios with headers */
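/*
 * Fan a write out to every pointer in @k: the last pointer reuses @wbio
 * itself, earlier pointers get clones chained to it via
 * __bio_inc_remaining(), so the original bio's completion only runs once
 * every replica write has finished.
 */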
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       const struct bkey_i *k)
{
	struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
	const struct bch_extent_ptr *ptr;
	struct bch_write_bio *n;
	struct bch_dev *ca;

	BUG_ON(c->opts.nochanges);

	wbio->split = false;
	wbio->c = c;

	extent_for_each_ptr(e, ptr) {
		ca = c->devs[ptr->dev];

		if (ptr + 1 < &extent_entry_last(e)->ptr) {
			n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
						   &ca->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->c			= c;
			n->orig			= &wbio->bio;
			n->bounce		= false;
			n->split		= true;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			__bio_inc_remaining(n->orig);
		} else {
			n = wbio;
		}

		if (!journal_flushes_device(ca))
			n->bio.bi_opf |= REQ_FUA;

		n->ca			= ca;
		n->submit_time_us	= local_clock_us();
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (likely(percpu_ref_tryget(&ca->io_ref))) {
			n->have_io_ref = true;
			n->bio.bi_bdev = ca->disk_sb.bdev;
			generic_make_request(&n->bio);
		} else {
			n->have_io_ref = false;
			bcache_io_error(c, &n->bio, "device has been removed");
			bio_endio(&n->bio);
		}
	}
}
static struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
	return op->alloc_reserve == RESERVE_MOVINGGC
		? op->c->copygc_wq
		: op->c->wq;
}
static void __bch2_write(struct closure *);

static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);

	BUG_ON(!(op->flags & BCH_WRITE_DONE));

	if (!op->error && (op->flags & BCH_WRITE_FLUSH))
		op->error = bch2_journal_error(&op->c->journal);

	bch2_disk_reservation_put(op->c, &op->res);
	percpu_ref_put(&op->c->writes);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);
	closure_return(cl);
}
static u64 keylist_sectors(struct keylist *keys)
{
	struct bkey_i *k;
	u64 ret = 0;

	for_each_keylist_key(keys, k)
		ret += k->k.size;

	return ret;
}
static int bch2_write_index_default(struct bch_write_op *op)
{
	struct keylist *keys = &op->insert_keys;
	struct btree_iter iter;
	int ret;

	bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_INTENT);

	ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
					NULL, op_journal_seq(op),
					BTREE_INSERT_NOFAIL);
	bch2_btree_iter_unlock(&iter);

	return ret;
}
/**
 * bch_write_index - after a write, update index to point to new data
 */
static void bch2_write_index(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	unsigned i;

	op->flags |= BCH_WRITE_LOOPED;

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);
		int ret = op->index_update_fn(op);

		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (ret) {
			__bcache_io_error(c, "btree IO error %i", ret);
			op->error = ret;
		}
	}

	for (i = 0; i < ARRAY_SIZE(op->open_buckets); i++)
		if (op->open_buckets[i]) {
			bch2_open_bucket_put(c,
					     c->open_buckets +
					     op->open_buckets[i]);
			op->open_buckets[i] = 0;
		}

	if (!(op->flags & BCH_WRITE_DONE))
		continue_at(cl, __bch2_write, op->io_wq);

	if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
		bch2_journal_flush_seq_async(&c->journal,
					     *op_journal_seq(op),
					     cl);
		continue_at(cl, bch2_write_done, index_update_wq(op));
	} else {
		continue_at_nobarrier(cl, bch2_write_done, NULL);
	}
}
/**
 * bch_write_discard - discard range of keys
 *
 * Used to implement discard, and to handle when writethrough write hits
 * a write error on the cache device.
 */
static void bch2_write_discard(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bio *bio = &op->bio->bio;
	struct bpos end = op->pos;

	end.offset += bio_sectors(bio);

	op->error = bch2_discard(op->c, op->pos, end, op->version,
				 &op->res, NULL, NULL);
}
/*
 * Convert extents to be inserted to discards after an error:
 */
static void bch2_write_io_error(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);

	if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
		struct bkey_i *src = bch2_keylist_front(&op->insert_keys);
		struct bkey_i *dst = bch2_keylist_front(&op->insert_keys);

		/*
		 * Our data write just errored, which means we've got a bunch
		 * of keys to insert that point to data that wasn't
		 * successfully written.
		 *
		 * We don't have to insert those keys but we still have to
		 * invalidate that region of the cache - so, if we just strip
		 * off all the pointers from the keys we'll accomplish just
		 * that.
		 */
		while (src != op->insert_keys.top) {
			struct bkey_i *n = bkey_next(src);

			set_bkey_val_u64s(&src->k, 0);
			src->k.type = KEY_TYPE_DISCARD;
			bkey_copy(dst, src);

			dst = bkey_next(dst);
			src = n;
		}

		op->insert_keys.top = dst;
		op->flags |= BCH_WRITE_DISCARD;
	} else {
		/* TODO: We could try to recover from this. */
		while (!bch2_keylist_empty(&op->insert_keys))
			bch2_keylist_pop_front(&op->insert_keys);

		op->error = -EIO;
		op->flags |= BCH_WRITE_DONE;
	}

	bch2_write_index(cl);
}
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_fs *c = wbio->c;
	struct bio *orig = wbio->orig;
	struct bch_dev *ca = wbio->ca;

	if (bch2_dev_nonfatal_io_err_on(bio->bi_error, ca,
					"data write"))
		set_closure_fn(cl, bch2_write_io_error, index_update_wq(op));

	if (wbio->have_io_ref)
		percpu_ref_put(&ca->io_ref);

	if (bio->bi_error && orig)
		orig->bi_error = bio->bi_error;

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (orig)
		bio_endio(orig);
	else
		closure_put(cl);
}
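/*
 * Per the initializer below, the extent nonce packs the crc nonce (bits
 * 12+) and uncompressed size (bits 22+) into word 0, the low 64 bits of
 * the version into words 1-2, and version.hi plus the compression type
 * into word 3, xored with the BCH_NONCE_EXTENT type tag.
 */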
static struct nonce extent_nonce(struct bversion version,
				 unsigned nonce,
				 unsigned uncompressed_size,
				 unsigned compression_type)
{
	return (struct nonce) {{
		[0] = cpu_to_le32((nonce << 12) |
				  (uncompressed_size << 22)),
		[1] = cpu_to_le32(version.lo),
		[2] = cpu_to_le32(version.lo >> 32),
		[3] = cpu_to_le32(version.hi |
				  (compression_type << 24)) ^ BCH_NONCE_EXTENT,
	}};
}
static void init_append_extent(struct bch_write_op *op,
			       unsigned compressed_size,
			       unsigned uncompressed_size,
			       unsigned compression_type,
			       unsigned nonce,
			       struct bch_csum csum, unsigned csum_type,
			       struct open_bucket *ob)
{
	struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);

	op->pos.offset += uncompressed_size;
	e->k.p = op->pos;
	e->k.size = uncompressed_size;
	e->k.version = op->version;
	bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);

	bch2_extent_crc_append(e, compressed_size,
			       uncompressed_size,
			       compression_type,
			       nonce, csum, csum_type);

	bch2_alloc_sectors_append_ptrs(op->c, e, op->nr_replicas,
				       ob, compressed_size);

	bkey_extent_set_cached(&e->k, (op->flags & BCH_WRITE_CACHED));
	bch2_keylist_push(&op->insert_keys);
}
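/*
 * Write out one extent's worth of @orig, as much as fits in open bucket
 * @ob: already-compressed data is passed through (decompressing first if
 * it won't fit), checksummed/compressed/encrypted data is bounced through
 * a pool buffer, and plain data is simply split off the original bio.
 * Returns nonzero if there is more data left to write.
 */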
static int bch2_write_extent(struct bch_write_op *op,
			     struct open_bucket *ob,
			     struct bio *orig)
{
	struct bch_fs *c = op->c;
	struct bio *bio;
	struct bch_write_bio *wbio;
	unsigned key_to_write_offset = op->insert_keys.top_p -
		op->insert_keys.keys_p;
	struct bkey_i *key_to_write;
	unsigned csum_type = op->csum_type;
	unsigned compression_type = op->compression_type;
	int ret;

	/* don't refetch csum type/compression type */
	barrier();

	/* Need to decompress data? */
	if ((op->flags & BCH_WRITE_DATA_COMPRESSED) &&
	    (crc_uncompressed_size(NULL, &op->crc) != op->size ||
	     crc_compressed_size(NULL, &op->crc) > ob->sectors_free)) {
		ret = bch2_bio_uncompress_inplace(c, orig, op->size, op->crc);
		if (ret)
			return ret;

		op->flags &= ~BCH_WRITE_DATA_COMPRESSED;
	}

	if (op->flags & BCH_WRITE_DATA_COMPRESSED) {
		init_append_extent(op,
				   crc_compressed_size(NULL, &op->crc),
				   crc_uncompressed_size(NULL, &op->crc),
				   op->crc.compression_type,
				   op->crc.nonce, op->crc.csum,
				   op->crc.csum_type, ob);

		bio		= orig;
		wbio		= to_wbio(bio);
		wbio->orig	= NULL;
		wbio->bounce	= false;
		wbio->put_bio	= false;
		ret		= 0;
	} else if (csum_type != BCH_CSUM_NONE ||
		   compression_type != BCH_COMPRESSION_NONE) {
		/* all units here in bytes */
		unsigned total_output = 0, output_available =
			min(ob->sectors_free << 9, orig->bi_iter.bi_size);
		unsigned crc_nonce = bch2_csum_type_is_encryption(csum_type)
			? op->nonce : 0;
		struct bch_csum csum;
		struct nonce nonce;

		bio = bio_alloc_bioset(GFP_NOIO,
				       DIV_ROUND_UP(output_available, PAGE_SIZE),
				       &c->bio_write);
		/*
		 * XXX: can't use mempool for more than
		 * BCH_COMPRESSED_EXTENT_MAX worth of pages
		 */
		bch2_bio_alloc_pages_pool(c, bio, output_available);

		/* copy WRITE_SYNC flag */
		bio->bi_opf	= orig->bi_opf;
		wbio		= to_wbio(bio);
		wbio->orig	= NULL;
		wbio->bounce	= true;
		wbio->put_bio	= true;

		do {
			unsigned fragment_compression_type = compression_type;
			size_t dst_len, src_len;

			bch2_bio_compress(c, bio, &dst_len,
					  orig, &src_len,
					  &fragment_compression_type);

			BUG_ON(!dst_len || dst_len > bio->bi_iter.bi_size);
			BUG_ON(!src_len || src_len > orig->bi_iter.bi_size);
			BUG_ON(dst_len & (block_bytes(c) - 1));
			BUG_ON(src_len & (block_bytes(c) - 1));

			swap(bio->bi_iter.bi_size, dst_len);
			nonce = extent_nonce(op->version,
					     crc_nonce,
					     src_len >> 9,
					     fragment_compression_type);

			bch2_encrypt_bio(c, csum_type, nonce, bio);

			csum = bch2_checksum_bio(c, csum_type, nonce, bio);
			swap(bio->bi_iter.bi_size, dst_len);

			init_append_extent(op,
					   dst_len >> 9, src_len >> 9,
					   fragment_compression_type,
					   crc_nonce, csum, csum_type, ob);

			total_output += dst_len;
			bio_advance(bio, dst_len);
			bio_advance(orig, src_len);
		} while (bio->bi_iter.bi_size &&
			 orig->bi_iter.bi_size &&
			 !bch2_keylist_realloc(&op->insert_keys,
					       op->inline_keys,
					       ARRAY_SIZE(op->inline_keys),
					       BKEY_EXTENT_U64s_MAX));

		BUG_ON(total_output > output_available);

		memset(&bio->bi_iter, 0, sizeof(bio->bi_iter));
		bio->bi_iter.bi_size = total_output;

		/*
		 * Free unneeded pages after compressing:
		 */
		while (bio->bi_vcnt * PAGE_SIZE >
		       round_up(bio->bi_iter.bi_size, PAGE_SIZE))
			mempool_free(bio->bi_io_vec[--bio->bi_vcnt].bv_page,
				     &c->bio_bounce_pages);

		ret = orig->bi_iter.bi_size != 0;
	} else {
		bio = bio_next_split(orig, ob->sectors_free, GFP_NOIO,
				     &c->bio_write);

		wbio		= to_wbio(bio);
		wbio->orig	= NULL;
		wbio->bounce	= false;
		wbio->put_bio	= bio != orig;

		init_append_extent(op, bio_sectors(bio), bio_sectors(bio),
				   compression_type, 0,
				   (struct bch_csum) { 0 }, csum_type, ob);

		ret = bio != orig;
	}

	bio->bi_end_io	= bch2_write_endio;
	bio->bi_private	= &op->cl;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	closure_get(bio->bi_private);

	/* might have done a realloc... */

	key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);

	bch2_check_mark_super(c, bkey_i_to_s_c_extent(key_to_write),
			      BCH_DATA_USER);

	bch2_submit_wbio_replicas(to_wbio(bio), c, key_to_write);
	return ret;
}
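/*
 * __bch2_write() loops allocating one open bucket at a time and writing an
 * extent's worth of data into it via bch2_write_extent(); when the keylist
 * or the open_buckets array fills up it bounces through bch2_write_index()
 * to insert the keys written so far, then resumes from where it left off.
 */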
static void __bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;
	struct bio *bio = &op->bio->bio;
	unsigned open_bucket_nr = 0;
	struct open_bucket *b;
	int ret;

	memset(op->open_buckets, 0, sizeof(op->open_buckets));

	if (op->flags & BCH_WRITE_DISCARD) {
		op->flags |= BCH_WRITE_DONE;
		bch2_write_discard(cl);

		continue_at(cl, bch2_write_done, index_update_wq(op));
	}

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		EBUG_ON(bio->bi_iter.bi_sector != op->pos.offset);
		EBUG_ON(!bio_sectors(bio));

		if (open_bucket_nr == ARRAY_SIZE(op->open_buckets))
			continue_at(cl, bch2_write_index, index_update_wq(op));

		/* for the device pointers and 1 for the checksum */
		if (bch2_keylist_realloc(&op->insert_keys,
					 op->inline_keys,
					 ARRAY_SIZE(op->inline_keys),
					 BKEY_EXTENT_U64s_MAX))
			continue_at(cl, bch2_write_index, index_update_wq(op));

		b = bch2_alloc_sectors_start(c, op->wp,
			op->nr_replicas,
			c->opts.data_replicas_required,
			op->alloc_reserve,
			(op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
		EBUG_ON(!b);

		if (unlikely(IS_ERR(b))) {
			if (unlikely(PTR_ERR(b) != -EAGAIN)) {
				ret = PTR_ERR(b);
				goto err;
			}

			/*
			 * If we already have some keys, must insert them first
			 * before allocating another open bucket. We only hit
			 * this case if open_bucket_nr > 1.
			 */
			if (!bch2_keylist_empty(&op->insert_keys))
				continue_at(cl, bch2_write_index,
					    index_update_wq(op));

			/*
			 * If we've looped, we're running out of a workqueue -
			 * not the bch2_write() caller's context - and we don't
			 * want to block the workqueue:
			 */
			if (op->flags & BCH_WRITE_LOOPED)
				continue_at(cl, __bch2_write, op->io_wq);

			/*
			 * Otherwise, we do want to block the caller on alloc
			 * failure instead of letting it queue up more and more
			 * writes:
			 * XXX: this technically needs a try_to_freeze() -
			 * except that that's not safe because caller may have
			 * issued other IO... hmm..
			 */
			closure_sync(cl);
			continue;
		}

		BUG_ON(b - c->open_buckets == 0 ||
		       b - c->open_buckets > U8_MAX);
		op->open_buckets[open_bucket_nr++] = b - c->open_buckets;

		ret = bch2_write_extent(op, b, bio);

		bch2_alloc_sectors_done(c, op->wp, b);

		if (ret < 0)
			goto err;
	} while (ret);

	op->flags |= BCH_WRITE_DONE;
	continue_at(cl, bch2_write_index, index_update_wq(op));
err:
	if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
		/*
		 * If we were writing cached data, not doing the write is fine
		 * so long as we discard whatever would have been overwritten -
		 * then it's equivalent to doing the write and immediately
		 * reclaiming it:
		 */
		bch2_write_discard(cl);
	} else {
		/*
		 * Right now we can only error here if we went RO - the
		 * allocation failed, but we already checked for -ENOSPC when we
		 * got our reservation.
		 *
		 * XXX capacity might have changed, but we don't check for that
		 * yet:
		 */
		op->error = ret;
	}

	op->flags |= BCH_WRITE_DONE;

	/*
	 * No reason not to insert keys for whatever data was successfully
	 * written (especially for a cmpxchg operation that's moving data
	 * around)
	 */
	continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
		    ? bch2_write_index
		    : bch2_write_done, index_update_wq(op));
}
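/*
 * Timer callback for the foreground write throttle: pops expired ops off
 * the c->write_wait list and drops the ref the list held, which is what
 * finally lets each op's queued __bch2_write() run; re-arms the timer for
 * the first op that hasn't expired yet.
 */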
void bch2_wake_delayed_writes(unsigned long data)
{
	struct bch_fs *c = (void *) data;
	struct bch_write_op *op;
	unsigned long flags;

	spin_lock_irqsave(&c->foreground_write_pd_lock, flags);

	while ((op = c->write_wait_head)) {
		if (time_after(op->expires, jiffies)) {
			mod_timer(&c->foreground_write_wakeup, op->expires);
			break;
		}

		c->write_wait_head = op->next;
		if (!c->write_wait_head)
			c->write_wait_tail = NULL;

		closure_put(&op->cl);
	}

	spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
}
/**
 * bch_write - handle a write to a cache device or flash only volume
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data won't fit in a single open bucket, there will be
 * multiple keys); after the data is written it calls bch_journal, and after
 * the keys have been added to the next journal write they're inserted into
 * the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset, and
 * op->inode is used for the key inode.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bio *bio = &op->bio->bio;
	struct bch_fs *c = op->c;
	u64 inode = op->pos.inode;

	if (c->opts.nochanges ||
	    !percpu_ref_tryget(&c->writes)) {
		__bcache_io_error(c, "read only");
		op->error = -EROFS;
		bch2_disk_reservation_put(c, &op->res);
		closure_return(cl);
	}

	if (bversion_zero(op->version) &&
	    bch2_csum_type_is_encryption(op->csum_type))
		op->version.lo =
			atomic64_inc_return(&c->key_version) + 1;

	if (!(op->flags & BCH_WRITE_DISCARD))
		bch2_increment_clock(c, bio_sectors(bio), WRITE);

	/* Don't call bch2_next_delay() if rate is >= 1 GB/sec */

	if (c->foreground_write_ratelimit_enabled &&
	    c->foreground_write_pd.rate.rate < (1 << 30) &&
	    !(op->flags & BCH_WRITE_DISCARD) && op->wp->throttle) {
		unsigned long flags;
		u64 delay;

		spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
		bch2_ratelimit_increment(&c->foreground_write_pd.rate,
					 bio->bi_iter.bi_size);

		delay = bch2_ratelimit_delay(&c->foreground_write_pd.rate);

		if (delay >= HZ / 100) {
			trace_write_throttle(c, inode, bio, delay);

			closure_get(&op->cl); /* list takes a ref */

			op->expires = jiffies + delay;
			op->next = NULL;

			if (c->write_wait_tail)
				c->write_wait_tail->next = op;
			else
				c->write_wait_head = op;
			c->write_wait_tail = op;

			if (!timer_pending(&c->foreground_write_wakeup))
				mod_timer(&c->foreground_write_wakeup,
					  op->expires);

			spin_unlock_irqrestore(&c->foreground_write_pd_lock,
					       flags);
			continue_at(cl, __bch2_write, index_update_wq(op));
		}

		spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
	}

	continue_at_nobarrier(cl, __bch2_write, NULL);
}
void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
			struct bch_write_bio *bio, struct disk_reservation res,
			struct write_point *wp, struct bpos pos,
			u64 *journal_seq, unsigned flags)
{
	EBUG_ON(res.sectors && !res.nr_replicas);

	op->c		= c;
	op->io_wq	= index_update_wq(op);
	op->bio		= bio;
	op->written	= 0;
	op->error	= 0;
	op->flags	= flags;
	op->csum_type	= bch2_data_checksum_type(c);
	op->compression_type = c->opts.compression;
	op->nr_replicas	= res.nr_replicas;
	op->alloc_reserve = RESERVE_NONE;
	op->nonce	= 0;
	op->pos		= pos;
	op->version	= ZERO_VERSION;
	op->res		= res;
	op->wp		= wp;

	if (journal_seq) {
		op->journal_seq_p = journal_seq;
		op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
	} else {
		op->journal_seq = 0;
	}

	op->index_update_fn = bch2_write_index_default;

	bch2_keylist_init(&op->insert_keys,
			  op->inline_keys,
			  ARRAY_SIZE(op->inline_keys));

	if (version_stress_test(c))
		get_random_bytes(&op->version, sizeof(op->version));
}
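/*
 * Illustrative sketch (not from the original file): a typical caller
 * initializes a write op and kicks it off as a closure - compare the
 * promote path in __bch2_read_endio():
 *
 *	bch2_write_op_init(&op, c, wbio, res, wp, pos, journal_seq, flags);
 *	closure_call(&op.cl, bch2_write, index_update_wq(&op), parent_cl);
 */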
/* bch_discard - discard a range of keys from start_key to end_key.
 * @start_key	pointer to start location
 *	NOTE: discard starts at bkey_start_offset(start_key)
 * @end_key	pointer to end location
 *	NOTE: discard ends at KEY_OFFSET(end_key)
 * @version	version of discard (0ULL if none)
 *
 * XXX: this needs to be refactored with inode_truncate, or more
 * appropriately inode_truncate should call this
 */
int bch2_discard(struct bch_fs *c, struct bpos start,
		 struct bpos end, struct bversion version,
		 struct disk_reservation *disk_res,
		 struct extent_insert_hook *hook,
		 u64 *journal_seq)
{
	return bch2_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version,
				       disk_res, hook, journal_seq);
}
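/*
 * Example (illustrative): discarding one inode's range of extents, with no
 * reservation, hook, or journal sequence pointer:
 *
 *	ret = bch2_discard(c, POS(inode, start), POS(inode, end),
 *			   ZERO_VERSION, NULL, NULL, NULL);
 */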
/* Cache promotion on read */

struct cache_promote_op {
	struct closure		cl;
	struct migrate_write	write;
	struct bio_vec		bi_inline_vecs[0]; /* must be last */
};
static int bio_checksum_uncompress(struct bch_fs *c,
				   struct bch_read_bio *rbio)
{
	struct bio *src = &rbio->bio;
	struct bio *dst = &bch2_rbio_parent(rbio)->bio;
	struct bvec_iter dst_iter = rbio->parent_iter;
	struct nonce nonce = extent_nonce(rbio->version,
					  rbio->crc.nonce,
					  crc_uncompressed_size(NULL, &rbio->crc),
					  rbio->crc.compression_type);
	struct bch_csum csum;
	int ret = 0;

	/*
	 * reset iterator for checksumming and copying bounced data: here we've
	 * set rbio->compressed_size to the amount of data we actually read,
	 * which was not necessarily the full extent if we were only bouncing
	 * in order to promote
	 */
	if (rbio->bounce) {
		src->bi_iter.bi_size	= crc_compressed_size(NULL, &rbio->crc) << 9;
		src->bi_iter.bi_idx	= 0;
		src->bi_iter.bi_bvec_done = 0;
	} else {
		src->bi_iter = rbio->parent_iter;
	}

	csum = bch2_checksum_bio(c, rbio->crc.csum_type, nonce, src);
	if (bch2_dev_nonfatal_io_err_on(bch2_crc_cmp(rbio->crc.csum, csum),
			rbio->ca,
			"data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
			rbio->inode, (u64) rbio->parent_iter.bi_sector << 9,
			rbio->crc.csum.hi, rbio->crc.csum.lo, csum.hi, csum.lo,
			rbio->crc.csum_type))
		ret = -EIO;

	/*
	 * If there was a checksum error, still copy the data back - unless it
	 * was compressed, we don't want to decompress bad data:
	 */
	if (rbio->crc.compression_type != BCH_COMPRESSION_NONE) {
		if (!ret) {
			bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
			ret = bch2_bio_uncompress(c, src, dst,
						  dst_iter, rbio->crc);
			if (ret)
				__bcache_io_error(c, "decompression error");
		}
	} else if (rbio->bounce) {
		bio_advance(src, rbio->crc.offset << 9);

		/* don't need to decrypt the entire bio: */
		BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
		src->bi_iter.bi_size = dst_iter.bi_size;

		nonce = nonce_add(nonce, rbio->crc.offset << 9);

		bch2_encrypt_bio(c, rbio->crc.csum_type,
				 nonce, src);

		bio_copy_data_iter(dst, &dst_iter,
				   src, &src->bi_iter);
	} else {
		bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
	}

	return ret;
}
static void bch2_rbio_free(struct bch_read_bio *rbio)
{
	struct bch_fs *c = rbio->c;
	struct bio *bio = &rbio->bio;

	BUG_ON(rbio->ca);
	BUG_ON(!rbio->split);

	if (rbio->promote)
		kfree(rbio->promote);
	if (rbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	bio_put(bio);
}

static void bch2_rbio_done(struct bch_read_bio *rbio)
{
	struct bio *orig = &bch2_rbio_parent(rbio)->bio;

	if (rbio->ca)
		percpu_ref_put(&rbio->ca->io_ref);
	rbio->ca = NULL;

	if (rbio->split) {
		if (rbio->bio.bi_error)
			orig->bi_error = rbio->bio.bi_error;

		bio_endio(orig);
		bch2_rbio_free(rbio);
	} else {
		if (rbio->promote)
			kfree(rbio->promote);

		orig->bi_end_io = rbio->orig_bi_end_io;
		bio_endio_nodec(orig);
	}
}
static void bch2_rbio_error(struct bch_read_bio *rbio, int error)
{
	bch2_rbio_parent(rbio)->bio.bi_error = error;
	bch2_rbio_done(rbio);
}

static void bch2_rbio_retry(struct bch_fs *c, struct bch_read_bio *rbio)
{
	unsigned long flags;

	percpu_ref_put(&rbio->ca->io_ref);
	rbio->ca = NULL;

	spin_lock_irqsave(&c->read_retry_lock, flags);
	bio_list_add(&c->read_retry_list, &rbio->bio);
	spin_unlock_irqrestore(&c->read_retry_lock, flags);
	queue_work(c->wq, &c->read_retry_work);
}
static void cache_promote_done(struct closure *cl)
{
	struct cache_promote_op *op =
		container_of(cl, struct cache_promote_op, cl);

	bch2_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio);
	kfree(op);
}
/* Inner part that may run in process context */
static void __bch2_read_endio(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c = rbio->c;
	int ret;

	ret = bio_checksum_uncompress(c, rbio);
	if (ret) {
		/*
		 * Checksum error: if the bio wasn't bounced, we may have been
		 * reading into buffers owned by userspace (that userspace can
		 * scribble over) - retry the read, bouncing it this time:
		 */
		if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
			rbio->flags |= BCH_READ_FORCE_BOUNCE;
			bch2_rbio_retry(c, rbio);
		} else {
			bch2_rbio_error(rbio, -EIO);
		}
		return;
	}

	if (rbio->promote) {
		struct cache_promote_op *promote = rbio->promote;
		struct closure *cl = &promote->cl;

		BUG_ON(!rbio->split || !rbio->bounce);

		trace_promote(&rbio->bio);

		/* we now own pages: */
		swap(promote->write.wbio.bio.bi_vcnt, rbio->bio.bi_vcnt);
		rbio->promote = NULL;

		bch2_rbio_done(rbio);

		closure_init(cl, &c->cl);
		closure_call(&promote->write.op.cl, bch2_write, c->wq, cl);
		closure_return_with_destructor(cl, cache_promote_done);
	} else {
		bch2_rbio_done(rbio);
	}
}
static void bch2_read_endio(struct bio *bio)
{
	struct bch_read_bio *rbio =
		container_of(bio, struct bch_read_bio, bio);
	struct bch_fs *c = rbio->c;

	if (bch2_dev_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read")) {
		/* XXX: retry IO errors when we have another replica */
		bch2_rbio_error(rbio, bio->bi_error);
		return;
	}

	if (rbio->ptr.cached &&
	    (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
	     ptr_stale(rbio->ca, &rbio->ptr))) {
		atomic_long_inc(&c->read_realloc_races);

		if (rbio->flags & BCH_READ_RETRY_IF_STALE)
			bch2_rbio_retry(c, rbio);
		else
			bch2_rbio_error(rbio, -EINTR);
		return;
	}

	if (rbio->crc.compression_type ||
	    bch2_csum_type_is_encryption(rbio->crc.csum_type))
		queue_work(system_unbound_wq, &rbio->work);
	else if (rbio->crc.csum_type)
		queue_work(system_highpri_wq, &rbio->work);
	else
		__bch2_read_endio(&rbio->work);
}
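/*
 * Promote (rewrite to a faster tier) only if the caller asked for it, the
 * filesystem isn't going read-only, and the pointer we're reading from
 * lives on a slower tier than the fastest one:
 */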
static bool should_promote(struct bch_fs *c,
			   struct extent_pick_ptr *pick, unsigned flags)
{
	if (!(flags & BCH_READ_PROMOTE))
		return false;

	if (percpu_ref_is_dying(&c->writes))
		return false;

	return c->fastest_tier &&
		c->fastest_tier < c->tiers + pick->ca->mi.tier;
}
void bch2_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
			   struct bvec_iter iter, struct bkey_s_c k,
			   struct extent_pick_ptr *pick, unsigned flags)
{
	struct bch_read_bio *rbio;
	struct cache_promote_op *promote_op = NULL;
	unsigned skip = iter.bi_sector - bkey_start_offset(k.k);
	bool bounce = false, split, read_full = false;

	bch2_increment_clock(c, bio_sectors(&orig->bio), READ);

	EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
		k.k->p.offset < bvec_iter_end_sector(iter));

	/* only promote if we're not reading from the fastest tier: */

	/*
	 * XXX: multiple promotes can race with each other, wastefully. Keep a
	 * list of outstanding promotes?
	 */
	if (should_promote(c, pick, flags)) {
		/*
		 * biovec needs to be big enough to hold decompressed data, if
		 * the bch2_write_extent() has to decompress/recompress it:
		 */
		unsigned sectors =
			max_t(unsigned, k.k->size,
			      crc_uncompressed_size(NULL, &pick->crc));
		unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);

		promote_op = kmalloc(sizeof(*promote_op) +
				     sizeof(struct bio_vec) * pages, GFP_NOIO);
		if (promote_op) {
			struct bio *promote_bio = &promote_op->write.wbio.bio;

			bio_init(promote_bio,
				 promote_bio->bi_inline_vecs,
				 pages);
			bounce = true;
			/* could also set read_full */
		}
	}

	/*
	 * note: if compression_type and crc_type both == none, then
	 * compressed/uncompressed size is zero
	 */
	if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
	    (pick->crc.csum_type != BCH_CSUM_NONE &&
	     (bvec_iter_sectors(iter) != crc_uncompressed_size(NULL, &pick->crc) ||
	      (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
	       (flags & BCH_READ_USER_MAPPED)) ||
	      (flags & BCH_READ_FORCE_BOUNCE)))) {
		read_full = true;
		bounce = true;
	}

	if (bounce) {
		unsigned sectors = read_full
			? (crc_compressed_size(NULL, &pick->crc) ?: k.k->size)
			: bvec_iter_sectors(iter);

		rbio = container_of(bio_alloc_bioset(GFP_NOIO,
					DIV_ROUND_UP(sectors, PAGE_SECTORS),
					&c->bio_read_split),
				    struct bch_read_bio, bio);

		bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
		split = true;
	} else if (!(flags & BCH_READ_MAY_REUSE_BIO) ||
		   !(flags & BCH_READ_IS_LAST)) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
		rbio = container_of(bio_clone_fast(&orig->bio,
					GFP_NOIO, &c->bio_read_split),
				    struct bch_read_bio, bio);
		rbio->bio.bi_iter = iter;
		split = true;
	} else {
		rbio = orig;
		rbio->bio.bi_iter = iter;
		split = false;
		BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
	}

	if (!(flags & BCH_READ_IS_LAST))
		__bio_inc_remaining(&orig->bio);

	if (split)
		rbio->parent		= orig;
	else
		rbio->orig_bi_end_io	= orig->bio.bi_end_io;
	rbio->parent_iter		= iter;

	rbio->flags	= flags;
	rbio->bounce	= bounce;
	rbio->split	= split;
	rbio->c		= c;
	rbio->ca	= pick->ca;
	rbio->ptr	= pick->ptr;
	rbio->crc	= pick->crc;
	/*
	 * crc.compressed_size will be 0 if there wasn't any checksum
	 * information, also we need to stash the original size of the bio if we
	 * bounced (which isn't necessarily the original key size, if we bounced
	 * only for promoting)
	 */
	rbio->crc._compressed_size = bio_sectors(&rbio->bio) - 1;
	rbio->version	= k.k->version;
	rbio->promote	= promote_op;
	rbio->inode	= k.k->p.inode;
	INIT_WORK(&rbio->work, __bch2_read_endio);

	rbio->bio.bi_bdev	= pick->ca->disk_sb.bdev;
	rbio->bio.bi_opf	= orig->bio.bi_opf;
	rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
	rbio->bio.bi_end_io	= bch2_read_endio;

	if (promote_op) {
		struct bio *promote_bio = &promote_op->write.wbio.bio;

		promote_bio->bi_iter = rbio->bio.bi_iter;
		memcpy(promote_bio->bi_io_vec, rbio->bio.bi_io_vec,
		       sizeof(struct bio_vec) * rbio->bio.bi_vcnt);

		bch2_migrate_write_init(c, &promote_op->write,
					&c->promote_write_point,
					k, NULL,
					BCH_WRITE_ALLOC_NOWAIT|
					BCH_WRITE_CACHED);
		promote_op->write.promote = true;

		if (rbio->crc.compression_type) {
			promote_op->write.op.flags |= BCH_WRITE_DATA_COMPRESSED;
			promote_op->write.op.crc = rbio->crc;
			promote_op->write.op.size = k.k->size;
		} else if (read_full) {
			/*
			 * Adjust bio to correspond to _live_ portion of @k -
			 * which might be less than what we're actually reading:
			 */
			bio_advance(promote_bio, rbio->crc.offset << 9);
			BUG_ON(bio_sectors(promote_bio) < k.k->size);
			promote_bio->bi_iter.bi_size = k.k->size << 9;
		} else {
			/*
			 * Set insert pos to correspond to what we're actually
			 * reading:
			 */
			promote_op->write.op.pos.offset = iter.bi_sector;
		}

		promote_bio->bi_iter.bi_sector =
			promote_op->write.op.pos.offset;
	}

	/* _after_ promote stuff has looked at rbio->crc.offset */
	if (read_full)
		rbio->crc.offset += skip;
	else
		rbio->bio.bi_iter.bi_sector += skip;

	rbio->submit_time_us = local_clock_us();

	if (bounce)
		trace_read_bounce(&rbio->bio);

	if (!(flags & BCH_READ_IS_LAST))
		trace_read_split(&rbio->bio);

	generic_make_request(&rbio->bio);
}
static void bch2_read_iter(struct bch_fs *c, struct bch_read_bio *rbio,
			   struct bvec_iter bvec_iter, u64 inode,
			   unsigned flags)
{
	struct bio *bio = &rbio->bio;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode, bvec_iter.bi_sector),
			   BTREE_ITER_WITH_HOLES, k) {
		BKEY_PADDED(k) tmp;
		struct extent_pick_ptr pick;
		unsigned bytes, sectors;
		bool is_last;

		/*
		 * Unlock the iterator while the btree node's lock is still in
		 * cache, before doing the IO:
		 */
		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);
		bch2_btree_iter_unlock(&iter);

		bch2_extent_pick_ptr(c, k, &pick);
		if (IS_ERR(pick.ca)) {
			bcache_io_error(c, bio, "no device to read from");
			bio_endio(bio);
			return;
		}

		sectors = min_t(u64, k.k->p.offset,
				bvec_iter_end_sector(bvec_iter)) -
			bvec_iter.bi_sector;
		bytes = sectors << 9;
		is_last = bytes == bvec_iter.bi_size;
		swap(bvec_iter.bi_size, bytes);

		if (is_last)
			flags |= BCH_READ_IS_LAST;

		if (pick.ca) {
			PTR_BUCKET(pick.ca, &pick.ptr)->prio[READ] =
				c->prio_clock[READ].hand;

			bch2_read_extent_iter(c, rbio, bvec_iter,
					      k, &pick, flags);

			flags &= ~BCH_READ_MAY_REUSE_BIO;
		} else {
			zero_fill_bio_iter(bio, bvec_iter);

			if (is_last)
				bio_endio(bio);
		}

		if (is_last)
			return;

		swap(bvec_iter.bi_size, bytes);
		bio_advance_iter(bio, &bvec_iter, bytes);
	}

	/*
	 * If we get here, it better have been because there was an error
	 * reading a btree node
	 */
	ret = bch2_btree_iter_unlock(&iter);
	BUG_ON(!ret);
	bcache_io_error(c, bio, "btree IO error %i", ret);
	bio_endio(bio);
}
void bch2_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode)
{
	bch2_read_iter(c, bio, bio->bio.bi_iter, inode,
		       BCH_READ_RETRY_IF_STALE|
		       BCH_READ_PROMOTE|
		       BCH_READ_MAY_REUSE_BIO|
		       BCH_READ_USER_MAPPED);
}
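/*
 * Example (illustrative): the filesystem read path points the rbio at the
 * starting sector and hands it off with the target inode number:
 *
 *	rbio->bio.bi_iter.bi_sector = sector;
 *	bch2_read(c, rbio, inode_nr);
 */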
/**
 * bch_read_retry - re-submit a bio originally from bch2_read()
 */
static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio)
{
	struct bch_read_bio *parent = bch2_rbio_parent(rbio);
	struct bvec_iter iter = rbio->parent_iter;
	unsigned flags = rbio->flags;
	u64 inode = rbio->inode;

	trace_read_retry(&rbio->bio);

	if (rbio->split)
		bch2_rbio_free(rbio);
	else
		rbio->bio.bi_end_io = rbio->orig_bi_end_io;

	bch2_read_iter(c, parent, iter, inode, flags);
}
void bch2_read_retry_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					read_retry_work);
	struct bch_read_bio *rbio;
	struct bio *bio;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&c->read_retry_lock, flags);
		bio = bio_list_pop(&c->read_retry_list);
		spin_unlock_irqrestore(&c->read_retry_lock, flags);

		if (!bio)
			break;

		rbio = container_of(bio, struct bch_read_bio, bio);
		bch2_read_retry(c, rbio);
	}
}