2 * Some low level IO code, and hacks for various block layer limitations
4 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5 * Copyright 2012 Google, Inc.
11 #include "btree_update.h"
27 #include <linux/blkdev.h>
28 #include <linux/random.h>
30 #include <trace/events/bcache.h>
32 static inline void __bio_inc_remaining(struct bio *bio)
34 bio_set_flag(bio, BIO_CHAIN);
35 smp_mb__before_atomic();
36 atomic_inc(&bio->__bi_remaining);
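/*
 * Illustrative sketch (not part of the original file): how a bumped
 * __bi_remaining pairs with completion.  Once the count has been
 * incremented, bio_endio() on the parent only invokes ->bi_end_io after
 * every outstanding reference has been dropped, so a clone's endio just
 * propagates the error and endios the parent once.  This sketch assumes
 * the submitter stored the parent bio in the clone's ->bi_private; the
 * code below instead keeps it in wbio->orig (see bch_write_endio()).
 */
static void clone_endio_sketch(struct bio *clone)
{
	struct bio *parent = clone->bi_private;

	if (clone->bi_error)
		parent->bi_error = clone->bi_error;

	bio_put(clone);
	bio_endio(parent);	/* drops one __bi_remaining reference */
}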
39 void bch_generic_make_request(struct bio *bio, struct cache_set *c)
41 if (current->bio_list) {
42 spin_lock(&c->bio_submit_lock);
43 bio_list_add(&c->bio_submit_list, bio);
44 spin_unlock(&c->bio_submit_lock);
45 queue_work(bcache_io_wq, &c->bio_submit_work);
47 generic_make_request(bio);
51 void bch_bio_submit_work(struct work_struct *work)
53 struct cache_set *c = container_of(work, struct cache_set,
58 spin_lock(&c->bio_submit_lock);
59 bl = c->bio_submit_list;
60 bio_list_init(&c->bio_submit_list);
61 spin_unlock(&c->bio_submit_lock);
63 while ((bio = bio_list_pop(&bl)))
64 generic_make_request(bio);
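/*
 * Why the punt above: generic_make_request() serializes recursive
 * submissions on current->bio_list, so a bio submitted from inside
 * another driver's ->make_request_fn doesn't actually start until the
 * outer call unwinds.  If we then block waiting on that bio (e.g. on a
 * mempool), we deadlock; handing it to bcache_io_wq resubmits it from a
 * context with an empty bio_list.
 */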
67 /* Allocate, free from mempool: */
69 void bch_bio_free_pages_pool(struct cache_set *c, struct bio *bio)
74 bio_for_each_segment_all(bv, bio, i)
75 if (bv->bv_page != ZERO_PAGE(0))
76 mempool_free(bv->bv_page, &c->bio_bounce_pages);
80 static void bch_bio_alloc_page_pool(struct cache_set *c, struct bio *bio,
83 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
85 if (likely(!*using_mempool)) {
86 bv->bv_page = alloc_page(GFP_NOIO);
87 if (unlikely(!bv->bv_page)) {
88 mutex_lock(&c->bio_bounce_pages_lock);
89 *using_mempool = true;
95 bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
98 bv->bv_len = PAGE_SIZE;
102 void bch_bio_alloc_pages_pool(struct cache_set *c, struct bio *bio,
105 bool using_mempool = false;
107 bio->bi_iter.bi_size = bytes;
109 while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
110 bch_bio_alloc_page_pool(c, bio, &using_mempool);
113 mutex_unlock(&c->bio_bounce_pages_lock);
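/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * allocating a bounce bio whose pages come from the pool above, and
 * releasing them again.  The bioset used here is only an example; any
 * bioset with enough inline vecs would do.
 */
static struct bio *bounce_bio_alloc_sketch(struct cache_set *c, unsigned bytes)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO,
					   DIV_ROUND_UP(bytes, PAGE_SIZE),
					   &c->bio_read_split);

	bch_bio_alloc_pages_pool(c, bio, bytes);
	return bio;
}

static void bounce_bio_free_sketch(struct cache_set *c, struct bio *bio)
{
	bch_bio_free_pages_pool(c, bio);	/* give the pages back to the pool */
	bio_put(bio);
}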
116 /* Bios with headers */
118 static void bch_submit_wbio(struct cache_set *c, struct bch_write_bio *wbio,
119 struct cache *ca, const struct bch_extent_ptr *ptr,
123 wbio->submit_time_us = local_clock_us();
124 wbio->bio.bi_iter.bi_sector = ptr->offset;
125 wbio->bio.bi_bdev = ca ? ca->disk_sb.bdev : NULL;
128 bcache_io_error(c, &wbio->bio, "device has been removed");
130 bch_generic_make_request(&wbio->bio, c);
132 generic_make_request(&wbio->bio);
135 void bch_submit_wbio_replicas(struct bch_write_bio *wbio, struct cache_set *c,
136 const struct bkey_i *k, bool punt)
138 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
139 const struct bch_extent_ptr *ptr;
140 struct bch_write_bio *n;
143 BUG_ON(c->opts.nochanges);
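	/*
	 * Replication fan-out: the loop below submits one wbio per pointer
	 * in the extent.  Every pointer but the last gets a clone of the
	 * original bio; the clone records the original in ->orig and bumps
	 * its __bi_remaining, so the caller's completion only runs once all
	 * replicas have finished.  The last pointer reuses the original
	 * wbio directly.
	 */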
148 extent_for_each_ptr(e, ptr) {
150 ca = PTR_CACHE(c, ptr);
152 percpu_ref_get(&ca->ref);
156 bch_submit_wbio(c, wbio, ca, ptr, punt);
160 if (ptr + 1 < &extent_entry_last(e)->ptr) {
161 n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
164 n->bio.bi_end_io = wbio->bio.bi_end_io;
165 n->bio.bi_private = wbio->bio.bi_private;
167 n->orig = &wbio->bio;
171 n->bio.bi_opf = wbio->bio.bi_opf;
172 __bio_inc_remaining(n->orig);
177 if (!journal_flushes_device(ca))
178 n->bio.bi_opf |= REQ_FUA;
180 bch_submit_wbio(c, n, ca, ptr, punt);
188 static struct workqueue_struct *index_update_wq(struct bch_write_op *op)
190 return op->alloc_reserve == RESERVE_MOVINGGC
195 static void __bch_write(struct closure *);
197 static void bch_write_done(struct closure *cl)
199 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
201 BUG_ON(!(op->flags & BCH_WRITE_DONE));
203 if (!op->error && (op->flags & BCH_WRITE_FLUSH))
204 op->error = bch_journal_error(&op->c->journal);
206 bch_disk_reservation_put(op->c, &op->res);
207 percpu_ref_put(&op->c->writes);
208 bch_keylist_free(&op->insert_keys, op->inline_keys);
212 static u64 keylist_sectors(struct keylist *keys)
217 for_each_keylist_key(keys, k)
223 static int bch_write_index_default(struct bch_write_op *op)
225 struct keylist *keys = &op->insert_keys;
226 struct btree_iter iter;
229 bch_btree_iter_init_intent(&iter, op->c, BTREE_ID_EXTENTS,
230 bkey_start_pos(&bch_keylist_front(keys)->k));
232 ret = bch_btree_insert_list_at(&iter, keys, &op->res,
233 NULL, op_journal_seq(op),
234 BTREE_INSERT_NOFAIL);
235 bch_btree_iter_unlock(&iter);
241 * bch_write_index - after a write, update index to point to new data
243 static void bch_write_index(struct closure *cl)
245 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
246 struct cache_set *c = op->c;
247 struct keylist *keys = &op->insert_keys;
250 op->flags |= BCH_WRITE_LOOPED;
252 if (!bch_keylist_empty(keys)) {
253 u64 sectors_start = keylist_sectors(keys);
254 int ret = op->index_update_fn(op);
256 BUG_ON(keylist_sectors(keys) && !ret);
258 op->written += sectors_start - keylist_sectors(keys);
261 __bcache_io_error(c, "btree IO error %i", ret);
266 for (i = 0; i < ARRAY_SIZE(op->open_buckets); i++)
267 if (op->open_buckets[i]) {
268 bch_open_bucket_put(c,
270 op->open_buckets[i]);
271 op->open_buckets[i] = 0;
274 if (!(op->flags & BCH_WRITE_DONE))
275 continue_at(cl, __bch_write, op->io_wq);
277 if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
278 bch_journal_flush_seq_async(&c->journal,
281 continue_at(cl, bch_write_done, index_update_wq(op));
283 continue_at_nobarrier(cl, bch_write_done, NULL);
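/*
 * Write path flow, for orientation: __bch_write() allocates space and
 * issues the data writes; bch_write_index() (above) inserts the
 * resulting keys, looping back to __bch_write() via op->io_wq until
 * BCH_WRITE_DONE is set; bch_write_done() then checks the journal for
 * FLUSH writes and drops the disk reservation and the c->writes ref.
 */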
288 * bch_write_discard - discard range of keys
290 * Used to implement discard, and to handle the case where a writethrough
291 * write hits a write error on the cache device.
293 static void bch_write_discard(struct closure *cl)
295 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
296 struct bio *bio = &op->bio->bio;
297 struct bpos end = op->pos;
299 end.offset += bio_sectors(bio);
301 op->error = bch_discard(op->c, op->pos, end, op->version,
302 &op->res, NULL, NULL);
306 * Convert extents to be inserted to discards after an error:
308 static void bch_write_io_error(struct closure *cl)
310 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
312 if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
313 struct bkey_i *src = bch_keylist_front(&op->insert_keys);
314 struct bkey_i *dst = bch_keylist_front(&op->insert_keys);
317 * Our data write just errored, which means we've got a bunch
318 * of keys to insert that point to data that wasn't
319 * successfully written.
321 * We don't have to insert those keys but we still have to
322 * invalidate that region of the cache - so, if we just strip
323 * off all the pointers from the keys we'll accomplish just
327 while (src != op->insert_keys.top) {
328 struct bkey_i *n = bkey_next(src);
330 set_bkey_val_u64s(&src->k, 0);
331 src->k.type = KEY_TYPE_DISCARD;
334 dst = bkey_next(dst);
338 op->insert_keys.top = dst;
339 op->flags |= BCH_WRITE_DISCARD;
341 /* TODO: We could try to recover from this. */
342 while (!bch_keylist_empty(&op->insert_keys))
343 bch_keylist_pop_front(&op->insert_keys);
346 op->flags |= BCH_WRITE_DONE;
352 static void bch_write_endio(struct bio *bio)
354 struct closure *cl = bio->bi_private;
355 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
356 struct bch_write_bio *wbio = to_wbio(bio);
357 struct cache_set *c = wbio->c;
358 struct bio *orig = wbio->orig;
359 struct cache *ca = wbio->ca;
361 if (cache_nonfatal_io_err_on(bio->bi_error, ca,
363 set_closure_fn(cl, bch_write_io_error, index_update_wq(op));
365 bch_account_io_completion_time(ca, wbio->submit_time_us,
368 percpu_ref_put(&ca->ref);
370 if (bio->bi_error && orig)
371 orig->bi_error = bio->bi_error;
374 bch_bio_free_pages_pool(c, bio);
385 static struct nonce extent_nonce(struct bversion version,
386 unsigned nonce,
387 unsigned uncompressed_size,
388 unsigned compression_type)
390 return (struct nonce) {{
391 [0] = cpu_to_le32((nonce << 12) |
392 (uncompressed_size << 22)),
393 [1] = cpu_to_le32(version.lo),
394 [2] = cpu_to_le32(version.lo >> 32),
395 [3] = cpu_to_le32(version.hi |
396 (compression_type << 24)) ^ BCH_NONCE_EXTENT,
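/*
 * Nonce layout, as packed above: word 0 carries the per-extent crc
 * nonce (shifted up by 12) and the uncompressed size (shifted up by
 * 22); words 1-2 hold the 64-bit version.lo; word 3 mixes version.hi
 * with the compression type and is xored with BCH_NONCE_EXTENT,
 * presumably to keep extent nonces distinct from other nonce domains.
 */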
400 static void init_append_extent(struct bch_write_op *op,
401 unsigned compressed_size,
402 unsigned uncompressed_size,
403 unsigned compression_type,
405 struct bch_csum csum, unsigned csum_type,
406 struct open_bucket *ob)
408 struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);
410 op->pos.offset += uncompressed_size;
412 e->k.size = uncompressed_size;
413 e->k.version = op->version;
414 bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);
416 bch_extent_crc_append(e, compressed_size,
419 nonce, csum, csum_type);
421 bch_alloc_sectors_append_ptrs(op->c, e, op->nr_replicas,
422 ob, compressed_size);
424 bkey_extent_set_cached(&e->k, (op->flags & BCH_WRITE_CACHED));
425 bch_keylist_push(&op->insert_keys);
428 static int bch_write_extent(struct bch_write_op *op,
429 struct open_bucket *ob,
432 struct cache_set *c = op->c;
434 struct bch_write_bio *wbio;
435 unsigned key_to_write_offset = op->insert_keys.top_p -
436 op->insert_keys.keys_p;
437 struct bkey_i *key_to_write;
438 unsigned csum_type = op->csum_type;
439 unsigned compression_type = op->compression_type;
442 /* don't refetch csum type/compression type */
445 /* Need to decompress data? */
446 if ((op->flags & BCH_WRITE_DATA_COMPRESSED) &&
447 (crc_uncompressed_size(NULL, &op->crc) != op->size ||
448 crc_compressed_size(NULL, &op->crc) > ob->sectors_free)) {
451 ret = bch_bio_uncompress_inplace(c, orig, op->size, op->crc);
455 op->flags &= ~BCH_WRITE_DATA_COMPRESSED;
458 if (op->flags & BCH_WRITE_DATA_COMPRESSED) {
459 init_append_extent(op,
460 crc_compressed_size(NULL, &op->crc),
461 crc_uncompressed_size(NULL, &op->crc),
462 op->crc.compression_type,
471 wbio->bounce = false;
472 wbio->put_bio = false;
474 } else if (csum_type != BCH_CSUM_NONE ||
475 compression_type != BCH_COMPRESSION_NONE) {
476 /* all units here in bytes */
477 unsigned total_output = 0, output_available =
478 min(ob->sectors_free << 9, orig->bi_iter.bi_size);
479 unsigned crc_nonce = bch_csum_type_is_encryption(csum_type)
481 struct bch_csum csum;
484 bio = bio_alloc_bioset(GFP_NOIO,
485 DIV_ROUND_UP(output_available, PAGE_SIZE),
488 * XXX: can't use mempool for more than
489 * BCH_COMPRESSED_EXTENT_MAX worth of pages
491 bch_bio_alloc_pages_pool(c, bio, output_available);
493 /* copy WRITE_SYNC flag */
494 bio->bi_opf = orig->bi_opf;
498 wbio->put_bio = true;
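		/*
		 * The do/while below carves the input into fragments: each
		 * pass compresses as much as fits, encrypts and checksums the
		 * result (over the compressed length only), and appends one
		 * extent key for it.  It stops when the input or the output
		 * space runs out, or the keylist can't be grown any further.
		 */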
501 unsigned fragment_compression_type = compression_type;
502 size_t dst_len, src_len;
504 bch_bio_compress(c, bio, &dst_len,
506 &fragment_compression_type);
508 BUG_ON(!dst_len || dst_len > bio->bi_iter.bi_size);
509 BUG_ON(!src_len || src_len > orig->bi_iter.bi_size);
510 BUG_ON(dst_len & (block_bytes(c) - 1));
511 BUG_ON(src_len & (block_bytes(c) - 1));
513 swap(bio->bi_iter.bi_size, dst_len);
514 nonce = extent_nonce(op->version,
519 bch_encrypt_bio(c, csum_type, nonce, bio);
521 csum = bch_checksum_bio(c, csum_type, nonce, bio);
522 swap(bio->bi_iter.bi_size, dst_len);
524 init_append_extent(op,
525 dst_len >> 9, src_len >> 9,
526 fragment_compression_type,
527 crc_nonce, csum, csum_type, ob);
529 total_output += dst_len;
530 bio_advance(bio, dst_len);
531 bio_advance(orig, src_len);
532 } while (bio->bi_iter.bi_size &&
533 orig->bi_iter.bi_size &&
534 !bch_keylist_realloc(&op->insert_keys,
536 ARRAY_SIZE(op->inline_keys),
537 BKEY_EXTENT_U64s_MAX));
539 BUG_ON(total_output > output_available);
541 memset(&bio->bi_iter, 0, sizeof(bio->bi_iter));
542 bio->bi_iter.bi_size = total_output;
545 * Free unneeded pages after compressing:
547 while (bio->bi_vcnt * PAGE_SIZE >
548 round_up(bio->bi_iter.bi_size, PAGE_SIZE))
549 mempool_free(bio->bi_io_vec[--bio->bi_vcnt].bv_page,
550 &c->bio_bounce_pages);
552 ret = orig->bi_iter.bi_size != 0;
554 bio = bio_next_split(orig, ob->sectors_free, GFP_NOIO,
559 wbio->bounce = false;
560 wbio->put_bio = bio != orig;
562 init_append_extent(op, bio_sectors(bio), bio_sectors(bio),
564 (struct bch_csum) { 0 }, csum_type, ob);
569 bio->bi_end_io = bch_write_endio;
570 bio->bi_private = &op->cl;
571 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
573 closure_get(bio->bi_private);
575 /* might have done a realloc... */
577 key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);
579 bch_check_mark_super(c, key_to_write, false);
581 #ifndef CONFIG_BCACHE_NO_IO
582 bch_submit_wbio_replicas(to_wbio(bio), c, key_to_write, false);
584 to_wbio(bio)->ca = NULL;
590 static void __bch_write(struct closure *cl)
592 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
593 struct cache_set *c = op->c;
594 struct bio *bio = &op->bio->bio;
595 unsigned open_bucket_nr = 0;
596 struct open_bucket *b;
599 memset(op->open_buckets, 0, sizeof(op->open_buckets));
601 if (op->flags & BCH_WRITE_DISCARD) {
602 op->flags |= BCH_WRITE_DONE;
603 bch_write_discard(cl);
605 continue_at(cl, bch_write_done, index_update_wq(op));
609 * Journal writes are marked REQ_PREFLUSH; if the original write was a
610 * flush, it'll wait on the journal write.
612 bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
615 EBUG_ON(bio->bi_iter.bi_sector != op->pos.offset);
616 EBUG_ON(!bio_sectors(bio));
618 if (open_bucket_nr == ARRAY_SIZE(op->open_buckets))
619 continue_at(cl, bch_write_index, index_update_wq(op));
621 /* for the device pointers and 1 for the checksum */
622 if (bch_keylist_realloc(&op->insert_keys,
624 ARRAY_SIZE(op->inline_keys),
625 BKEY_EXTENT_U64s_MAX))
626 continue_at(cl, bch_write_index, index_update_wq(op));
628 b = bch_alloc_sectors_start(c, op->wp, op->nr_replicas,
630 (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
633 if (unlikely(IS_ERR(b))) {
634 if (unlikely(PTR_ERR(b) != -EAGAIN)) {
640 * If we already have some keys, must insert them first
641 * before allocating another open bucket. We only hit
642 * this case if open_bucket_nr > 1.
644 if (!bch_keylist_empty(&op->insert_keys))
645 continue_at(cl, bch_write_index,
646 index_update_wq(op));
649 * If we've looped, we're running out of a workqueue -
650 * not the bch_write() caller's context - and we don't
651 * want to block the workqueue:
653 if (op->flags & BCH_WRITE_LOOPED)
654 continue_at(cl, __bch_write, op->io_wq);
657 * Otherwise, we do want to block the caller on alloc
658 * failure instead of letting it queue up more and more
660 * XXX: this technically needs a try_to_freeze() -
661 * except that that's not safe because caller may have
662 * issued other IO... hmm..
668 BUG_ON(b - c->open_buckets == 0 ||
669 b - c->open_buckets > U8_MAX);
670 op->open_buckets[open_bucket_nr++] = b - c->open_buckets;
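		/*
		 * Note the BUG_ON above: slot 0 of c->open_buckets is never
		 * handed out and the index must fit in a u8, so a zero in
		 * op->open_buckets[] unambiguously means "empty" - which is
		 * what bch_write_index() relies on when it puts and clears
		 * the entries.
		 */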
672 ret = bch_write_extent(op, b, bio);
674 bch_alloc_sectors_done(c, op->wp, b);
680 op->flags |= BCH_WRITE_DONE;
681 continue_at(cl, bch_write_index, index_update_wq(op));
683 if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
685 * If we were writing cached data, not doing the write is fine
686 * so long as we discard whatever would have been overwritten -
687 * then it's equivalent to doing the write and immediately
691 bch_write_discard(cl);
694 * Right now we can only error here if we went RO - the
695 * allocation failed, but we already checked for -ENOSPC when we
696 * got our reservation.
698 * XXX capacity might have changed, but we don't check for that
704 op->flags |= BCH_WRITE_DONE;
707 * No reason not to insert keys for whatever data was successfully
708 * written (especially for a cmpxchg operation that's moving data
711 continue_at(cl, !bch_keylist_empty(&op->insert_keys)
713 : bch_write_done, index_update_wq(op));
716 void bch_wake_delayed_writes(unsigned long data)
718 struct cache_set *c = (void *) data;
719 struct bch_write_op *op;
722 spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
724 while ((op = c->write_wait_head)) {
725 if (!test_bit(CACHE_SET_RO, &c->flags) &&
726 !test_bit(CACHE_SET_STOPPING, &c->flags) &&
727 time_after(op->expires, jiffies)) {
728 mod_timer(&c->foreground_write_wakeup, op->expires);
732 c->write_wait_head = op->next;
733 if (!c->write_wait_head)
734 c->write_wait_tail = NULL;
736 closure_put(&op->cl);
739 spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
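/*
 * Foreground write throttling, for orientation: bch_write() below rate
 * limits itself by taking an extra closure ref, linking the op onto
 * c->write_wait_head/tail with an expiry time, and arming
 * foreground_write_wakeup.  When the timer fires, the loop above walks
 * the list and the closure_put() releases that extra ref, which is what
 * finally lets the op's continue_at(__bch_write, ...) run.
 */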
743 * bch_write - handle a write to a cache device or flash only volume
745 * This is the starting point for any data to end up in a cache device; it could
746 * be from a normal write, or a writeback write, or a write to a flash only
747 * volume - it's also used by the moving garbage collector to compact data in
748 * mostly empty buckets.
750 * It first writes the data to the cache, creating a list of keys to be inserted
751 * (if the data won't fit in a single open bucket, there will be multiple keys);
752 * after the data is written it calls bch_journal, and after the keys have been
753 * added to the next journal write they're inserted into the btree.
755 * It inserts the data in op->bio; the key's inode and offset are taken from
756 * op->pos.
758 * If BCH_WRITE_DISCARD is set in op->flags, instead of inserting the data it
759 * invalidates the region described by op->pos and the size of op->bio.
761 void bch_write(struct closure *cl)
763 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
764 struct bio *bio = &op->bio->bio;
765 struct cache_set *c = op->c;
766 u64 inode = op->pos.inode;
768 trace_bcache_write(c, inode, bio,
769 !(op->flags & BCH_WRITE_CACHED),
770 op->flags & BCH_WRITE_DISCARD);
772 if (c->opts.nochanges ||
773 !percpu_ref_tryget(&c->writes)) {
774 __bcache_io_error(c, "read only");
776 bch_disk_reservation_put(c, &op->res);
780 if (bversion_zero(op->version) &&
781 bch_csum_type_is_encryption(op->csum_type))
782 op->version.lo =
783 atomic64_inc_return(&c->key_version) + 1;
785 if (!(op->flags & BCH_WRITE_DISCARD))
786 bch_increment_clock(c, bio_sectors(bio), WRITE);
788 if (!(op->flags & BCH_WRITE_DISCARD))
789 bch_mark_foreground_write(c, bio_sectors(bio));
791 bch_mark_discard(c, bio_sectors(bio));
793 /* Don't call bch_next_delay() if rate is >= 1 GB/sec */
795 if (c->foreground_write_ratelimit_enabled &&
796 c->foreground_write_pd.rate.rate < (1 << 30) &&
797 !(op->flags & BCH_WRITE_DISCARD) && op->wp->throttle) {
801 spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
802 bch_ratelimit_increment(&c->foreground_write_pd.rate,
803 bio->bi_iter.bi_size);
805 delay = bch_ratelimit_delay(&c->foreground_write_pd.rate);
807 if (delay >= HZ / 100) {
808 trace_bcache_write_throttle(c, inode, bio, delay);
810 closure_get(&op->cl); /* list takes a ref */
812 op->expires = jiffies + delay;
815 if (c->write_wait_tail)
816 c->write_wait_tail->next = op;
818 c->write_wait_head = op;
819 c->write_wait_tail = op;
821 if (!timer_pending(&c->foreground_write_wakeup))
822 mod_timer(&c->foreground_write_wakeup,
825 spin_unlock_irqrestore(&c->foreground_write_pd_lock,
827 continue_at(cl, __bch_write, index_update_wq(op));
830 spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
833 continue_at_nobarrier(cl, __bch_write, NULL);
836 void bch_write_op_init(struct bch_write_op *op, struct cache_set *c,
837 struct bch_write_bio *bio, struct disk_reservation res,
838 struct write_point *wp, struct bpos pos,
839 u64 *journal_seq, unsigned flags)
841 EBUG_ON(res.sectors && !res.nr_replicas);
844 op->io_wq = index_update_wq(op);
849 op->csum_type = bch_data_checksum_type(c);
850 op->compression_type = c->opts.compression;
851 op->nr_replicas = res.nr_replicas;
852 op->alloc_reserve = RESERVE_NONE;
855 op->version = ZERO_VERSION;
860 op->journal_seq_p = journal_seq;
861 op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
866 op->index_update_fn = bch_write_index_default;
868 bch_keylist_init(&op->insert_keys,
870 ARRAY_SIZE(op->inline_keys));
872 if (version_stress_test(c))
873 get_random_bytes(&op->version, sizeof(op->version));
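/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * initializes an op against its own write point and disk reservation,
 * then dispatches it through the closure machinery exactly as the
 * promote path does further down (closure_call(..., bch_write, ...)).
 * The op, wbio, reservation and write point are assumed to have been
 * set up by the caller and to outlive the write.
 */
static void bch_write_usage_sketch(struct cache_set *c,
				   struct bch_write_op *op,
				   struct bch_write_bio *wbio,
				   struct disk_reservation res,
				   struct write_point *wp,
				   u64 inode, u64 sector,
				   struct closure *parent)
{
	/* data to write is in wbio->bio; the key's inode:offset comes from pos */
	bch_write_op_init(op, c, wbio, res, wp, POS(inode, sector),
			  NULL, BCH_WRITE_FLUSH);

	/* parent is released once data, journal and index update complete */
	closure_call(&op->cl, bch_write, c->wq, parent);
}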
878 /* bch_discard - discard a range of keys from start to end
880 * @start position to start discarding at
881 * NOTE: the discard range begins at start.offset
882 * @end position to stop discarding at
883 * NOTE: the discard range ends at end.offset
884 * @version version of discard (0ULL if none)
884 * @version version of discard (0ULL if none)
890 * XXX: this needs to be refactored with inode_truncate, or more
891 * appropriately inode_truncate should call this
893 int bch_discard(struct cache_set *c, struct bpos start,
894 struct bpos end, struct bversion version,
895 struct disk_reservation *disk_res,
896 struct extent_insert_hook *hook,
899 return bch_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version,
900 disk_res, hook, journal_seq);
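/*
 * Usage sketch (illustrative): invalidate every extent of @inode in the
 * range [start_sector, end_sector).  A plain discard needs no
 * reservation, insert hook or journal sequence pointer.
 */
static int bch_discard_range_sketch(struct cache_set *c, u64 inode,
				    u64 start_sector, u64 end_sector)
{
	return bch_discard(c, POS(inode, start_sector),
			   POS(inode, end_sector), ZERO_VERSION,
			   NULL, NULL, NULL);
}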
903 /* Cache promotion on read */
905 struct cache_promote_op {
906 struct closure cl;
907 struct migrate_write write;
908 struct bio_vec bi_inline_vecs[0]; /* must be last */
913 static int bio_checksum_uncompress(struct cache_set *c,
914 struct bch_read_bio *rbio)
916 struct bio *src = &rbio->bio;
917 struct bio *dst = &bch_rbio_parent(rbio)->bio;
918 struct bvec_iter dst_iter = rbio->parent_iter;
919 struct nonce nonce = extent_nonce(rbio->version,
921 crc_uncompressed_size(NULL, &rbio->crc),
922 rbio->crc.compression_type);
923 struct bch_csum csum;
927 * reset iterator for checksumming and copying bounced data: here we've
928 * set rbio->compressed_size to the amount of data we actually read,
929 * which was not necessarily the full extent if we were only bouncing
930 * in order to promote
933 src->bi_iter.bi_size = crc_compressed_size(NULL, &rbio->crc) << 9;
934 src->bi_iter.bi_idx = 0;
935 src->bi_iter.bi_bvec_done = 0;
937 src->bi_iter = rbio->parent_iter;
940 csum = bch_checksum_bio(c, rbio->crc.csum_type, nonce, src);
941 if (cache_nonfatal_io_err_on(bch_crc_cmp(rbio->crc.csum, csum), rbio->ca,
942 "data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
943 rbio->inode, (u64) rbio->parent_iter.bi_sector << 9,
944 rbio->crc.csum.hi, rbio->crc.csum.lo, csum.hi, csum.lo,
945 rbio->crc.csum_type))
949 * If there was a checksum error, still copy the data back - unless it
950 * was compressed, we don't want to decompress bad data:
952 if (rbio->crc.compression_type != BCH_COMPRESSION_NONE) {
954 bch_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
955 ret = bch_bio_uncompress(c, src, dst,
956 dst_iter, rbio->crc);
958 __bcache_io_error(c, "decompression error");
960 } else if (rbio->bounce) {
961 bio_advance(src, rbio->crc.offset << 9);
963 /* don't need to decrypt the entire bio: */
964 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
965 src->bi_iter.bi_size = dst_iter.bi_size;
967 nonce = nonce_add(nonce, rbio->crc.offset << 9);
969 bch_encrypt_bio(c, rbio->crc.csum_type,
972 bio_copy_data_iter(dst, dst_iter,
975 bch_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
981 static void bch_rbio_free(struct cache_set *c, struct bch_read_bio *rbio)
983 struct bio *bio = &rbio->bio;
986 BUG_ON(!rbio->split);
989 kfree(rbio->promote);
991 bch_bio_free_pages_pool(c, bio);
996 static void bch_rbio_done(struct cache_set *c, struct bch_read_bio *rbio)
998 struct bio *orig = &bch_rbio_parent(rbio)->bio;
1000 percpu_ref_put(&rbio->ca->ref);
1004 if (rbio->bio.bi_error)
1005 orig->bi_error = rbio->bio.bi_error;
1008 bch_rbio_free(c, rbio);
1011 kfree(rbio->promote);
1013 orig->bi_end_io = rbio->orig_bi_end_io;
1014 bio_endio_nodec(orig);
1019 * Decide if we want to retry the read - if it is retried, the rbio is
1020 * requeued internally; otherwise the error is passed up to the parent bio
1022 static void bch_read_error_maybe_retry(struct cache_set *c,
1023 struct bch_read_bio *rbio,
1026 unsigned long flags;
1028 if ((error == -EINTR) &&
1029 (rbio->flags & BCH_READ_RETRY_IF_STALE)) {
1030 atomic_long_inc(&c->cache_read_races);
1034 if (error == -EIO) {
1035 /* io error - do we have another replica? */
1038 bch_rbio_parent(rbio)->bio.bi_error = error;
1039 bch_rbio_done(c, rbio);
1042 percpu_ref_put(&rbio->ca->ref);
1045 spin_lock_irqsave(&c->read_retry_lock, flags);
1046 bio_list_add(&c->read_retry_list, &rbio->bio);
1047 spin_unlock_irqrestore(&c->read_retry_lock, flags);
1048 queue_work(c->wq, &c->read_retry_work);
1051 static void cache_promote_done(struct closure *cl)
1053 struct cache_promote_op *op =
1054 container_of(cl, struct cache_promote_op, cl);
1056 bch_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio);
1060 /* Inner part that may run in process context */
1061 static void __bch_read_endio(struct cache_set *c, struct bch_read_bio *rbio)
1065 ret = bio_checksum_uncompress(c, rbio);
1067 bch_read_error_maybe_retry(c, rbio, ret);
1071 if (rbio->promote &&
1072 !test_bit(CACHE_SET_RO, &c->flags) &&
1073 !test_bit(CACHE_SET_STOPPING, &c->flags)) {
1074 struct cache_promote_op *promote = rbio->promote;
1075 struct closure *cl = &promote->cl;
1077 BUG_ON(!rbio->split || !rbio->bounce);
1079 /* we now own pages: */
1080 swap(promote->write.wbio.bio.bi_vcnt, rbio->bio.bi_vcnt);
1081 rbio->promote = NULL;
1083 bch_rbio_done(c, rbio);
1085 closure_init(cl, &c->cl);
1086 closure_call(&promote->write.op.cl, bch_write, c->wq, cl);
1087 closure_return_with_destructor(cl, cache_promote_done);
1089 bch_rbio_done(c, rbio);
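/*
 * Promotion hand-off, above: when the read was bounced into mempool
 * pages, ownership of those pages moves to the promote write via the
 * bi_vcnt swap, the original read is completed, and the write runs as a
 * child of the cache_set's closure.  cache_promote_done() returns the
 * pages to the pool once the promote write and its index update finish.
 */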
1093 void bch_bio_decompress_work(struct work_struct *work)
1095 struct bio_decompress_worker *d =
1096 container_of(work, struct bio_decompress_worker, work);
1097 struct llist_node *list, *next;
1098 struct bch_read_bio *rbio;
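	/*
	 * Reads of compressed extents can't be finished in interrupt
	 * context, so bch_read_endio() pushes them onto a per-cpu lock-free
	 * llist and kicks this worker.  llist_del_all() grabs the whole
	 * batch at once and llist_reverse_order() restores submission order
	 * before each rbio is completed via __bch_read_endio().
	 */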
1100 while ((list = llist_del_all(&d->bio_list)))
1101 for (list = llist_reverse_order(list);
1104 next = llist_next(list);
1105 rbio = container_of(list, struct bch_read_bio, list);
1107 __bch_read_endio(d->c, rbio);
1111 static void bch_read_endio(struct bio *bio)
1113 struct bch_read_bio *rbio =
1114 container_of(bio, struct bch_read_bio, bio);
1115 struct cache_set *c = rbio->ca->set;
1116 int stale = ((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1117 ptr_stale(rbio->ca, &rbio->ptr) ? -EINTR : 0;
1118 int error = bio->bi_error ?: stale;
1120 bch_account_io_completion_time(rbio->ca, rbio->submit_time_us, REQ_OP_READ);
1122 cache_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read");
1125 bch_read_error_maybe_retry(c, rbio, error);
1129 if (rbio->crc.compression_type != BCH_COMPRESSION_NONE) {
1130 struct bio_decompress_worker *d;
1133 d = this_cpu_ptr(c->bio_decompress_worker);
1134 llist_add(&rbio->list, &d->bio_list);
1135 queue_work(system_unbound_wq, &d->work);
1138 __bch_read_endio(c, rbio);
1142 void bch_read_extent_iter(struct cache_set *c, struct bch_read_bio *orig,
1143 struct bvec_iter iter, struct bkey_s_c k,
1144 struct extent_pick_ptr *pick, unsigned flags)
1146 struct bch_read_bio *rbio;
1147 struct cache_promote_op *promote_op = NULL;
1148 unsigned skip = iter.bi_sector - bkey_start_offset(k.k);
1149 bool bounce = false, split, read_full = false;
1151 EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
1152 k.k->p.offset < bvec_iter_end_sector(iter));
1154 /* only promote if we're not reading from the fastest tier: */
1157 * XXX: multiple promotes can race with each other, wastefully. Keep a
1158 * list of outstanding promotes?
1160 if ((flags & BCH_READ_PROMOTE) && pick->ca->mi.tier) {
1162 * biovec needs to be big enough to hold decompressed data, if
1163 * the bch_write_extent() has to decompress/recompress it:
1166 max_t(unsigned, k.k->size,
1167 crc_uncompressed_size(NULL, &pick->crc));
1168 unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
1170 promote_op = kmalloc(sizeof(*promote_op) +
1171 sizeof(struct bio_vec) * pages, GFP_NOIO);
1173 struct bio *promote_bio = &promote_op->write.wbio.bio;
1175 bio_init(promote_bio);
1176 promote_bio->bi_max_vecs = pages;
1177 promote_bio->bi_io_vec = promote_bio->bi_inline_vecs;
1179 /* could also set read_full */
1184 * note: if compression_type and crc_type both == none, then
1185 * compressed/uncompressed size is zero
1187 if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
1188 (pick->crc.csum_type != BCH_CSUM_NONE &&
1189 (bvec_iter_sectors(iter) != crc_uncompressed_size(NULL, &pick->crc) ||
1190 (flags & BCH_READ_FORCE_BOUNCE)))) {
1196 unsigned sectors = read_full
1197 ? (crc_compressed_size(NULL, &pick->crc) ?: k.k->size)
1198 : bvec_iter_sectors(iter);
1200 rbio = container_of(bio_alloc_bioset(GFP_NOIO,
1201 DIV_ROUND_UP(sectors, PAGE_SECTORS),
1202 &c->bio_read_split),
1203 struct bch_read_bio, bio);
1205 bch_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
1207 } else if (!(flags & BCH_READ_MAY_REUSE_BIO) ||
1208 !(flags & BCH_READ_IS_LAST)) {
1210 * Have to clone if there were any splits, due to error
1211 * reporting issues (if a split errored, and retrying didn't
1212 * work, when it reports the error to its parent (us) we don't
1213 * know if the error was from our bio, and we should retry, or
1214 * from the whole bio, in which case we don't want to retry and
1217 rbio = container_of(bio_clone_fast(&orig->bio,
1218 GFP_NOIO, &c->bio_read_split),
1219 struct bch_read_bio, bio);
1220 rbio->bio.bi_iter = iter;
1224 rbio->bio.bi_iter = iter;
1226 BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
1229 if (!(flags & BCH_READ_IS_LAST))
1230 __bio_inc_remaining(&orig->bio);
1233 rbio->parent = orig;
1235 rbio->orig_bi_end_io = orig->bio.bi_end_io;
1236 rbio->parent_iter = iter;
1238 rbio->inode = k.k->p.inode;
1239 rbio->flags = flags;
1240 rbio->bounce = bounce;
1241 rbio->split = split;
1242 rbio->version = k.k->version;
1243 rbio->crc = pick->crc;
1245 * crc.compressed_size will be 0 if there wasn't any checksum
1246 * information; we also need to stash the original size of the bio if we
1247 * bounced (which isn't necessarily the original key size, if we bounced
1248 * only for promoting)
1250 rbio->crc._compressed_size = bio_sectors(&rbio->bio) - 1;
1251 rbio->ptr = pick->ptr;
1252 rbio->ca = pick->ca;
1253 rbio->promote = promote_op;
1255 rbio->bio.bi_bdev = pick->ca->disk_sb.bdev;
1256 rbio->bio.bi_opf = orig->bio.bi_opf;
1257 rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
1258 rbio->bio.bi_end_io = bch_read_endio;
1261 struct bio *promote_bio = &promote_op->write.wbio.bio;
1263 promote_bio->bi_iter = rbio->bio.bi_iter;
1264 memcpy(promote_bio->bi_io_vec, rbio->bio.bi_io_vec,
1265 sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1267 bch_migrate_write_init(c, &promote_op->write,
1268 &c->promote_write_point,
1270 BCH_WRITE_ALLOC_NOWAIT|
1272 promote_op->write.promote = true;
1274 if (rbio->crc.compression_type) {
1275 promote_op->write.op.flags |= BCH_WRITE_DATA_COMPRESSED;
1276 promote_op->write.op.crc = rbio->crc;
1277 promote_op->write.op.size = k.k->size;
1278 } else if (read_full) {
1280 * Adjust bio to correspond to _live_ portion of @k -
1281 * which might be less than what we're actually reading:
1283 bio_advance(promote_bio, rbio->crc.offset << 9);
1284 BUG_ON(bio_sectors(promote_bio) < k.k->size);
1285 promote_bio->bi_iter.bi_size = k.k->size << 9;
1288 * Set insert pos to correspond to what we're actually
1291 promote_op->write.op.pos.offset = iter.bi_sector;
1294 promote_bio->bi_iter.bi_sector =
1295 promote_op->write.op.pos.offset;
1298 /* _after_ promote stuff has looked at rbio->crc.offset */
1300 rbio->crc.offset += skip;
1302 rbio->bio.bi_iter.bi_sector += skip;
1304 rbio->submit_time_us = local_clock_us();
1306 #ifndef CONFIG_BCACHE_NO_IO
1307 generic_make_request(&rbio->bio);
1309 bio_endio(&rbio->bio);
1313 static void bch_read_iter(struct cache_set *c, struct bch_read_bio *rbio,
1314 struct bvec_iter bvec_iter, u64 inode,
1317 struct bio *bio = &rbio->bio;
1318 struct btree_iter iter;
1322 for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS,
1323 POS(inode, bvec_iter.bi_sector), k) {
1325 struct extent_pick_ptr pick;
1326 unsigned bytes, sectors;
1330 * Unlock the iterator while the btree node's lock is still in
1331 * cache, before doing the IO:
1333 bkey_reassemble(&tmp.k, k);
1334 k = bkey_i_to_s_c(&tmp.k);
1335 bch_btree_iter_unlock(&iter);
1337 bch_extent_pick_ptr(c, k, &pick);
1338 if (IS_ERR(pick.ca)) {
1339 bcache_io_error(c, bio, "no device to read from");
1344 sectors = min_t(u64, k.k->p.offset,
1345 bvec_iter_end_sector(bvec_iter)) -
1346 bvec_iter.bi_sector;
1347 bytes = sectors << 9;
1348 is_last = bytes == bvec_iter.bi_size;
1349 swap(bvec_iter.bi_size, bytes);
1352 flags |= BCH_READ_IS_LAST;
1355 PTR_BUCKET(pick.ca, &pick.ptr)->read_prio =
1356 c->prio_clock[READ].hand;
1358 bch_read_extent_iter(c, rbio, bvec_iter,
1361 flags &= ~BCH_READ_MAY_REUSE_BIO;
1363 zero_fill_bio_iter(bio, bvec_iter);
1372 swap(bvec_iter.bi_size, bytes);
1373 bio_advance_iter(bio, &bvec_iter, bytes);
1377 * If we get here, it better have been because there was an error
1378 * reading a btree node
1380 ret = bch_btree_iter_unlock(&iter);
1382 bcache_io_error(c, bio, "btree IO error %i", ret);
1386 void bch_read(struct cache_set *c, struct bch_read_bio *bio, u64 inode)
1388 bch_increment_clock(c, bio_sectors(&bio->bio), READ);
1390 bch_read_iter(c, bio, bio->bio.bi_iter, inode,
1391 BCH_READ_FORCE_BOUNCE|
1392 BCH_READ_RETRY_IF_STALE|
1394 BCH_READ_MAY_REUSE_BIO);
1396 EXPORT_SYMBOL(bch_read);
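/*
 * Usage sketch (illustrative, not part of the original file): issuing a
 * read through bch_read().  The caller owns a struct bch_read_bio whose
 * embedded bio already points at the destination pages; bch_read() then
 * walks the extent btree and fans the request out per extent.
 */
static void bch_read_usage_sketch(struct cache_set *c,
				  struct bch_read_bio *rbio,
				  u64 inode, u64 sector,
				  bio_end_io_t *endio)
{
	rbio->bio.bi_iter.bi_sector	= sector;
	rbio->bio.bi_end_io		= endio;
	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);

	bch_read(c, rbio, inode);
}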
1399 * bch_read_retry - re-submit a bio originally from bch_read()
1401 static void bch_read_retry(struct cache_set *c, struct bch_read_bio *rbio)
1403 struct bch_read_bio *parent = bch_rbio_parent(rbio);
1404 struct bvec_iter iter = rbio->parent_iter;
1405 u64 inode = rbio->inode;
1407 trace_bcache_read_retry(&rbio->bio);
1410 bch_rbio_free(c, rbio);
1412 rbio->bio.bi_end_io = rbio->orig_bi_end_io;
1414 bch_read_iter(c, parent, iter, inode,
1415 BCH_READ_FORCE_BOUNCE|
1416 BCH_READ_RETRY_IF_STALE|
1420 void bch_read_retry_work(struct work_struct *work)
1422 struct cache_set *c = container_of(work, struct cache_set,
1424 struct bch_read_bio *rbio;
1426 unsigned long flags;
1429 spin_lock_irqsave(&c->read_retry_lock, flags);
1430 bio = bio_list_pop(&c->read_retry_list);
1431 spin_unlock_irqrestore(&c->read_retry_lock, flags);
1436 rbio = container_of(bio, struct bch_read_bio, bio);
1437 bch_read_retry(c, rbio);
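/*
 * Retry path summary: bch_read_error_maybe_retry() passes hard errors up
 * to the parent bio, but reads that raced with a bucket being reused
 * (-EINTR with BCH_READ_RETRY_IF_STALE set) are queued on
 * c->read_retry_list instead; this work item drains that list and
 * re-walks the extent btree for each rbio so the data is fetched from
 * wherever it now lives.
 */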