/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
18 #include <linux/bio.h>
19 #include <linux/blkdev.h>
20 #include <linux/slab.h>
21 #include <linux/kernel.h>
	/*
	 * Per-status mapping from blk_status_t to the errno returned to
	 * callers and the human-readable name used in log messages.
	 * Indexed directly by the blk_status_t value.
	 * (NOTE(review): the array declaration and closing brace are
	 * elided in this extract.)
	 */
	[BLK_STS_OK] = { 0, "" },
	[BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
	[BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
	[BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
	[BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
	[BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
	[BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
	[BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
	[BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
	[BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR] = { -EIO, "I/O" },
46 int blk_status_to_errno(blk_status_t status)
48 int idx = (__force int)status;
50 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
52 return blk_errors[idx].err;
55 const char *blk_status_to_str(blk_status_t status)
57 int idx = (__force int)status;
59 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
60 return "(invalid error)";
61 return blk_errors[idx].name;
64 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
65 struct bio *src, struct bvec_iter *src_iter)
67 struct bio_vec src_bv, dst_bv;
71 while (src_iter->bi_size && dst_iter->bi_size) {
72 src_bv = bio_iter_iovec(src, *src_iter);
73 dst_bv = bio_iter_iovec(dst, *dst_iter);
75 bytes = min(src_bv.bv_len, dst_bv.bv_len);
77 src_p = kmap_atomic(src_bv.bv_page);
78 dst_p = kmap_atomic(dst_bv.bv_page);
80 memcpy(dst_p + dst_bv.bv_offset,
81 src_p + src_bv.bv_offset,
87 flush_dcache_page(dst_bv.bv_page);
89 bio_advance_iter(src, src_iter, bytes);
90 bio_advance_iter(dst, dst_iter, bytes);
95 * bio_copy_data - copy contents of data buffers from one bio to another
97 * @dst: destination bio
99 * Stops when it reaches the end of either @src or @dst - that is, copies
100 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
102 void bio_copy_data(struct bio *dst, struct bio *src)
104 struct bvec_iter src_iter = src->bi_iter;
105 struct bvec_iter dst_iter = dst->bi_iter;
107 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
110 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
114 struct bvec_iter iter;
116 __bio_for_each_segment(bv, bio, iter, start) {
117 char *data = bvec_kmap_irq(&bv, &flags);
118 memset(data, 0, bv.bv_len);
119 bvec_kunmap_irq(data, &flags);
123 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
125 bio_set_flag(bio, BIO_CLONED);
126 bio->bi_ioprio = bio_src->bi_ioprio;
127 bio->bi_iter = bio_src->bi_iter;
131 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
132 gfp_t gfp, struct bio_set *bs)
136 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
140 if (__bio_clone(bio, bio_src, gfp) < 0) {
144 bio->bi_io_vec = bio_src->bi_io_vec;
149 struct bio *bio_split(struct bio *bio, int sectors,
150 gfp_t gfp, struct bio_set *bs)
152 struct bio *split = NULL;
154 BUG_ON(sectors <= 0);
155 BUG_ON(sectors >= bio_sectors(bio));
157 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
161 split->bi_iter.bi_size = sectors << 9;
163 bio_advance(bio, split->bi_iter.bi_size);
168 void bio_free_pages(struct bio *bio)
170 struct bvec_iter_all iter;
171 struct bio_vec *bvec;
173 bio_for_each_segment_all(bvec, bio, iter)
174 __free_page(bvec->bv_page);
177 void bio_advance(struct bio *bio, unsigned bytes)
179 bio_advance_iter(bio, &bio->bi_iter, bytes);
182 static void bio_free(struct bio *bio)
184 struct bio_set *bs = bio->bi_pool;
187 if (bio->bi_max_vecs > BIO_INLINE_VECS)
188 mempool_free(bio->bi_io_vec, &bs->bvec_pool);
190 mempool_free((void *) bio - bs->front_pad, &bs->bio_pool);
/*
 * bio_put - drop a reference to a bio, freeing it on the last put.
 *
 * NOTE(review): the braces and the actual free path are elided in this
 * extract. Bios without BIO_REFFED are single-reference and presumably
 * skip the atomic refcounting entirely - confirm against the full file.
 */
void bio_put(struct bio *bio)
	if (!bio_flagged(bio, BIO_REFFED))
	/* Catch a put on a bio whose refcount has already hit zero. */
	BUG_ON(!atomic_read(&bio->__bi_cnt));
	/* Last reference dropped - the elided body presumably frees the bio. */
	if (atomic_dec_and_test(&bio->__bi_cnt))
/*
 * bio_add_page - append @len bytes of @page at offset @off to @bio.
 *
 * NOTE(review): the bvec field assignments, the bi_vcnt increment and
 * the return value are elided in this extract; only the slot lookup,
 * sanity checks and byte accounting are visible.
 */
int bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
	/* Next free slot in the bio's vector table. */
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	/* Clones share their source's bvec array and must not grow it. */
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio->bi_vcnt >= bio->bi_max_vecs);

	/* Account the new payload in the iterator's residual byte count. */
	bio->bi_iter.bi_size += len;
228 static inline bool bio_remaining_done(struct bio *bio)
231 * If we're not chaining, then ->__bi_remaining is always 1 and
232 * we always end io on the first invocation.
234 if (!bio_flagged(bio, BIO_CHAIN))
237 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
239 if (atomic_dec_and_test(&bio->__bi_remaining)) {
240 bio_clear_flag(bio, BIO_CHAIN);
247 static struct bio *__bio_chain_endio(struct bio *bio)
249 struct bio *parent = bio->bi_private;
251 if (!parent->bi_status)
252 parent->bi_status = bio->bi_status;
/* ->bi_end_io for chained bios: complete the parent when a child ends. */
static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}
262 void bio_endio(struct bio *bio)
265 if (!bio_remaining_done(bio))
269 * Need to have a real endio function for chained bios, otherwise
270 * various corner cases will break (like stacking block devices that
271 * save/restore bi_end_io) - however, we want to avoid unbounded
272 * recursion and blowing the stack. Tail call optimization would
273 * handle this, but compiling with frame pointers also disables
274 * gcc's sibling call optimization.
276 if (bio->bi_end_io == bio_chain_endio) {
277 bio = __bio_chain_endio(bio);
285 void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
287 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
289 memset(bio, 0, BIO_RESET_BYTES);
292 bio->bi_flags = flags;
293 atomic_set(&bio->__bi_remaining, 1);
296 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
300 bio = kmalloc(sizeof(struct bio) +
301 sizeof(struct bio_vec) * nr_iovecs, gfp_mask);
304 bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs, 0);
309 static struct bio_vec *bvec_alloc(mempool_t *pool, int *nr_vecs,
312 *nr_vecs = roundup_pow_of_two(*nr_vecs);
314 * Try a slab allocation first for all smaller allocations. If that
315 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
316 * The mempool is sized to handle up to BIO_MAX_VECS entries.
318 if (*nr_vecs < BIO_MAX_VECS) {
321 bvl = kmalloc(sizeof(*bvl) * *nr_vecs, gfp_mask);
324 *nr_vecs = BIO_MAX_VECS;
327 return mempool_alloc(pool, gfp_mask);
/**
 * bio_alloc_bioset - allocate a bio from a bio_set
 * @bdev: block device the bio will target
 *
 * NOTE(review): the remaining parameters (presumably nr_iovecs, opf,
 * gfp_mask and bs) and several error-handling lines are elided in
 * this extract - confirm against the full file.
 */
struct bio *bio_alloc_bioset(struct block_device *bdev,
	/* Cap: a single bio can never carry more than BIO_MAX_VECS vectors. */
	if (nr_iovecs > BIO_MAX_VECS)

	p = mempool_alloc(&bs->bio_pool, gfp_mask);

	/* The bio proper lives front_pad bytes into the pool element. */
	bio = p + bs->front_pad;
	if (nr_iovecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		/* External bvec array; bvec_alloc may round nr_iovecs up. */
		bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);

		bio_init(bio, bdev, bvl, nr_iovecs, opf);
	} else if (nr_iovecs) {
		/* Small requests use the inline vecs embedded after the bio. */
		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
		/* Zero-vector bios (e.g. empty flushes) carry no data pages. */
		bio_init(bio, bdev, NULL, 0, opf);

	/* Error path: return the element to the bio mempool. */
	mempool_free(p, &bs->bio_pool);
369 void bioset_exit(struct bio_set *bs)
371 mempool_exit(&bs->bio_pool);
372 mempool_exit(&bs->bvec_pool);
/*
 * bioset_init - initialize a bio_set's backing mempools.
 *
 * NOTE(review): the trailing parameter(s) (presumably an int flags),
 * the opening brace, and the function's tail (return/error handling)
 * are elided in this extract - confirm against the full file.
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
	bs->front_pad = front_pad;
	/* Reserve room behind each bio for the inline bvec array. */
	if (flags & BIOSET_NEED_BVECS)
		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);

	/*
	 * Size each bio-pool element for front padding + the bio itself +
	 * the inline vecs; the bvec pool holds full BIO_MAX_VECS arrays.
	 * The ?: chain stops at the first pool that fails to initialize.
	 */
	ret = mempool_init_kmalloc_pool(&bs->bio_pool, pool_size, bs->front_pad +
			sizeof(struct bio) + bs->back_pad) ?:
		mempool_init_kmalloc_pool(&bs->bvec_pool, pool_size,
			sizeof(struct bio_vec) * BIO_MAX_VECS);