2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 #include <linux/bio.h>
19 #include <linux/blkdev.h>
20 #include <linux/slab.h>
21 #include <linux/kernel.h>
22 #include <linux/export.h>
24 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
25 struct bio *src, struct bvec_iter *src_iter)
27 struct bio_vec src_bv, dst_bv;
31 while (src_iter->bi_size && dst_iter->bi_size) {
32 src_bv = bio_iter_iovec(src, *src_iter);
33 dst_bv = bio_iter_iovec(dst, *dst_iter);
35 bytes = min(src_bv.bv_len, dst_bv.bv_len);
37 src_p = kmap_atomic(src_bv.bv_page);
38 dst_p = kmap_atomic(dst_bv.bv_page);
40 memcpy(dst_p + dst_bv.bv_offset,
41 src_p + src_bv.bv_offset,
47 flush_dcache_page(dst_bv.bv_page);
49 bio_advance_iter(src, src_iter, bytes);
50 bio_advance_iter(dst, dst_iter, bytes);
55 * bio_copy_data - copy contents of data buffers from one bio to another
57 * @dst: destination bio
59 * Stops when it reaches the end of either @src or @dst - that is, copies
60 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
62 void bio_copy_data(struct bio *dst, struct bio *src)
64 struct bvec_iter src_iter = src->bi_iter;
65 struct bvec_iter dst_iter = dst->bi_iter;
67 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
70 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
74 struct bvec_iter iter;
76 __bio_for_each_segment(bv, bio, iter, start) {
77 char *data = bvec_kmap_irq(&bv, &flags);
78 memset(data, 0, bv.bv_len);
79 bvec_kunmap_irq(data, &flags);
83 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
86 * most users will be overriding ->bi_bdev with a new target,
87 * so we don't set nor calculate new physical/hw segment counts here
89 bio->bi_bdev = bio_src->bi_bdev;
90 bio_set_flag(bio, BIO_CLONED);
91 bio->bi_opf = bio_src->bi_opf;
92 bio->bi_iter = bio_src->bi_iter;
93 bio->bi_io_vec = bio_src->bi_io_vec;
96 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
100 b = bio_alloc_bioset(gfp_mask, 0, bs);
104 __bio_clone_fast(b, bio);
108 struct bio *bio_split(struct bio *bio, int sectors,
109 gfp_t gfp, struct bio_set *bs)
111 struct bio *split = NULL;
113 BUG_ON(sectors <= 0);
114 BUG_ON(sectors >= bio_sectors(bio));
117 * Discards need a mutable bio_vec to accommodate the payload
118 * required by the DSM TRIM and UNMAP commands.
120 if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
121 split = bio_clone_bioset(bio, gfp, bs);
123 split = bio_clone_fast(bio, gfp, bs);
128 split->bi_iter.bi_size = sectors << 9;
130 bio_advance(bio, split->bi_iter.bi_size);
135 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
140 bio_for_each_segment_all(bv, bio, i) {
141 bv->bv_page = alloc_page(gfp_mask);
143 while (--bv >= bio->bi_io_vec)
144 __free_page(bv->bv_page);
152 void bio_advance(struct bio *bio, unsigned bytes)
154 bio_advance_iter(bio, &bio->bi_iter, bytes);
157 static void bio_free(struct bio *bio)
159 unsigned front_pad = bio->bi_pool ? bio->bi_pool->front_pad : 0;
161 kfree((void *) bio - front_pad);
164 void bio_put(struct bio *bio)
166 if (!bio_flagged(bio, BIO_REFFED))
169 BUG_ON(!atomic_read(&bio->__bi_cnt));
174 if (atomic_dec_and_test(&bio->__bi_cnt))
179 static inline bool bio_remaining_done(struct bio *bio)
182 * If we're not chaining, then ->__bi_remaining is always 1 and
183 * we always end io on the first invocation.
185 if (!bio_flagged(bio, BIO_CHAIN))
188 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
190 if (atomic_dec_and_test(&bio->__bi_remaining)) {
191 bio_clear_flag(bio, BIO_CHAIN);
198 static struct bio *__bio_chain_endio(struct bio *bio)
200 struct bio *parent = bio->bi_private;
202 if (!parent->bi_error)
203 parent->bi_error = bio->bi_error;
/* endio callback installed on chained bios: complete child, then parent */
static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}
213 void bio_endio(struct bio *bio)
216 if (!bio_remaining_done(bio))
220 * Need to have a real endio function for chained bios, otherwise
221 * various corner cases will break (like stacking block devices that
222 * save/restore bi_end_io) - however, we want to avoid unbounded
223 * recursion and blowing the stack. Tail call optimization would
224 * handle this, but compiling with frame pointers also disables
225 * gcc's sibling call optimization.
227 if (bio->bi_end_io == bio_chain_endio) {
228 bio = __bio_chain_endio(bio);
236 void bio_endio_nodec(struct bio *bio)
241 if (unlikely(!bio_remaining_done(bio)))
244 if (bio->bi_end_io == bio_chain_endio) {
245 struct bio *parent = bio->bi_private;
246 parent->bi_error = bio->bi_error;
257 void bio_reset(struct bio *bio)
259 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
261 memset(bio, 0, BIO_RESET_BYTES);
262 bio->bi_flags = flags;
263 atomic_set(&bio->__bi_remaining, 1);
266 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
268 unsigned front_pad = bs ? bs->front_pad : 0;
272 p = kmalloc(front_pad +
274 nr_iovecs * sizeof(struct bio_vec),
281 bio_init(bio, bio->bi_inline_vecs, nr_iovecs);
287 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
290 struct bvec_iter iter;
294 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
298 bio->bi_bdev = bio_src->bi_bdev;
299 bio->bi_opf = bio_src->bi_opf;
300 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
301 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
303 switch (bio_op(bio)) {
305 case REQ_OP_SECURE_ERASE:
307 case REQ_OP_WRITE_SAME:
308 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
311 bio_for_each_segment(bv, bio_src, iter)
312 bio->bi_io_vec[bio->bi_vcnt++] = bv;