/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>

void bio_copy_data_iter(struct bio *dst, struct bvec_iter dst_iter,
			struct bio *src, struct bvec_iter src_iter)
{
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	while (1) {
		/* when an iterator runs dry, advance to the next chained bio */
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;
			src_iter = src->bi_iter;
		}
		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;
			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);
		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);
		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);
		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}

void bio_copy_data(struct bio *dst, struct bio *src)
{
	bio_copy_data_iter(dst, dst->bi_iter,
			   src, src->bi_iter);
}

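/*
 * Usage sketch (illustrative, not part of the original file): copy the
 * payload of @src into @dst where both bios already have pages attached
 * and describe the same number of bytes. bio_copy_data() walks the two
 * bvec arrays with independent iterators, so differing segment layouts
 * are fine. example_copy_payload is a hypothetical helper name.
 */
static void __maybe_unused example_copy_payload(struct bio *dst,
						struct bio *src)
{
	BUG_ON(dst->bi_iter.bi_size != src->bi_iter.bi_size);
	bio_copy_data(dst, src);
}
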
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		bvec_kunmap_irq(data, &flags);
	}
}

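/*
 * Usage sketch (illustrative): zero a bio's remaining payload from its
 * current position, e.g. to satisfy a read past end-of-device. Mainline
 * exposes this pattern as zero_fill_bio(); the wrapper name below is
 * hypothetical.
 */
static void __maybe_unused example_zero_fill(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}
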
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;
}

struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);
	return b;
}

struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/*
	 * Discards need a mutable bio_vec to accommodate the payload
	 * required by the DSM TRIM and UNMAP commands.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
		split = bio_clone_bioset(bio, gfp, bs);
	else
		split = bio_clone_fast(bio, gfp, bs);

	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	/* the original bio now describes only the remainder */
	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}

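/*
 * Usage sketch (illustrative, not part of the original file): the
 * canonical split-and-resubmit pattern, as used by callers such as
 * blk_queue_split() and md. bio_chain() and generic_make_request() are
 * assumed from the wider kernel API of this era; error handling for a
 * failed clone is elided.
 */
static void __maybe_unused example_split_and_submit(struct bio *bio,
						    int max_sectors,
						    struct bio_set *bs)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

		/* completion of @bio is now deferred until @split finishes */
		bio_chain(split, bio);
		generic_make_request(split);
	}
	generic_make_request(bio);
}
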
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			/* unwind the pages allocated so far */
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}

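/*
 * Usage sketch (illustrative): bio_alloc_pages() only fills in bv_page
 * for the first bi_vcnt segments, so the caller must set bi_vcnt and
 * each segment's bv_len/bv_offset first. Assumes the bio was allocated
 * with at least DIV_ROUND_UP(size, PAGE_SIZE) vecs; the helper name is
 * hypothetical.
 */
static int __maybe_unused example_alloc_backing(struct bio *bio, unsigned size)
{
	unsigned i;

	bio->bi_iter.bi_size = size;
	bio->bi_vcnt = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < bio->bi_vcnt; i++) {
		bio->bi_io_vec[i].bv_offset = 0;
		bio->bi_io_vec[i].bv_len = min_t(unsigned, PAGE_SIZE,
						 size - i * PAGE_SIZE);
	}

	return bio_alloc_pages(bio, GFP_KERNEL);
}
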
void bio_advance(struct bio *bio, unsigned bytes)
{
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}

static void bio_free(struct bio *bio)
{
	unsigned front_pad = bio->bi_pool ? bio->bi_pool->front_pad : 0;

	/* the allocation starts front_pad bytes before the bio itself */
	kfree((void *) bio - front_pad);
}

void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}

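/*
 * Usage sketch (illustrative): a typical ->bi_end_io callback for this
 * tree, which reports errors through bi_error (bi_status came later).
 * The submitter owns the int pointed to by bi_private in this sketch;
 * the callback name is hypothetical.
 */
static void __maybe_unused example_end_io(struct bio *bio)
{
	int *error = bio->bi_private;

	*error = bio->bi_error;
	bio_put(bio);		/* drop the submitter's reference */
}
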
void bio_endio_nodec(struct bio *bio)
{
	goto nodec;

	while (bio) {
		if (unlikely(!bio_remaining_done(bio)))
			break;
nodec:
		if (bio->bi_end_io == bio_chain_endio) {
			struct bio *parent = bio->bi_private;

			parent->bi_error = bio->bi_error;
			bio_put(bio);
			bio = parent;
		} else {
			if (bio->bi_end_io)
				bio->bi_end_io(bio);
			bio = NULL;
		}
	}
}

void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}

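/*
 * Usage sketch (illustrative): recycling a bio between I/Os. bio_reset()
 * zeroes everything below BIO_RESET_BYTES, so the device, sector and op
 * must be reassigned; only flags at or above BIO_RESET_BITS survive.
 * The helper name is hypothetical.
 */
static void __maybe_unused example_reuse(struct bio *bio,
					 struct block_device *bdev,
					 sector_t sector)
{
	bio_reset(bio);
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
}
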
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	unsigned front_pad = bs ? bs->front_pad : 0;
	struct bio *bio;
	void *p;

	p = kmalloc(front_pad +
		    sizeof(struct bio) +
		    nr_iovecs * sizeof(struct bio_vec),
		    gfp_mask);
	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);
	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bio->bi_inline_vecs;

	return bio;
}

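/*
 * Usage sketch (illustrative): in this kmalloc-backed implementation a
 * NULL bio_set is permitted and simply means no front padding, unlike
 * the mempool-backed mainline version where @bs selects the pool. The
 * helper name is hypothetical.
 */
static struct bio * __maybe_unused example_alloc(unsigned nr_pages)
{
	struct bio *bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, NULL);

	if (bio)
		bio->bi_opf = REQ_OP_WRITE;
	return bio;
}
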
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		/* no payload to copy */
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	return bio;
}
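
/*
 * Usage sketch (illustrative): take a full clone when the bvec array
 * itself must be modifiable, as bio_split() above does for discards.
 * Only the bvec table is copied; the pages remain shared with @src.
 * The helper name is hypothetical.
 */
static struct bio * __maybe_unused example_remap_clone(struct bio *src,
						       struct block_device *bdev,
						       struct bio_set *bs)
{
	struct bio *clone = bio_clone_bioset(src, GFP_NOIO, bs);

	if (clone)
		clone->bi_bdev = bdev;	/* retarget; pages still shared */
	return clone;
}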