/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
#include <linux/mempool.h>
#include <linux/bug.h>
#include <linux/err.h>

#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/workqueue.h>
#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)
#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)
#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE)
		return true;

	return false;
}
static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME;
}
static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio_no_advance_iter(bio))
		return false;

	return true;
}
static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}
static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}
static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
	 bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)
static inline struct bio_vec bio_iter_all_peek(const struct bio *bio,
					       struct bvec_iter_all *iter)
{
	if (WARN_ON(iter->idx >= bio->bi_vcnt))
		return (struct bio_vec) { NULL };

	return bvec_iter_all_peek(bio->bi_io_vec, iter);
}
static inline void bio_iter_all_advance(const struct bio *bio,
					struct bvec_iter_all *iter,
					unsigned bytes)
{
	bvec_iter_all_advance(bio->bi_io_vec, iter, bytes);

	WARN_ON(iter->idx > bio->bi_vcnt ||
		(iter->idx == bio->bi_vcnt && iter->done));
}
#define bio_for_each_segment_all_continue(bvl, bio, iter)	\
	for (;							\
	     iter.idx < bio->bi_vcnt &&				\
		((bvl = bio_iter_all_peek(bio, &iter)), true);	\
	     bio_iter_all_advance((bio), &iter, bvl.bv_len))
/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter)		\
	for (bvec_iter_all_init(&iter);				\
	     iter.idx < (bio)->bi_vcnt &&			\
		((bvl = bio_iter_all_peek((bio), &iter)), true);\
	     bio_iter_all_advance((bio), &iter, bvl.bv_len))
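/*
 * Illustrative sketch (not part of this header): a bio owner walking every
 * page it attached, e.g. to free them on completion. Assumes this code
 * created the bio itself, per the warning above:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter_all iter;
 *
 *	bio_for_each_segment_all(bv, bio, iter)
 *		__free_page(bv.bv_page);
 */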
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}
#define __bio_for_each_segment(bvl, bio, iter, start)		\
	for (iter = (start);					\
	     (iter).bi_size &&					\
		((bvl = bio_iter_iovec((bio), (iter))), 1);	\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)			\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)		\
	__bio_for_each_segment(bvl, bio, iter, start)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
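/*
 * Illustrative sketch (not part of this header): iterating a bio's data
 * and zeroing each segment; zero_fill_bio() below does essentially this.
 * bio_iter_last(bv, iter) is true while the final segment is being visited.
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		memset(page_address(bv.bv_page) + bv.bv_offset,
 *		       0, bv.bv_len);
 */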
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same, because they interpret bi_size
	 * differently:
	 */

	if (bio_op(bio) == REQ_OP_DISCARD)
		return 1;

	if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		return 1;

	if (bio_op(bio) == REQ_OP_WRITE_SAME)
		return 1;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
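/*
 * Illustrative sketch (not part of this header): carving a bio into
 * fixed-size chunks. bio_next_split() returns the original bio once the
 * remainder fits, which terminates the loop. Assumes an initialized
 * bio_set *bs; submit_chunk() is a hypothetical helper, and allocation
 * failure is ignored for brevity:
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, 8, GFP_NOIO, bs);
 *		submit_chunk(split);
 *	} while (split != bio);
 */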
struct bio_set {
	unsigned int		front_pad;
	unsigned int		back_pad;
	/* ... */
};

void bioset_exit(struct bio_set *);
int bioset_init(struct bio_set *, unsigned, unsigned, int);

/*
 * Assumed body, matching the semantics of the old kernel API where
 * bioset_create() kmalloc'd the bio_set:
 */
static inline void bioset_free(struct bio_set *bs)
{
	bioset_exit(bs);
	kfree(bs);
}

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
enum {
	BIOSET_NEED_BVECS	= 1 << 0,
	BIOSET_NEED_RESCUER	= 1 << 1,
};
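/*
 * Illustrative sketch (not part of this header): setting up a bio_set with
 * a pool of 4 bios, no front padding, and bvec pools; bioset_exit() undoes
 * it. my_bioset is a hypothetical name:
 *
 *	static struct bio_set my_bioset;
 *
 *	if (bioset_init(&my_bioset, 4, 0, BIOSET_NEED_BVECS))
 *		return -ENOMEM;
 */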
struct bio *bio_alloc_bioset(struct block_device *, unsigned,
			     unsigned, gfp_t, struct bio_set *);
extern void bio_put(struct bio *);
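/*
 * Illustrative sketch (not part of this header): bio_get()/bio_put()
 * pairing when the caller must look at a bio after submitting it:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_status)
 *		handle_error();
 *	bio_put(bio);
 */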
int bio_add_page(struct bio *, struct page *, unsigned, unsigned);

struct bio *bio_alloc_clone(struct block_device *, struct bio *,
			    gfp_t, struct bio_set *);

struct bio *bio_kmalloc(unsigned int, gfp_t);

extern void bio_endio(struct bio *);

extern void bio_advance(struct bio *, unsigned);

extern void bio_reset(struct bio *, struct block_device *, unsigned);
void bio_chain(struct bio *, struct bio *);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);

void bio_free_pages(struct bio *bio);

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}
#define bio_set_dev(bio, bdev)				\
do {							\
	(bio)->bi_bdev = (bdev);			\
} while (0)

#define bio_copy_dev(dst, src)				\
do {							\
	(dst)->bi_bdev = (src)->bi_bdev;		\
} while (0)
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}

#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }
#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}
static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	if (!bl->tail)
		bl->tail = bio;

	bl->head = bio;
}
static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}
static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}
static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}
static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}
static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
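/*
 * Illustrative sketch (not part of this header): using a bio_list as a
 * simple FIFO, e.g. to defer submissions. some_bio is a hypothetical
 * previously built bio:
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, some_bio);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio(bio);
 */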
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
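/*
 * Illustrative sketch (not part of this header): a driver completing one
 * bio from two independent paths takes an extra remaining reference, so
 * only the second bio_endio() call truly completes it. start_path_a/b are
 * hypothetical:
 *
 *	bio_inc_remaining(bio);
 *	start_path_a(bio);
 *	start_path_b(bio);
 *	(each path calls bio_endio(bio) when done)
 */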
static inline void bio_init(struct bio *bio,
			    struct block_device *bdev,
			    struct bio_vec *table,
			    unsigned short max_vecs,
			    unsigned int opf)
{
	memset(bio, 0, sizeof(*bio));
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
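/*
 * Illustrative sketch (not part of this header): a short-lived, on-stack
 * bio with a single inline bvec, assuming an already-opened *bdev and an
 * allocated *page:
 *
 *	struct bio_vec bv;
 *	struct bio b;
 *
 *	bio_init(&b, bdev, &bv, 1, REQ_OP_READ);
 *	b.bi_iter.bi_sector = 0;
 *	bio_add_page(&b, page, PAGE_SIZE, 0);
 */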
#endif /* __LINUX_BIO_H */