/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
#include <linux/bug.h>
#include <linux/err.h>

#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/workqueue.h>

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
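
/*
 * Worked example of the sector arithmetic above (illustrative only, not part
 * of the original header): a bio with bi_iter.bi_sector == 2048 and
 * bi_iter.bi_size == 4096 bytes spans bio_sectors() == 4096 >> 9 == 8
 * 512-byte sectors, so bio_end_sector() == 2048 + 8 == 2056.
 */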

static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME;
}

static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio_no_advance_iter(bio))
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
	 bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)

#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
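
/*
 * Illustrative usage sketch (not part of the original header; "bio" is
 * assumed to be a live struct bio * obtained elsewhere). Walking every
 * segment without touching bio->bi_iter itself:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned bytes = 0;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		bytes += bv.bv_len;
 *
 * bio_for_each_segment_all() above instead indexes the raw bi_io_vec array,
 * so it should only be used on a bio that owns its vector (not on clones
 * that share the parent's bi_io_vec).
 */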

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same, because they interpret bi_size
	 * differently:
	 */

	if (bio_op(bio) == REQ_OP_DISCARD)
		return 1;

	if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		return 1;

	if (bio_op(bio) == REQ_OP_WRITE_SAME)
		return 1;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
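
/*
 * Example of the special case above (illustrative only): a REQ_OP_DISCARD
 * bio may carry a large bi_iter.bi_size with no data pages attached, so
 * counting data segments would be meaningless; bio_segments() reports 1.
 */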

static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}
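
/*
 * Worked example for the bi_bvec_done clamp above (illustrative only): if
 * the bio ends 1024 bytes into a 4096-byte bvec, iter.bi_bvec_done == 1024
 * after advancing by bi_size, so the returned bvec's bv_len is trimmed from
 * 4096 to 1024.
 */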

extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
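
/*
 * Illustrative sketch (not part of the original header): peeling fixed-size
 * chunks off a bio, assuming a bio_set "my_bs" initialized elsewhere and a
 * hypothetical per-chunk handler:
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, 8, GFP_NOIO, &my_bs);
 *		handle_chunk(split);
 *	} while (split != bio);
 *
 * The loop terminates because bio_next_split() returns the original bio
 * itself once the whole remainder fits in the requested number of sectors.
 */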

struct bio_set {
	unsigned int		front_pad;
};

static inline void bioset_exit(struct bio_set *bs) {}

static inline void bioset_free(struct bio_set *bs)
{
	kfree(bs);
}

static inline int bioset_init(struct bio_set *bs,
			      unsigned int pool_size,
			      unsigned int front_pad,
			      int flags)
{
	bs->front_pad = front_pad;
	return 0;
}

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern void bio_endio(struct bio *);
extern void bio_endio_nodec(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}

extern void bio_advance(struct bio *, unsigned);

extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

static inline void bio_flush_dcache_pages(struct bio *bi)
{
}

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
}

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}

#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

struct bio_list {
	struct bio	*head;
	struct bio	*tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;
	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
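
/*
 * Illustrative sketch (not part of the original header): bio_list is a
 * singly linked FIFO threaded through bio->bi_next. Typical usage, assuming
 * the bios and the hypothetical process_one_bio() helper exist elsewhere:
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, some_bio);		// enqueue at the tail
 *
 *	while ((bio = bio_list_pop(&list)))	// dequeue from the head
 *		process_one_bio(bio);
 */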

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

static inline void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
}

#endif /* __LINUX_BIO_H */