}
}
-void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
+static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
- /*
- * most users will be overriding ->bi_bdev with a new target,
- * so we don't set nor calculate new physical/hw segment counts here
- */
- bio->bi_bdev = bio_src->bi_bdev;
bio_set_flag(bio, BIO_CLONED);
- bio->bi_opf = bio_src->bi_opf;
+ bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_iter = bio_src->bi_iter;
- bio->bi_io_vec = bio_src->bi_io_vec;
+ return 0;
}
-struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+ gfp_t gfp, struct bio_set *bs)
{
-	struct bio *b;
+	struct bio *bio;

-	b = bio_alloc_bioset(gfp_mask, 0, bs);
-	if (!b)
+	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+	if (!bio)
		return NULL;

-	__bio_clone_fast(b, bio);
-	return b;
+	if (__bio_clone(bio, bio_src, gfp) < 0) {
+		bio_put(bio);
+		return NULL;
+	}
+	bio->bi_io_vec = bio_src->bi_io_vec;
+
+	return bio;
}
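
For callers, the usual stacking-driver pattern after this change looks like the sketch below (hypothetical names; assumes a driver-private bio_set created without BIOSET_NEED_BVECS, since the clone borrows bio_src's bvec table):

#include <linux/bio.h>

/* Sketch: clone an incoming bio and redirect it at a backing device,
 * dm/md style. Completion handling is omitted; a real driver would set
 * clone->bi_end_io so the parent bio can be completed. */
static void example_remap(struct bio *bio_src, struct block_device *lower_bdev,
			  struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_clone(lower_bdev, bio_src, GFP_NOIO, bs);
	if (!clone)
		return;		/* caller would fail or retry the I/O */

	clone->bi_iter.bi_sector += 128;	/* illustrative remap offset */
	submit_bio(clone);
}
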
struct bio *bio_split(struct bio *bio, int sectors,
BUG_ON(sectors <= 0);
BUG_ON(sectors >= bio_sectors(bio));
- /*
- * Discards need a mutable bio_vec to accommodate the payload
- * required by the DSM TRIM and UNMAP commands.
- */
- if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
- split = bio_clone_bioset(bio, gfp, bs);
- else
- split = bio_clone_fast(bio, gfp, bs);
-
+ split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
if (!split)
return NULL;
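
Since the split path now always shares the source's bvec table, the standard split-and-chain usage applies to all operation types; a sketch with a hypothetical helper and limit:

/* Sketch: carve the first max_sectors off an oversized bio and submit
 * that piece, leaving the remainder in the original bio. bio_chain()
 * ensures the parent completes only after the split part does. */
static struct bio *example_split_submit(struct bio *bio, int max_sectors,
					struct bio_set *bs)
{
	if (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

		if (!split)
			return NULL;
		bio_chain(split, bio);
		submit_bio(split);
	}
	return bio;	/* the remainder, or the untouched bio */
}
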
static void bio_free(struct bio *bio)
{
-	unsigned front_pad = bio->bi_pool ? bio->bi_pool->front_pad : 0;
-
-	kfree((void *) bio - front_pad);
+	struct bio_set *bs = bio->bi_pool;
+
+	if (bs) {
+		if (bio->bi_max_vecs > BIO_INLINE_VECS)
+			mempool_free(bio->bi_io_vec, &bs->bvec_pool);
+		mempool_free((void *) bio - bs->front_pad, &bs->bio_pool);
+	} else {
+		kfree(bio);
+	}
}
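
The front_pad arithmetic is easier to follow with a concrete layout in mind. A minimal sketch, assuming a hypothetical struct driver_io embedded in front of the bio:

/*
 * Pool element layout with front_pad == sizeof(struct driver_io) and
 * BIOSET_NEED_BVECS set:
 *
 *   [ struct driver_io | struct bio | BIO_INLINE_VECS * struct bio_vec ]
 *   ^ mempool element    ^ bio        ^ back_pad (inline vecs)
 *
 * bio_free() hands (void *)bio - bs->front_pad back to the mempool, and
 * a driver recovers its per-bio data with the same subtraction:
 */
struct driver_io {		/* hypothetical per-bio driver state */
	void *private;
};

static inline struct driver_io *bio_to_driver_io(struct bio *bio)
{
	return (void *)bio - sizeof(struct driver_io);
}
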
void bio_put(struct bio *bio)
bio->bi_end_io(bio);
}
-void bio_reset(struct bio *bio)
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
{
unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
memset(bio, 0, BIO_RESET_BYTES);
- bio->bi_flags = flags;
+ bio->bi_bdev = bdev;
+ bio->bi_opf = opf;
+ bio->bi_flags = flags;
atomic_set(&bio->__bi_remaining, 1);
}
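
Callers that recycle a bio now supply the target device and operation at reset time instead of poking bi_bdev and bi_opf afterwards; a minimal reuse sketch (hypothetical helper):

/* Sketch: reuse one bio for a series of synchronous reads. bio_reset()
 * zeroes everything below BIO_RESET_BYTES, preserves the flags above
 * BIO_RESET_BITS, and installs the new target and operation. */
static int example_read_many(struct bio *bio, struct block_device *bdev,
			     const sector_t *sectors, int count)
{
	int i, ret = 0;

	for (i = 0; i < count && !ret; i++) {
		bio_reset(bio, bdev, REQ_OP_READ);
		bio->bi_iter.bi_sector = sectors[i];
		/* bio_add_page() calls would go here */
		ret = submit_bio_wait(bio);
	}
	return ret;
}
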
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_kmalloc(unsigned int nr_iovecs, gfp_t gfp_mask)
{
- unsigned front_pad = bs ? bs->front_pad : 0;
struct bio *bio;
- void *p;
-
- p = kmalloc(front_pad +
- sizeof(struct bio) +
- nr_iovecs * sizeof(struct bio_vec),
- gfp_mask);
- if (unlikely(!p))
+ bio = kmalloc(sizeof(struct bio) +
+ sizeof(struct bio_vec) * nr_iovecs, gfp_mask);
+ if (unlikely(!bio))
return NULL;
-	bio = p + front_pad;
-	bio_init(bio, bio->bi_inline_vecs, nr_iovecs);
-	bio->bi_pool = bs;
-
-	return bio;
+	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs, 0);
+	bio->bi_pool = NULL;
+	return bio;
+}
+
+static struct bio_vec *bvec_alloc(mempool_t *pool, unsigned *nr_vecs,
+				  gfp_t gfp_mask)
+{
+	*nr_vecs = roundup_pow_of_two(*nr_vecs);
+	/*
+	 * Try a slab allocation first for all smaller allocations.  If that
+	 * fails and __GFP_DIRECT_RECLAIM is set, retry with the mempool.
+	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
+	 */
+	if (*nr_vecs < BIO_MAX_VECS) {
+		struct bio_vec *bvl;
+
+		bvl = kmalloc(sizeof(*bvl) * *nr_vecs, gfp_mask);
+		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
+			return bvl;
+		*nr_vecs = BIO_MAX_VECS;
+	}
+
+	return mempool_alloc(pool, gfp_mask);
}
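
To make the rounding concrete, a worked example (BIO_MAX_VECS is 256 in current kernels):

/*
 *   *nr_vecs == 5   -> rounded up to 8;   served by kmalloc(8 * sizeof(bvec))
 *   *nr_vecs == 129 -> rounded up to 256; not < BIO_MAX_VECS, so it goes
 *                      straight to the mempool
 *
 * Only the mempool path guarantees forward progress under memory pressure,
 * which is why a failed kmalloc falls through to it when
 * __GFP_DIRECT_RECLAIM allows the allocation to sleep.
 */
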
-struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
+struct bio *bio_alloc_bioset(struct block_device *bdev,
+ unsigned nr_iovecs,
+ unsigned opf,
+ gfp_t gfp_mask,
struct bio_set *bs)
{
- struct bvec_iter iter;
- struct bio_vec bv;
struct bio *bio;
+ void *p;
- bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
- if (!bio)
+ if (nr_iovecs > BIO_MAX_VECS)
+ return NULL;
+
+ p = mempool_alloc(&bs->bio_pool, gfp_mask);
+ if (unlikely(!p))
return NULL;
- bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_opf = bio_src->bi_opf;
- bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
- bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
-
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- break;
- case REQ_OP_WRITE_SAME:
- bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
- break;
- default:
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
- break;
+ bio = p + bs->front_pad;
+ if (nr_iovecs > BIO_INLINE_VECS) {
+ struct bio_vec *bvl = NULL;
+
+ bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
+ if (unlikely(!bvl))
+ goto err_free;
+
+ bio_init(bio, bdev, bvl, nr_iovecs, opf);
+ } else if (nr_iovecs) {
+ bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
+ } else {
+ bio_init(bio, bdev, NULL, 0, opf);
}
+ bio->bi_pool = bs;
return bio;
+
+err_free:
+ mempool_free(p, &bs->bio_pool);
+ return NULL;
+}
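
Putting the new signature together, a minimal allocation sketch (hypothetical caller):

/* Sketch: allocate a 4-vector write bio aimed at bdev from a private
 * bio_set; the operation is now fixed at allocation time rather than
 * assigned to bi_opf afterwards. */
static struct bio *example_alloc_write(struct block_device *bdev,
				       struct bio_set *bs)
{
	struct bio *bio;

	bio = bio_alloc_bioset(bdev, 4, REQ_OP_WRITE, GFP_NOIO, bs);
	if (!bio)
		return NULL;
	/* bio_add_page() calls would follow */
	return bio;
}
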
+
+void bioset_exit(struct bio_set *bs)
+{
+ mempool_exit(&bs->bio_pool);
+ mempool_exit(&bs->bvec_pool);
+}
+
+int bioset_init(struct bio_set *bs,
+ unsigned int pool_size,
+ unsigned int front_pad,
+ int flags)
+{
+ int ret;
+
+ bs->front_pad = front_pad;
+ if (flags & BIOSET_NEED_BVECS)
+ bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
+ else
+ bs->back_pad = 0;
+
+	ret = mempool_init_kmalloc_pool(&bs->bio_pool, pool_size,
+			bs->front_pad + sizeof(struct bio) + bs->back_pad);
+	if (ret)
+		return ret;
+
+	if (flags & BIOSET_NEED_BVECS) {
+		ret = mempool_init_kmalloc_pool(&bs->bvec_pool, pool_size,
+				sizeof(struct bio_vec) * BIO_MAX_VECS);
+		if (ret)
+			bioset_exit(bs);
+	}
+	return ret;
}
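
Typical lifetime management for the simplified bio_set (a sketch, assuming a hypothetical module):

static struct bio_set example_bio_set;	/* hypothetical driver-wide set */

static int __init example_init(void)
{
	/* 4 reserved elements; BIOSET_NEED_BVECS sizes back_pad for the
	 * inline vecs and creates the bvec mempool. */
	return bioset_init(&example_bio_set, 4, 0, BIOSET_NEED_BVECS);
}

static void __exit example_exit(void)
{
	bioset_exit(&example_bio_set);
}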