13 #include <linux/bio.h>
14 #include <linux/blkdev.h>
15 #include <linux/completion.h>
17 #include <linux/kthread.h>
19 #include "tools-util.h"
21 static io_context_t aio_ctx;
/*
 * Userspace stand-in for the kernel block layer's generic_make_request():
 * executes a struct bio against the POSIX fd held in bio->bi_bdev.
 * Visible behavior: REQ_PREFLUSH is emulated with fdatasync(); data ops are
 * turned into libaio preadv/pwritev iocbs built from the bio's segments;
 * completion is reaped asynchronously by aio_completion_thread().
 * NOTE(review): this excerpt omits many lines (segment counting, iocb setup,
 * error/endio paths); comments below describe only what is shown.
 */
23 void generic_make_request(struct bio *bio)
26 struct bvec_iter iter;
/* Preflush: flush the backing fd before servicing the rest of the bio */
31 if (bio->bi_opf & REQ_PREFLUSH) {
32 ret = fdatasync(bio->bi_bdev->bd_fd);
/* Flush failure is reported through the bio status, not a hard die() */
34 fprintf(stderr, "fsync error: %m\n");
35 bio->bi_status = BLK_STS_IOERR;
/* First segment walk — presumably counts segments to size the iovec array
 * (counting statement elided in this excerpt; confirm against full file) */
42 bio_for_each_segment(bv, bio, iter)
/* iovec array is stack-allocated; valid only for the duration of this call.
 * The kernel-side aio copies it at submit time, so that is sufficient. */
45 iov = alloca(sizeof(*iov) * i);
/* Second walk: translate each bio_vec into a struct iovec */
48 bio_for_each_segment(bv, bio, iter)
49 iov[i++] = (struct iovec) {
50 .iov_base = page_address(bv.bv_page) + bv.bv_offset,
/* REQ_FUA selects the O_SYNC descriptor so the write reaches stable media */
56 .aio_fildes = bio->bi_opf & REQ_FUA
57 ? bio->bi_bdev->bd_sync_fd
58 : bio->bi_bdev->bd_fd,
61 switch (bio_op(bio)) {
/* READ: async preadv at the bio's byte offset (bi_sector is 512-byte units) */
63 iocb.aio_lio_opcode = IO_CMD_PREADV;
66 iocb.u.v.offset = bio->bi_iter.bi_sector << 9;
68 ret = io_submit(aio_ctx, 1, &iocbp);
70 die("io_submit err: %s", strerror(-ret));
/* WRITE: async pwritev, same offset computation */
73 iocb.aio_lio_opcode = IO_CMD_PWRITEV;
76 iocb.u.v.offset = bio->bi_iter.bi_sector << 9;
78 ret = io_submit(aio_ctx, 1, &iocbp);
80 die("io_submit err: %s", strerror(-ret));
/* Standalone flush op: synchronous fsync(); failure here is fatal,
 * unlike the preflush case above which only sets bi_status */
83 ret = fsync(bio->bi_bdev->bd_fd);
85 die("fsync error: %m");
/* endio callback for submit_bio_wait(): signals the on-stack completion
 * stashed in bi_private so the waiting thread can proceed. */
93 static void submit_bio_wait_endio(struct bio *bio)
95 complete(bio->bi_private);
/*
 * Submit a bio and block until it completes.
 * Returns the bio's final status translated to a negative errno.
 * NOTE(review): the actual submission call (presumably submit_bio() /
 * generic_make_request()) is elided from this excerpt.
 */
98 int submit_bio_wait(struct bio *bio)
100 struct completion done;
102 init_completion(&done);
/* Route completion back here: endio callback fires complete(&done) */
103 bio->bi_private = &done;
104 bio->bi_end_io = submit_bio_wait_endio;
105 bio->bi_opf |= REQ_SYNC;
107 wait_for_completion(&done);
109 return blk_status_to_errno(bio->bi_status);
/*
 * Discard stub.  Body elided from this excerpt — presumably a no-op or an
 * ioctl-based discard in userspace; TODO(review): confirm against full file.
 */
112 int blkdev_issue_discard(struct block_device *bdev,
113 sector_t sector, sector_t nr_sects,
114 gfp_t gfp_mask, unsigned long flags)
/*
 * Logical block size of @bdev, expressed in 512-byte sectors (note the >> 9).
 * Regular files derive it from stat's st_blksize; real block devices query
 * the kernel via the BLKPBSZGET ioctl.
 * NOTE(review): fstat/ioctl error handling is elided from this excerpt.
 */
119 unsigned bdev_logical_block_size(struct block_device *bdev)
125 ret = fstat(bdev->bd_fd, &statbuf);
/* Not a block device (e.g. a regular file used as a backing store) */
128 if (!S_ISBLK(statbuf.st_mode))
129 return statbuf.st_blksize >> 9;
131 ret = ioctl(bdev->bd_fd, BLKPBSZGET, &blksize);
/*
 * Device capacity in 512-byte sectors.  @disk is embedded inside our
 * userspace struct block_device (__bd_disk), recovered via container_of().
 * Regular files use st_size; block devices use the BLKGETSIZE64 ioctl.
 * NOTE(review): fstat/ioctl error handling is elided from this excerpt.
 */
137 sector_t get_capacity(struct gendisk *disk)
139 struct block_device *bdev =
140 container_of(disk, struct block_device, __bd_disk);
145 ret = fstat(bdev->bd_fd, &statbuf);
148 if (!S_ISBLK(statbuf.st_mode))
149 return statbuf.st_size >> 9;
151 ret = ioctl(bdev->bd_fd, BLKGETSIZE64, &bytes);
/*
 * Release a device opened by blkdev_get_by_path(): flush dirty data, then
 * close the O_SYNC descriptor.  The fdatasync() return value is ignored —
 * best-effort on teardown.
 * NOTE(review): closing bd_fd and freeing bdev are elided from this excerpt;
 * confirm they happen in the full body or this leaks both.
 */
157 void blkdev_put(struct block_device *bdev, fmode_t mode)
159 fdatasync(bdev->bd_fd);
160 close(bdev->bd_sync_fd);
/*
 * Open @path twice — once plain O_DIRECT (bd_fd) and once O_DIRECT|O_SYNC
 * (bd_sync_fd, used for REQ_FUA writes) — and wrap the pair in a
 * heap-allocated struct block_device.  Returns ERR_PTR(-errno) on failure.
 * NOTE(review): the O_RDWR/O_RDONLY/O_WRONLY assignments, O_EXCL handling,
 * error-path cleanup, and bd_fd assignment are elided from this excerpt.
 */
165 struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
168 struct block_device *bdev;
169 int fd, sync_fd, flags = O_DIRECT;
/* Map kernel fmode_t access bits onto POSIX open(2) access modes */
171 if ((mode & (FMODE_READ|FMODE_WRITE)) == (FMODE_READ|FMODE_WRITE))
173 else if (mode & FMODE_READ)
175 else if (mode & FMODE_WRITE)
179 /* using O_EXCL doesn't work with opening twice for an O_SYNC fd: */
180 if (mode & FMODE_EXCL)
184 fd = open(path, flags);
186 return ERR_PTR(-errno);
/* Second descriptor, O_SYNC, so FUA writes hit stable storage */
188 sync_fd = open(path, flags|O_SYNC);
192 return ERR_PTR(-errno);
/* NOTE(review): malloc result is not checked before memset — crashes on
 * OOM.  May be acceptable policy for a userspace tool; confirm intent. */
195 bdev = malloc(sizeof(*bdev));
196 memset(bdev, 0, sizeof(*bdev));
/* strncpy may not terminate on its own; the next line guarantees it */
198 strncpy(bdev->name, path, sizeof(bdev->name));
199 bdev->name[sizeof(bdev->name) - 1] = '\0';
202 bdev->bd_sync_fd = sync_fd;
203 bdev->bd_holder = holder;
/* Wire the embedded gendisk / backing_dev_info members to themselves */
204 bdev->bd_disk = &bdev->__bd_disk;
205 bdev->bd_bdi = &bdev->__bd_bdi;
206 bdev->queue.backing_dev_info = bdev->bd_bdi;
/* Reference-drop hook; body elided in this excerpt — presumably a no-op
 * in userspace.  TODO(review): confirm against full file. */
211 void bdput(struct block_device *bdev)
/* Path-to-bdev lookup is not supported in this userspace shim:
 * unconditionally report -EINVAL via ERR_PTR. */
216 struct block_device *lookup_bdev(const char *path)
218 return ERR_PTR(-EINVAL);
/*
 * Reaper thread: collects completed libaio events and finishes their bios.
 * A short transfer (res != bi_size) is surfaced as BLK_STS_IOERR.
 * NOTE(review): the enclosing loop and the bio_endio() call are elided from
 * this excerpt.
 */
221 static int aio_completion_thread(void *arg)
223 struct io_event events[8], *ev;
/* Block for at least 1 event; reap up to ARRAY_SIZE(events) per wakeup */
227 ret = io_getevents(aio_ctx, 1, ARRAY_SIZE(events),
/* Interrupted wait: retry.  (The 'ret < 0 &&' guard is redundant —
 * ret == -EINTR already implies it — but harmless.) */
230 if (ret < 0 && ret == -EINTR)
/* Any other negative return from io_getevents() is fatal */
233 die("io_getevents() error: %s", strerror(-ret));
235 for (ev = events; ev < events + ret; ev++) {
/* ev->data carries the bio pointer stashed in the iocb at submit time */
236 struct bio *bio = (struct bio *) ev->data;
238 if (ev->res != bio->bi_iter.bi_size)
239 bio->bi_status = BLK_STS_IOERR;
/*
 * Constructor (priority 102, so it runs after lower-priority ctors but
 * before main()): create the shared libaio context and spawn the
 * completion reaper thread.
 * NOTE(review): the kthread_run() error check is elided from this excerpt.
 */
248 __attribute__((constructor(102)))
249 static void blkdev_init(void)
251 struct task_struct *p;
/* Allow up to 256 in-flight AIO requests in the shared context */
253 if (io_setup(256, &aio_ctx))
254 die("io_setup() error: %m");
256 p = kthread_run(aio_completion_thread, NULL, "aio_completion");