return bytes >> 9;
}
-void blkdev_put(struct block_device *bdev, fmode_t mode)
+void blkdev_put(struct block_device *bdev, void *holder)
{
fdatasync(bdev->bd_fd);
close(bdev->bd_sync_fd);
free(bdev);
}
-struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder)
+struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hop)
{
struct block_device *bdev;
int fd, sync_fd, buffered_fd, flags = 0;
- if ((mode & (FMODE_READ|FMODE_WRITE)) == (FMODE_READ|FMODE_WRITE))
+ if ((mode & (BLK_OPEN_READ|BLK_OPEN_WRITE)) == (BLK_OPEN_READ|BLK_OPEN_WRITE))
flags = O_RDWR;
- else if (mode & FMODE_READ)
+ else if (mode & BLK_OPEN_READ)
flags = O_RDONLY;
- else if (mode & FMODE_WRITE)
+ else if (mode & BLK_OPEN_WRITE)
flags = O_WRONLY;
- if (!(mode & FMODE_BUFFERED))
+ if (!(mode & BLK_OPEN_BUFFERED))
flags |= O_DIRECT;
#if 0
/* using O_EXCL doesn't work with opening twice for an O_SYNC fd: */
- if (mode & FMODE_EXCL)
+ if (mode & BLK_OPEN_EXCL)
flags |= O_EXCL;
#endif
buffered_fd = open(path, flags & ~O_DIRECT);
sync_check(bio, ret);
}
+static DECLARE_WAIT_QUEUE_HEAD(aio_events_completed);
+
static int aio_completion_thread(void *arg)
{
struct io_event events[8], *ev;
continue;
if (ret < 0)
die("io_getevents() error: %s", strerror(-ret));
+ if (ret)
+ wake_up(&aio_events_completed);
for (ev = events; ev < events + ret; ev++) {
struct bio *bio = (struct bio *) ev->data;
}, *iocbp = &iocb;
atomic_inc(&running_requests);
- ret = io_submit(aio_ctx, 1, &iocbp);
+
+	wait_event(aio_events_completed,
+		   (ret = io_submit(aio_ctx, 1, &iocbp)) != -EAGAIN);
+
if (ret != 1)
die("io_submit err: %s", strerror(-ret));
}