int blkdev_issue_discard(struct block_device *bdev,
sector_t sector, sector_t nr_sects,
- gfp_t gfp_mask, unsigned long flags)
+ gfp_t gfp_mask)
{
+ /* Userspace shim: discard is a no-op, always reports success. */
return 0;
}
+int blkdev_issue_zeroout(struct block_device *bdev,
+ sector_t sector, sector_t nr_sects,
+ gfp_t gfp_mask, unsigned flags)
+{
+ /* Not yet implemented: abort if anything actually requests a zeroout. */
+ BUG();
+}
+
unsigned bdev_logical_block_size(struct block_device *bdev)
{
struct stat statbuf;
BUG_ON(ret);
if (!S_ISBLK(statbuf.st_mode))
- return statbuf.st_blksize >> 9;
+ /* Now reports a size in bytes, not 512-byte sectors. */
+ return statbuf.st_blksize;
- ret = ioctl(bdev->bd_fd, BLKPBSZGET, &blksize);
- BUG_ON(ret);
-
- return blksize >> 9;
+ xioctl(bdev->bd_fd, BLKPBSZGET, &blksize);
+ return blksize;
}
sector_t get_capacity(struct gendisk *disk)
return bytes >> 9;
}
-void blkdev_put(struct block_device *bdev, fmode_t mode)
+void bdev_release(struct bdev_handle *handle)
{
- fdatasync(bdev->bd_fd);
- close(bdev->bd_sync_fd);
- close(bdev->bd_fd);
- free(bdev);
+ /* Flush dirty data before closing so release acts as a write barrier. */
+ fdatasync(handle->bdev->bd_fd);
+ close(handle->bdev->bd_fd);
+ free(handle->bdev);
+ free(handle);
}
-struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder)
+/*
+ * Userspace replacement for the kernel bdev open path: translates
+ * blk_mode_t into open(2) flags and wraps the fd in a bdev_handle.
+ * Returns ERR_PTR(-errno) if the underlying open() fails.
+ */
+struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hop)
{
- struct block_device *bdev;
- int fd, sync_fd, flags = O_DIRECT;
+ int fd, flags = 0;
- if ((mode & (FMODE_READ|FMODE_WRITE)) == (FMODE_READ|FMODE_WRITE))
+ if ((mode & (BLK_OPEN_READ|BLK_OPEN_WRITE)) == (BLK_OPEN_READ|BLK_OPEN_WRITE))
flags = O_RDWR;
- else if (mode & FMODE_READ)
+ else if (mode & BLK_OPEN_READ)
flags = O_RDONLY;
- else if (mode & FMODE_WRITE)
+ else if (mode & BLK_OPEN_WRITE)
flags = O_WRONLY;
-#if 0
- /* using O_EXCL doesn't work with opening twice for an O_SYNC fd: */
- if (mode & FMODE_EXCL)
+ if (!(mode & BLK_OPEN_BUFFERED))
+ flags |= O_DIRECT;
+
+ if (mode & BLK_OPEN_EXCL)
flags |= O_EXCL;
-#endif
fd = open(path, flags);
if (fd < 0)
return ERR_PTR(-errno);
- sync_fd = open(path, flags|O_SYNC);
- if (sync_fd < 0) {
- assert(0);
- close(fd);
- return ERR_PTR(-errno);
- }
-
- bdev = malloc(sizeof(*bdev));
+ /* NOTE(review): malloc/calloc results are unchecked — presumably an
+ * abort-on-OOM policy; confirm, or use the project's x*alloc helpers. */
+ struct block_device *bdev = malloc(sizeof(*bdev));
memset(bdev, 0, sizeof(*bdev));
strncpy(bdev->name, path, sizeof(bdev->name));
bdev->bd_dev = xfstat(fd).st_rdev;
bdev->bd_fd = fd;
- bdev->bd_sync_fd = sync_fd;
bdev->bd_holder = holder;
bdev->bd_disk = &bdev->__bd_disk;
bdev->bd_disk->bdi = &bdev->bd_disk->__bdi;
bdev->queue.backing_dev_info = bdev->bd_disk->bdi;
- return bdev;
-}
+ /* calloc(nmemb, size): member count first, element size second. */
+ struct bdev_handle *handle = calloc(1, sizeof(*handle));
+ handle->bdev = bdev;
+ handle->holder = holder;
+ handle->mode = mode;
-void bdput(struct block_device *bdev)
-{
- BUG();
+ return handle;
}
int lookup_bdev(const char *path, dev_t *dev)
static void sync_read(struct bio *bio, struct iovec * iov, unsigned i)
{
- int fd = bio->bi_opf & REQ_FUA
- ? bio->bi_bdev->bd_sync_fd
- : bio->bi_bdev->bd_fd;
- ssize_t ret = preadv(fd, iov, i, bio->bi_iter.bi_sector << 9);
+ /* REQ_FUA is meaningless for reads; always use the plain fd. */
+ ssize_t ret = preadv(bio->bi_bdev->bd_fd, iov, i,
+ bio->bi_iter.bi_sector << 9);
sync_check(bio, ret);
}
static void sync_write(struct bio *bio, struct iovec * iov, unsigned i)
{
- int fd = bio->bi_opf & REQ_FUA
- ? bio->bi_bdev->bd_sync_fd
- : bio->bi_bdev->bd_fd;
- ssize_t ret = pwritev(fd, iov, i, bio->bi_iter.bi_sector << 9);
+ /* RWF_SYNC gives per-write O_SYNC semantics, replacing the second
+ * O_SYNC fd that used to emulate REQ_FUA. */
+ ssize_t ret = pwritev2(bio->bi_bdev->bd_fd, iov, i,
+ bio->bi_iter.bi_sector << 9,
+ bio->bi_opf & REQ_FUA ? RWF_SYNC : 0);
sync_check(bio, ret);
}
+/* Woken each time completions are reaped, so submitters stuck on a full
+ * aio context (-EAGAIN) can retry io_submit(). */
+static DECLARE_WAIT_QUEUE_HEAD(aio_events_completed);
+
static int aio_completion_thread(void *arg)
{
struct io_event events[8], *ev;
continue;
if (ret < 0)
die("io_getevents() error: %s", strerror(-ret));
+ /* Reaped completions free aio slots: wake submitters waiting on -EAGAIN. */
+ if (ret)
+ wake_up(&aio_events_completed);
for (ev = events; ev < events + ret; ev++) {
struct bio *bio = (struct bio *) ev->data;
struct iocb iocb = {
.data = bio,
- .aio_fildes = bio->bi_opf & REQ_FUA
- ? bio->bi_bdev->bd_sync_fd
- : bio->bi_bdev->bd_fd,
+ .aio_fildes = bio->bi_bdev->bd_fd,
+ /* RWF_SYNC replaces the separate O_SYNC fd for REQ_FUA writes. */
+ .aio_rw_flags = bio->bi_opf & REQ_FUA ? RWF_SYNC : 0,
.aio_lio_opcode = opcode,
.u.c.buf = iov,
.u.c.nbytes = i,
}, *iocbp = &iocb;
atomic_inc(&running_requests);
- ret = io_submit(aio_ctx, 1, &iocbp);
+
+ /*
+ * io_submit() returns -EAGAIN when the aio context is full; sleep
+ * until the completion thread reaps events and frees a slot, then
+ * retry. Any other error is fatal below.
+ */
+ wait_event(aio_events_completed,
+ (ret = io_submit(aio_ctx, 1, &iocbp)) != -EAGAIN);
+
if (ret != 1)
die("io_submit err: %s", strerror(-ret));
}