10 #include "io_uring_engine.h"
// Constructor: try to bring up an io_uring instance with `queue_depth`
// entries. If the kernel refuses (old kernel, rlimit, etc.), remember that
// and fall back to synchronous pread() in submit_read().
// NOTE(review): interior lines are elided in this view (braces etc. not shown).
14 IOUringEngine::IOUringEngine()
// io_uring_queue_init() returns 0 on success and -errno on failure.
19 int ret = io_uring_queue_init(queue_depth, &ring, 0);
// Non-negative return means the ring is usable; all later paths branch on this.
21 using_uring = (ret >= 0);
// Ask for `len` bytes at `offset` from `fd`; `cb` is invoked with the data
// once the read completes (immediately on the non-uring fallback path,
// later from finish() on the io_uring path).
// NOTE(review): interior lines are elided in this view (the fallback branch
// and error-exit bodies are not shown).
24 void IOUringEngine::submit_read(int fd, size_t len, off_t offset, function<void(string)> cb)
// Fallback path: no working io_uring, so do a blocking full read into `s`
// (a string presumably resized to `len` on an elided line — confirm).
30 complete_pread(fd, &s[0], len, offset);
// Only submit directly if the ring still has capacity for another read.
36 if (pending_reads < queue_depth) {
// io_uring_get_sqe() returns nullptr when the submission queue is full.
37 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
// NOTE(review): io_uring_get_sqe() does not set errno on failure, so this
// strerror(errno) likely prints a stale/unrelated error — consider a plain
// "submission queue full" message instead.
39 fprintf(stderr, "io_uring_get_sqe: %s\n", strerror(errno));
// Capacity available: prepare and account for the read right away.
42 submit_read_internal(sqe, fd, len, offset, move(cb));
// Ring full: park the request; finish() drains this queue as reads complete.
44 queued_reads.push(QueuedRead{ fd, len, offset, move(cb) });
// Fill in an already-acquired SQE for an async read: allocate an aligned
// destination buffer, record the in-flight request in a heap-allocated
// PendingRead (recovered from cqe->user_data in finish()), and prep the SQE.
// Does not call io_uring_submit(); the caller batches submission.
// NOTE(review): interior lines are elided in this view (e.g. the `buf`
// declaration, error exit, and any pending_reads increment are not shown).
50 void IOUringEngine::submit_read_internal(io_uring_sqe *sqe, int fd, size_t len, off_t offset, function<void(string)> cb)
// 4096-byte alignment keeps the buffer page-aligned (O_DIRECT-friendly).
// posix_memalign() returns 0 on success, an error number on failure.
53 if (posix_memalign(&buf, /*alignment=*/4096, len)) {
// NOTE(review): BUG — POSIX says posix_memalign() returns the error code and
// leaves errno unspecified, so strerror(errno) here can print the wrong
// error; the return value should be captured and passed to strerror().
54 fprintf(stderr, "Couldn't allocate %zu bytes: %s\n", len, strerror(errno));
// One tracking record per in-flight read; the trailing { buf, len } is the
// iovec the kernel will fill. Freed after the callback runs in finish().
57 PendingRead *pending = new PendingRead{ buf, len, move(cb), fd, offset, { buf, len } };
// Single-iovec vectored read at the requested offset.
59 io_uring_prep_readv(sqe, fd, &pending->iov, 1, offset);
// Stash the PendingRead pointer so the completion handler can find it.
60 io_uring_sqe_set_data(sqe, pending);
// Drive all outstanding reads to completion: submit everything prepared so
// far, then loop waiting on completions. Short reads are resubmitted with an
// advanced iovec; completed reads invoke their callback (which may itself
// submit new reads); queued reads are started as ring capacity frees up.
// Returns once nothing is in flight.
// NOTE(review): interior lines are elided in this view (early-out for the
// non-uring path, cqe declaration, else branches, exit(1) calls, and the
// pending_reads/queued_reads bookkeeping are not all shown).
65 void IOUringEngine::finish()
// Push every SQE prepared by submit_read() to the kernel in one syscall.
72 int ret = io_uring_submit(&ring);
// liburing returns -errno on failure, hence strerror(-ret).
74 fprintf(stderr, "io_uring_submit: %s\n", strerror(-ret));
// Set whenever we prep new SQEs below, so we batch one submit per iteration.
77 bool anything_to_submit = false;
// Keep going until every in-flight read has been reaped.
78 while (pending_reads > 0) {
// Block until at least one completion is available.
80 ret = io_uring_wait_cqe(&ring, &cqe);
82 fprintf(stderr, "io_uring_wait_cqe: %s\n", strerror(-ret));
// Recover the request record stashed via io_uring_sqe_set_data().
86 PendingRead *pending = reinterpret_cast<PendingRead *>(cqe->user_data);
// A negative cqe->res is -errno for the failed read.
88 fprintf(stderr, "async read failed: %s\n", strerror(-cqe->res));
// Fewer bytes than asked for: not an error, just a short read.
92 if (size_t(cqe->res) < pending->iov.iov_len) {
93 // Incomplete read, so resubmit it.
// Advance the buffer pointer, shrink the remaining length, and move the
// file offset forward by the bytes already received.
94 pending->iov.iov_base = (char *)pending->iov.iov_base + cqe->res;
95 pending->iov.iov_len -= cqe->res;
96 pending->offset += cqe->res;
// Release the CQE slot before grabbing a fresh SQE.
97 io_uring_cqe_seen(&ring, cqe);
99 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
// We just consumed a completion, so an SQE slot should always be free here.
100 if (sqe == nullptr) {
101 fprintf(stderr, "No free SQE for resubmit; this shouldn't happen.\n");
// Re-prep the same PendingRead for the remaining byte range.
104 io_uring_prep_readv(sqe, pending->fd, &pending->iov, 1, pending->offset);
105 io_uring_sqe_set_data(sqe, pending);
// Defer the actual io_uring_submit() to the batch at the bottom of the loop.
106 anything_to_submit = true;
// Full read completed: release the CQE slot.
108 io_uring_cqe_seen(&ring, cqe);
// Snapshot the counter so we can tell whether the callback started new
// reads directly (presumably pending_reads is decremented on an elided
// line around here — confirm against the full source).
111 size_t old_pending_reads = pending_reads;
// Hand the filled buffer to the caller as a string copy.
112 pending->cb(string(reinterpret_cast<char *>(pending->buf), pending->len));
116 if (pending_reads != old_pending_reads) {
117 // A new read was made in the callback (and not queued),
118 // so we need to re-submit.
119 anything_to_submit = true;
123 // See if there are any queued reads we can submit now.
// Completions freed capacity; move parked requests onto the ring.
124 while (!queued_reads.empty() && pending_reads < queue_depth) {
125 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
126 if (sqe == nullptr) {
// NOTE(review): io_uring_get_sqe() does not set errno, so this message
// likely prints a stale error (same issue as in submit_read()).
127 fprintf(stderr, "io_uring_get_sqe: %s\n", strerror(errno));
// Move the callback out of the queue entry before (presumably) popping it.
130 QueuedRead &qr = queued_reads.front();
131 submit_read_internal(sqe, qr.fd, qr.len, qr.offset, move(qr.cb));
133 anything_to_submit = true;
// One batched submit per wait-iteration covers resubmits, callback-spawned
// reads, and newly unqueued reads alike.
136 if (anything_to_submit) {
137 // A new read was made, so we need to re-submit.
138 int ret = io_uring_submit(&ring);
140 fprintf(stderr, "io_uring_submit(queued): %s\n", strerror(-ret));
// Reset for the next iteration of the wait loop.
143 anything_to_submit = false;
// Synchronous fallback: read exactly `len` bytes from `fd` at `offset` into
// `ptr`, retrying on EINTR and on short reads. Used when io_uring is
// unavailable. (The loop body continues past this view; the tail that
// decrements `len` and advances `offset` is not shown here.)
150 void complete_pread(int fd, void *ptr, size_t len, off_t offset)
// pread() returns bytes read, 0 at EOF, or -1 with errno set.
153 ssize_t ret = pread(fd, ptr, len, offset);
// A signal interrupted the read before any data arrived: just retry.
154 if (ret == -1 && errno == EINTR) {
// Short read: advance the destination pointer by the bytes received.
161 ptr = reinterpret_cast<char *>(ptr) + ret;