14 #include "io_uring_engine.h"
// Construct the engine. slop_bytes is extra padding allocated past the end of
// every read buffer (see submit_read / submit_read_internal, which allocate
// len + slop_bytes) — presumably so consumers may touch a little past the
// requested length without faulting; confirm against callers.
IOUringEngine::IOUringEngine(size_t slop_bytes)
	: slop_bytes(slop_bytes)
// Try to create an io_uring with queue_depth entries.
int ret = io_uring_queue_init(queue_depth, &ring, 0);
// If the kernel lacks io_uring support, remember that and fall back to
// synchronous pread() elsewhere (using_uring gates the async paths).
using_uring = (ret >= 0);
// Queue up a read of len bytes at offset from fd. cb is invoked with a
// string_view over the data once the read has completed — immediately on the
// synchronous fallback path, or later from finish() on the io_uring path.
void IOUringEngine::submit_read(int fd, size_t len, off_t offset, function<void(string_view)> cb)
// Synchronous fallback (presumably guarded by !using_uring — the guard is not
// visible here; confirm): do the whole read with pread() and call back now.
// The buffer is oversized by slop_bytes, but only len bytes are exposed.
s.resize(len + slop_bytes);
complete_pread(fd, &s[0], len, offset);
cb(string_view(s.data(), len));
// Async path: only take an SQE if the ring has a free slot for us.
if (pending_reads < queue_depth) {
io_uring_sqe *sqe = io_uring_get_sqe(&ring);
fprintf(stderr, "io_uring_get_sqe: %s\n", strerror(errno));
submit_read_internal(sqe, fd, len, offset, move(cb));
// Ring is full; park the request until finish() frees up a slot and
// drains queued_reads.
queued_reads.push(QueuedRead{ fd, len, offset, move(cb) });
// Fill in an already-acquired SQE with a readv for this request. Allocates an
// aligned buffer (len + slop_bytes), wraps all request state in a
// heap-allocated PendingRead, and attaches that pointer as the SQE's
// user_data so finish() can complete or resubmit the read later.
void IOUringEngine::submit_read_internal(io_uring_sqe *sqe, int fd, size_t len, off_t offset, function<void(string_view)> cb)
// 4096-byte alignment — presumably page/block alignment for efficient I/O;
// confirm whether the fd is opened with O_DIRECT.
if (posix_memalign(&buf, /*alignment=*/4096, len + slop_bytes)) {
fprintf(stderr, "Couldn't allocate %zu bytes: %s\n", len, strerror(errno));
// NOTE(review): raw new — ownership implicitly passes to the completion
// handling in finish() via user_data. The iovec covers only len bytes;
// the slop is never read into.
PendingRead *pending = new PendingRead{ buf, len, move(cb), fd, offset, { buf, len } };
io_uring_prep_readv(sqe, fd, &pending->iov, 1, offset);
io_uring_sqe_set_data(sqe, pending);
// Drive all outstanding I/O to completion: submit anything still pending,
// wait for completions, run the callbacks, resubmit short reads, and feed
// queued reads into ring slots as they free up, until nothing is left.
void IOUringEngine::finish()
// anything_to_submit tracks whether there are SQEs prepared but not yet
// handed to the kernel, so we know whether to submit before blocking.
bool anything_to_submit = true;
while (pending_reads > 0) {
// First try a non-blocking peek for an already-finished CQE.
if (io_uring_peek_cqe(&ring, &cqe) != 0) {
if (anything_to_submit) {
// Nothing ready, so submit whatever is pending and then do a blocking wait.
int ret = io_uring_submit_and_wait(&ring, 1);
fprintf(stderr, "io_uring_submit(queued): %s\n", strerror(-ret));
anything_to_submit = false;
// Nothing new to submit; just block until a completion arrives.
int ret = io_uring_wait_cqe(&ring, &cqe);
fprintf(stderr, "io_uring_wait_cqe: %s\n", strerror(-ret));
// Process every completion currently in the CQ ring.
io_uring_for_each_cqe(&ring, head, cqe)
// user_data carries the PendingRead set by submit_read_internal.
PendingRead *pending = reinterpret_cast<PendingRead *>(cqe->user_data);
// Negative res is -errno for a failed read.
fprintf(stderr, "async read failed: %s\n", strerror(-cqe->res));
if (size_t(cqe->res) < pending->iov.iov_len) {
// Incomplete read, so resubmit it.
// Advance the iovec/offset past the bytes we already got and
// re-prep the same PendingRead on a fresh SQE.
pending->iov.iov_base = (char *)pending->iov.iov_base + cqe->res;
pending->iov.iov_len -= cqe->res;
pending->offset += cqe->res;
io_uring_cqe_seen(&ring, cqe);
io_uring_sqe *sqe = io_uring_get_sqe(&ring);
if (sqe == nullptr) {
// We just consumed a CQE, so a free SQE should exist.
fprintf(stderr, "No free SQE for resubmit; this shouldn't happen.\n");
io_uring_prep_readv(sqe, pending->fd, &pending->iov, 1, pending->offset);
io_uring_sqe_set_data(sqe, pending);
anything_to_submit = true;
// Fully read: retire the CQE and hand the data to the callback.
io_uring_cqe_seen(&ring, cqe);
// Snapshot pending_reads so we can detect reads issued by the callback.
size_t old_pending_reads = pending_reads;
pending->cb(string_view(reinterpret_cast<char *>(pending->buf), pending->len));
if (pending_reads != old_pending_reads) {
// A new read was made in the callback (and not queued),
// so we need to re-submit.
anything_to_submit = true;
// See if there are any queued reads we can submit now.
while (!queued_reads.empty() && pending_reads < queue_depth) {
io_uring_sqe *sqe = io_uring_get_sqe(&ring);
if (sqe == nullptr) {
fprintf(stderr, "io_uring_get_sqe: %s\n", strerror(errno));
QueuedRead &qr = queued_reads.front();
submit_read_internal(sqe, qr.fd, qr.len, qr.offset, move(qr.cb));
anything_to_submit = true;
// Synchronous helper: read exactly len bytes at offset from fd using pread(),
// retrying on EINTR and — presumably in a loop that continues beyond this
// excerpt — advancing past short reads until everything has been read.
void complete_pread(int fd, void *ptr, size_t len, off_t offset)
ssize_t ret = pread(fd, ptr, len, offset);
// Interrupted by a signal: simply retry the same read.
if (ret == -1 && errno == EINTR) {
// Short read: advance the destination pointer (and, presumably, offset/len)
// and read again.
ptr = reinterpret_cast<char *>(ptr) + ret;