5 #include "io_uring_engine.h"
// Set up the io_uring. If initialization fails (e.g. a kernel without
// io_uring support), using_uring becomes false and reads presumably take
// the synchronous pread path visible in submit_read — confirm against
// the elided guard there.
14 IOUringEngine::IOUringEngine(size_t slop_bytes)
// slop_bytes = extra space allocated past each requested read length
// (used in submit_read / submit_read_internal buffer sizing).
15 : slop_bytes(slop_bytes)
// 0 = no setup flags; queue_depth entries requested for the rings.
20 int ret = io_uring_queue_init(queue_depth, &ring, 0);
// io_uring_queue_init() returns a negative error code on failure.
22 using_uring = (ret >= 0);
25 void IOUringEngine::submit_read(int fd, size_t len, off_t offset, function<void(string_view)> cb)
// Read `len` bytes from `fd` at `offset` and invoke `cb` with the data.
// Buffers are sized len + slop_bytes, so callers may touch slightly past
// the requested range. NOTE(review): several lines (branch guards,
// pending_reads accounting) are elided from this view.
//
// Synchronous fallback path — presumably taken when !using_uring
// (guard elided; confirm): blocking pread into a local string, then the
// callback runs inline before submit_read returns.
30 s.resize(len + slop_bytes);
31 complete_pread(fd, &s[0], len, offset);
32 cb(string_view(s.data(), len));
// Asynchronous path: submit immediately if the ring has capacity,
// otherwise park the request; finish() drains queued_reads later.
37 if (pending_reads < queue_depth) {
38 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
// NOTE(review): io_uring_get_sqe() returns nullptr when the SQ ring is
// full and does not set errno, so this strerror(errno) can print an
// unrelated error message.
40 fprintf(stderr, "io_uring_get_sqe: %s\n", strerror(errno));
43 submit_read_internal(sqe, fd, len, offset, move(cb));
// No free slot: remember the read for later submission in finish().
45 queued_reads.push(QueuedRead{ fd, len, offset, move(cb) });
51 void IOUringEngine::submit_read_internal(io_uring_sqe *sqe, int fd, size_t len, off_t offset, function<void(string_view)> cb)
// Fill in `sqe` with an async readv of `len` bytes at `offset`.
// Allocates a 4 KiB-aligned buffer of len + slop_bytes and attaches a
// heap-allocated PendingRead as the SQE's user_data, so finish() can
// recover the buffer, callback and iovec when the completion arrives.
// NOTE(review): posix_memalign() returns its error code directly and
// does NOT set errno, so the strerror(errno) below may print the wrong
// message (should be strerror of the return value). Also, the buffer
// and PendingRead are presumably freed after the callback runs on an
// elided line — confirm there is no leak on the error path either.
54 if (posix_memalign(&buf, /*alignment=*/4096, len + slop_bytes)) {
55 fprintf(stderr, "Couldn't allocate %zu bytes: %s\n", len, strerror(errno));
// The iovec member { buf, len } is what the kernel reads into; it is
// also what finish() adjusts on a short read.
58 PendingRead *pending = new PendingRead{ buf, len, move(cb), fd, offset, { buf, len } };
60 io_uring_prep_readv(sqe, fd, &pending->iov, 1, offset);
// user_data round-trips through the CQE so completions can be matched
// back to their PendingRead.
61 io_uring_sqe_set_data(sqe, pending);
66 void IOUringEngine::finish()
// Drain all in-flight and queued reads, running each read's callback as
// its completion arrives. Loops until nothing is pending.
// NOTE(review): a number of lines are elided from this view (early
// returns after the fprintf error reports, pending_reads decrements,
// buffer frees, the final submit), so the comments below describe only
// the visible logic.
73 bool anything_to_submit = true;
74 while (pending_reads > 0) {
// Non-blocking peek first; only fall back to a blocking wait when the
// CQ ring is empty.
76 if (io_uring_peek_cqe(&ring, &cqe) != 0) {
77 if (anything_to_submit) {
78 // Nothing ready, so submit whatever is pending and then do a blocking wait.
79 int ret = io_uring_submit_and_wait(&ring, 1);
81 fprintf(stderr, "io_uring_submit(queued): %s\n", strerror(-ret));
84 anything_to_submit = false;
// Everything already submitted; just block for one completion.
86 int ret = io_uring_wait_cqe(&ring, &cqe);
// liburing returns negated errno values, hence strerror(-ret).
88 fprintf(stderr, "io_uring_wait_cqe: %s\n", strerror(-ret));
// Process every completion currently visible in the CQ ring.
95 io_uring_for_each_cqe(&ring, head, cqe)
// user_data was set to the PendingRead* in submit_read_internal.
97 PendingRead *pending = reinterpret_cast<PendingRead *>(cqe->user_data);
// A negative res is the negated errno of the failed read.
99 fprintf(stderr, "async read failed: %s\n", strerror(-cqe->res));
103 if (size_t(cqe->res) < pending->iov.iov_len) {
104 // Incomplete read, so resubmit it.
105 pending->iov.iov_base = (char *)pending->iov.iov_base + cqe->res;
106 pending->iov.iov_len -= cqe->res;
107 pending->offset += cqe->res;
108 io_uring_cqe_seen(&ring, cqe);
110 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
111 if (sqe == nullptr) {
// We just consumed a CQE for this read, so a ring slot should be free;
// what happens after this report is on an elided line — confirm.
112 fprintf(stderr, "No free SQE for resubmit; this shouldn't happen.\n");
// Re-queue the remainder of the read at the advanced offset; the same
// PendingRead travels with it.
115 io_uring_prep_readv(sqe, pending->fd, &pending->iov, 1, pending->offset);
116 io_uring_sqe_set_data(sqe, pending);
117 anything_to_submit = true;
// Fully read: retire the CQE, then hand the complete buffer (original
// base pointer and length, not the possibly-adjusted iovec) to the
// callback.
119 io_uring_cqe_seen(&ring, cqe);
// Snapshot so we can detect reads submitted from inside the callback.
122 size_t old_pending_reads = pending_reads;
123 pending->cb(string_view(reinterpret_cast<char *>(pending->buf), pending->len));
127 if (pending_reads != old_pending_reads) {
128 // A new read was made in the callback (and not queued),
129 // so we need to re-submit.
130 anything_to_submit = true;
135 // See if there are any queued reads we can submit now.
136 while (!queued_reads.empty() && pending_reads < queue_depth) {
137 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
138 if (sqe == nullptr) {
// NOTE(review): io_uring_get_sqe() does not set errno (it returns
// nullptr when the SQ ring is full), so this strerror(errno) can print
// an unrelated message — same issue as in submit_read.
139 fprintf(stderr, "io_uring_get_sqe: %s\n", strerror(errno));
142 QueuedRead &qr = queued_reads.front();
// qr.cb is moved from while qr is still in the queue; the pop is on an
// elided line — fine as long as it follows immediately.
143 submit_read_internal(sqe, qr.fd, qr.len, qr.offset, move(qr.cb));
145 anything_to_submit = true;
151 void complete_pread(int fd, void *ptr, size_t len, off_t offset)
// Blocking helper: appears to loop until all `len` bytes are read,
// retrying on EINTR and advancing past short reads. The body is
// truncated in this view past the pointer advance below.
154 ssize_t ret = pread(fd, ptr, len, offset);
// pread() can be interrupted by a signal before transferring data; the
// retry (continue) for that case is on an elided line.
155 if (ret == -1 && errno == EINTR) {
// Short read: advance the destination pointer by the bytes received.
// NOTE(review): the matching len/offset adjustments and the error/EOF
// handling fall outside the visible lines — confirm them there.
162 ptr = reinterpret_cast<char *>(ptr) + ret;