5 #include "io_uring_engine.h"
// Constructor: try to set up an io_uring instance with `queue_depth` entries.
// If the kernel lacks io_uring support (or init otherwise fails), we record
// that in `using_uring` so reads can fall back to a synchronous path
// (see submit_read / complete_pread).
14 IOUringEngine::IOUringEngine()
19 int ret = io_uring_queue_init(queue_depth, &ring, 0);
// liburing returns -errno on failure; any non-negative value means the ring
// is usable. No hard failure here — failure just disables async mode.
21 using_uring = (ret >= 0);
// Request an asynchronous read of `len` bytes at `offset` from `fd`; when the
// read completes (reaped in finish()), `cb` is invoked with the data as a
// string. If io_uring is unavailable, the read is done synchronously instead.
24 void IOUringEngine::submit_read(int fd, size_t len, off_t offset, function<void(string)> cb)
// Synchronous fallback path (guard condition not visible in this excerpt —
// presumably `if (!using_uring)`; confirm against the full file).
30 complete_pread(fd, &s[0], len, offset);
// Room in the ring? Grab an SQE and submit right away; otherwise the request
// is parked in `queued_reads` until finish() frees up slots.
36 if (pending_reads < queue_depth) {
37 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
// NOTE(review): io_uring_get_sqe() does not set errno, so strerror(errno)
// here may print a stale/unrelated message — TODO confirm and fix upstream.
39 fprintf(stderr, "io_uring_get_sqe: %s\n", strerror(errno));
42 submit_read_internal(sqe, fd, len, offset, move(cb));
// Ring full: defer. The callback is moved into the queued entry.
44 queued_reads.push(QueuedRead{ fd, len, offset, move(cb) });
// Fill in an already-acquired SQE as a readv of `len` bytes at `offset` into a
// freshly allocated, 4096-byte-aligned buffer. The PendingRead created here
// owns the buffer and the callback; finish() recovers it via the CQE's
// user_data and (presumably) frees it after invoking the callback — confirm
// against the completion path in the full file.
50 void IOUringEngine::submit_read_internal(io_uring_sqe *sqe, int fd, size_t len, off_t offset, function<void(string)> cb)
// Aligned allocation (O_DIRECT-friendly alignment, 4 KiB).
53 if (posix_memalign(&buf, /*alignment=*/4096, len)) {
// NOTE(review): posix_memalign() returns the error code and does NOT set
// errno (per POSIX), so strerror(errno) here likely prints the wrong
// message; should be strerror(<return value>). Error lines after this
// fprintf are not visible in this excerpt.
54 fprintf(stderr, "Couldn't allocate %zu bytes: %s\n", len, strerror(errno));
// The trailing `{ buf, len }` initializes the iovec handed to readv below.
57 PendingRead *pending = new PendingRead{ buf, len, move(cb), fd, offset, { buf, len } };
59 io_uring_prep_readv(sqe, fd, &pending->iov, 1, offset);
// Stash the PendingRead* in user_data so finish() can map CQE -> request.
60 io_uring_sqe_set_data(sqe, pending);
// Drain the engine: submit anything still unsubmitted, then block until every
// pending read has completed and its callback has run, resubmitting short
// reads and feeding queued reads into the ring as slots free up.
65 void IOUringEngine::finish()
// `anything_to_submit` tracks whether there are SQEs prepared but not yet
// handed to the kernel; we start true so the first pass calls io_uring_submit.
72 bool anything_to_submit = true;
73 while (pending_reads > 0) {
// Completion buffer. NOTE(review): despite the names (`num_sqes`,
// `sqe_idx`), these are CQEs — completion entries, not submission entries.
74 io_uring_cqe *cqes[queue_depth];
// Non-blocking reap of whatever has already completed.
75 int num_sqes = io_uring_peek_batch_cqe(&ring, cqes, queue_depth);
// Nothing ready (guard not fully visible in this excerpt): flush pending
// submissions, then fall through to a blocking wait.
77 if (anything_to_submit) {
78 // Nothing ready, so submit whatever is pending and then do a blocking wait.
79 int ret = io_uring_submit(&ring);
// liburing returns -errno; hence strerror(-ret).
81 fprintf(stderr, "io_uring_submit(queued): %s\n", strerror(-ret));
84 anything_to_submit = false;
// Blocking wait for at least one completion. Presumably a missing line
// sets num_sqes = 1 after this succeeds — confirm against the full file.
86 int ret = io_uring_wait_cqe(&ring, &cqes[0]);
88 fprintf(stderr, "io_uring_wait_cqe: %s\n", strerror(-ret));
// Process each reaped completion.
94 for (int sqe_idx = 0; sqe_idx < num_sqes; ++sqe_idx) {
95 io_uring_cqe *cqe = cqes[sqe_idx];
// Recover the request set via io_uring_sqe_set_data() at submit time.
96 PendingRead *pending = reinterpret_cast<PendingRead *>(cqe->user_data);
// Negative res is -errno for the failed read.
98 fprintf(stderr, "async read failed: %s\n", strerror(-cqe->res));
// Short read: advance the iovec/offset past the bytes received and
// resubmit the remainder with the same PendingRead.
102 if (size_t(cqe->res) < pending->iov.iov_len) {
103 // Incomplete read, so resubmit it.
104 pending->iov.iov_base = (char *)pending->iov.iov_base + cqe->res;
105 pending->iov.iov_len -= cqe->res;
106 pending->offset += cqe->res;
// Mark the CQE consumed before reusing its slot for the resubmit.
107 io_uring_cqe_seen(&ring, cqe);
109 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
110 if (sqe == nullptr) {
// We just freed a CQE slot, so an SQE should always be available here.
111 fprintf(stderr, "No free SQE for resubmit; this shouldn't happen.\n");
114 io_uring_prep_readv(sqe, pending->fd, &pending->iov, 1, pending->offset);
115 io_uring_sqe_set_data(sqe, pending);
// The resubmitted SQE needs another io_uring_submit() pass.
116 anything_to_submit = true;
// Full read: retire the CQE and hand the data to the callback.
118 io_uring_cqe_seen(&ring, cqe);
// Snapshot pending_reads so we can detect reads issued by the callback.
121 size_t old_pending_reads = pending_reads;
// The callback copies the buffer into a string; `pending` (and its buffer)
// are presumably freed in lines not visible here — confirm no leak.
122 pending->cb(string(reinterpret_cast<char *>(pending->buf), pending->len));
126 if (pending_reads != old_pending_reads) {
127 // A new read was made in the callback (and not queued),
128 // so we need to re-submit.
129 anything_to_submit = true;
133 // See if there are any queued reads we can submit now.
134 while (!queued_reads.empty() && pending_reads < queue_depth) {
135 io_uring_sqe *sqe = io_uring_get_sqe(&ring);
136 if (sqe == nullptr) {
// NOTE(review): io_uring_get_sqe() does not set errno, so this message
// may be misleading — TODO confirm.
137 fprintf(stderr, "io_uring_get_sqe: %s\n", strerror(errno));
140 QueuedRead &qr = queued_reads.front();
// Moving qr.cb is safe: the entry is popped (in a line not shown) right
// after this call — confirm against the full file.
141 submit_read_internal(sqe, qr.fd, qr.len, qr.offset, move(qr.cb));
143 anything_to_submit = true;
150 void complete_pread(int fd, void *ptr, size_t len, off_t offset)
153 ssize_t ret = pread(fd, ptr, len, offset);
154 if (ret == -1 && errno == EINTR) {
161 ptr = reinterpret_cast<char *>(ptr) + ret;