diff --git a/server.cpp b/server.cpp
index 31e5c0a..852ecdd 100644
--- a/server.cpp
+++ b/server.cpp
@@ -1,90 +1,988 @@
+#include
+#include
+#include
+#include
+#include
 #include
+#include
 #include
-#include
-#include
-#include
-#include
+#include
+#include
 #include
-#include
 #include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
 #include
+#include
+#include
+#include
+
+#include "tlse.h"
 
-#include "metacube.h"
+#include "acceptor.h"
+#include "accesslog.h"
+#include "log.h"
+#include "metacube2.h"
+#include "parse.h"
 #include "server.h"
-#include "mutexlock.h"
+#include "state.pb.h"
+#include "stream.h"
+#include "util.h"
+
+#ifndef SO_MAX_PACING_RATE
+#define SO_MAX_PACING_RATE 47
+#endif
 
 using namespace std;
 
-Server::Server()
+extern AccessLogThread *access_log;
+
+namespace {
+
+inline bool is_equal(timespec a, timespec b)
+{
+	return a.tv_sec == b.tv_sec &&
+	       a.tv_nsec == b.tv_nsec;
+}
+
+inline bool is_earlier(timespec a, timespec b)
 {
-	pthread_mutex_init(&mutex, NULL);
+	if (a.tv_sec != b.tv_sec)
+		return a.tv_sec < b.tv_sec;
+	return a.tv_nsec < b.tv_nsec;
+}
+
+}  // namespace
 
+Server::Server()
+{
 	epoll_fd = epoll_create(1024);  // Size argument is ignored.
 	if (epoll_fd == -1) {
-		perror("epoll_fd");
+		log_perror("epoll_fd");
 		exit(1);
 	}
 }
 
-void Server::run()
+Server::~Server()
 {
-	pthread_t thread;
-	pthread_create(&thread, NULL, Server::do_work_thunk, this);
+	safe_close(epoll_fd);
 }
 
-void *Server::do_work_thunk(void *arg)
+vector<ClientStats> Server::get_client_stats() const
 {
-	Server *server = static_cast<Server *>(arg);
-	server->do_work();
-	return NULL;
+	vector<ClientStats> ret;
+
+	lock_guard<mutex> lock(mu);
+	for (const auto &fd_and_client : clients) {
+		ret.push_back(fd_and_client.second.get_stats());
+	}
+	return ret;
 }
 
 void Server::do_work()
 {
-	for ( ;; ) {
-		MutexLock lock(&mutex);
-		printf("server thread running\n");
-		sleep(1);
+	while (!should_stop()) {
+		// Wait until there's activity on at least one of the fds,
+		// or 20 ms (about one frame at 50 fps) has elapsed.
+		//
+		// We could in theory wait forever and rely on wakeup()
+		// from add_client_deferred() and add_data_deferred(),
+		// but wakeup is a pretty expensive operation, and the
+		// two threads might end up fighting over a lock, so it's
+		// seemingly (much) more efficient to just have a timeout here.
+		int nfds = epoll_pwait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS, &sigset_without_usr1_block);
+		if (nfds == -1 && errno != EINTR) {
+			log_perror("epoll_wait");
+			exit(1);
+		}
+
+		lock_guard<mutex> lock(mu);  // We release the mutex between iterations.
+
+		process_queued_data();
+
+		// Process each client where we have socket activity.
+		for (int i = 0; i < nfds; ++i) {
+			Client *client = reinterpret_cast<Client *>(events[i].data.ptr);
+
+			if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
+				close_client(client);
+				continue;
+			}
+
+			process_client(client);
+		}
+
+		// Process each client where its stream has new data,
+		// even if there was no socket activity.
+		for (unique_ptr<Stream> &stream : streams) {
+			vector<Client *> to_process;
+			swap(stream->to_process, to_process);
+			for (Client *client : to_process) {
+				process_client(client);
+			}
+		}
+
+		// Finally, go through each client to see if it's timed out
+		// in the READING_REQUEST state.
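
The heart of do_work() above is a single epoll loop with a short timeout instead of indefinite blocking, so that work queued by other threads gets picked up even when no socket is active. A minimal standalone sketch of the same pattern (illustrative code, not cubemap's API; error handling abbreviated):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>

int main()
{
	int epoll_fd = epoll_create1(0);
	if (epoll_fd == -1) {
		perror("epoll_create1");
		exit(1);
	}

	epoll_event events[1024];
	for ( ;; ) {
		// 20 ms timeout, mirroring EPOLL_TIMEOUT_MS above: even with no
		// socket activity, we wake up regularly to drain queued work.
		int nfds = epoll_wait(epoll_fd, events, 1024, 20);
		if (nfds == -1) {
			if (errno == EINTR) continue;  // Interrupted by a signal; just retry.
			perror("epoll_wait");
			exit(1);
		}
		for (int i = 0; i < nfds; ++i) {
			// events[i].data.ptr would identify the connection, as in the patch.
		}
		// Timeout or not: this is where queued clients and data get processed.
	}
}
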
+		// (Seemingly there are clients
+		// that can hold sockets up for days at a time without sending
+		// anything at all.)
+		timespec timeout_time;
+		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &timeout_time) == -1) {
+			log_perror("clock_gettime(CLOCK_MONOTONIC_COARSE)");
+			continue;
+		}
+		timeout_time.tv_sec -= REQUEST_READ_TIMEOUT_SEC;
+		while (!clients_ordered_by_connect_time.empty()) {
+			const pair<timespec, int> &connect_time_and_fd = clients_ordered_by_connect_time.front();
+
+			// See if we have reached the end of clients to process.
+			if (is_earlier(timeout_time, connect_time_and_fd.first)) {
+				break;
+			}
+
+			// If this client doesn't exist anymore, just ignore it
+			// (it was deleted earlier).
+			auto client_it = clients.find(connect_time_and_fd.second);
+			if (client_it == clients.end()) {
+				clients_ordered_by_connect_time.pop();
+				continue;
+			}
+			Client *client = &client_it->second;
+			if (!is_equal(client->connect_time, connect_time_and_fd.first)) {
+				// Another client has taken this fd in the meantime.
+				clients_ordered_by_connect_time.pop();
+				continue;
+			}
+
+			if (client->state != Client::READING_REQUEST) {
+				// Only READING_REQUEST can time out.
+				clients_ordered_by_connect_time.pop();
+				continue;
+			}
+
+			// OK, it timed out.
+			close_client(client);
+			clients_ordered_by_connect_time.pop();
+		}
 	}
 }
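
add_client_deferred() and add_data_deferred() hand work to the epoll thread through a small queue guarded by a narrow mutex; the worker drains it at the top of every iteration (see process_queued_data() at the end of the patch). A reduced, self-contained sketch of that hand-off pattern, with the types simplified:

#include <mutex>
#include <utility>
#include <vector>

class Acceptor;

class WorkQueue {
public:
	// Called from any producer thread; only touches the queue briefly.
	void add_deferred(int sock, Acceptor *acceptor)
	{
		std::lock_guard<std::mutex> lock(mu);
		queued.emplace_back(sock, acceptor);
	}

	// Called only from the owning (epoll) thread.
	template<class F>
	void drain(F &&handle)
	{
		std::vector<std::pair<int, Acceptor *>> work;
		{
			std::lock_guard<std::mutex> lock(mu);
			work.swap(queued);  // Keep the critical section short.
		}
		for (auto &item : work) {
			handle(item.first, item.second);
		}
	}

private:
	std::mutex mu;
	std::vector<std::pair<int, Acceptor *>> queued;
};
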
 
+CubemapStateProto Server::serialize()
+{
+	// We don't serialize anything queued, so empty the queues.
+	process_queued_data();
+
+	// Set all clients in a consistent state before serializing
+	// (i.e., they have no remaining lost data). Otherwise, increasing
+	// the backlog could take clients into a newly valid area of the backlog,
+	// sending a stream of zeros instead of skipping the data as it should.
+	//
+	// TODO: Do this when clients are added back from serialized state instead;
+	// it would probably be less wasteful.
+	for (auto &fd_and_client : clients) {
+		skip_lost_data(&fd_and_client.second);
+	}
+
+	CubemapStateProto serialized;
+	for (const auto &fd_and_client : clients) {
+		serialized.add_clients()->MergeFrom(fd_and_client.second.serialize());
+	}
+	for (unique_ptr<Stream> &stream : streams) {
+		serialized.add_streams()->MergeFrom(stream->serialize());
+	}
+	return serialized;
+}
+
+void Server::add_client_deferred(int sock, Acceptor *acceptor)
+{
+	lock_guard<mutex> lock(queued_clients_mutex);
+	queued_add_clients.push_back(std::make_pair(sock, acceptor));
+}
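
add_client() below also feeds clients_ordered_by_connect_time, the FIFO that do_work() uses for O(1) timeout checks; clamping timestamps to be nondecreasing keeps the queue sorted even if the monotonic clock misbehaves. A standalone sketch of the same idea (simplified: the real code additionally requires the client to still be in READING_REQUEST and to match the stored timestamp exactly, to cope with fd reuse):

#include <queue>
#include <set>
#include <time.h>
#include <utility>

struct TimeoutTracker {
	std::queue<std::pair<timespec, int>> by_connect_time;
	std::set<int> live_fds;

	static bool is_earlier(const timespec &a, const timespec &b)
	{
		if (a.tv_sec != b.tv_sec) return a.tv_sec < b.tv_sec;
		return a.tv_nsec < b.tv_nsec;
	}

	void add(int fd)
	{
		timespec now;
		clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
		// Clamp so the queue stays sorted even if the clock goes backwards.
		if (!by_connect_time.empty() && is_earlier(now, by_connect_time.back().first)) {
			now = by_connect_time.back().first;
		}
		by_connect_time.push(std::make_pair(now, fd));
		live_fds.insert(fd);
	}

	// Close every still-live fd that connected at or before 'cutoff'.
	template<class CloseFn>
	void expire(timespec cutoff, CloseFn &&close_fn)
	{
		while (!by_connect_time.empty() &&
		       !is_earlier(cutoff, by_connect_time.front().first)) {
			int fd = by_connect_time.front().second;
			by_connect_time.pop();
			if (live_fds.erase(fd)) close_fn(fd);
		}
	}
};
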
 
-void Server::add_client(int sock)
+void Server::add_client(int sock, Acceptor *acceptor)
 {
-	MutexLock lock(&mutex);
-	Client new_client;
-	new_client.state = Client::READING_REQUEST;
-	new_client.header_bytes_sent = 0;
-	new_client.bytes_sent = 0;
+	const bool is_tls = acceptor->is_tls();
+	auto inserted = clients.insert(make_pair(sock, Client(sock)));
+	assert(inserted.second == true);  // Should not already exist.
+	Client *client_ptr = &inserted.first->second;
 
-	clients.insert(make_pair(sock, new_client));
+	// Connection timestamps must be nondecreasing. I can't find any guarantee
+	// that even the monotonic clock can't go backwards by a small amount
+	// (think switching between CPUs with non-synchronized TSCs), so if
+	// this actually should happen, we hack around it by fudging
+	// connect_time.
+	if (!clients_ordered_by_connect_time.empty() &&
+	    is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first)) {
+		client_ptr->connect_time = clients_ordered_by_connect_time.back().first;
+	}
+	clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, sock));
 
 	// Start listening on data from this socket.
 	epoll_event ev;
-	ev.events = EPOLLIN;
-	ev.data.fd = sock;
+	if (is_tls) {
+		// Even in the initial state (READING_REQUEST), TLS needs to
+		// send data for the handshake, and thus might end up needing
+		// to know about EPOLLOUT.
+		ev.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP;
+	} else {
+		// EPOLLOUT will be added once we go out of READING_REQUEST.
+		ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
+	}
+	ev.data.ptr = client_ptr;
 	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
-		perror("epoll_ctl(EPOLL_CTL_ADD)");
+		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
 		exit(1);
 	}
+
+	if (is_tls) {
+		assert(tls_server_contexts.count(acceptor));
+		client_ptr->tls_context = tls_accept(tls_server_contexts[acceptor]);
+		if (client_ptr->tls_context == nullptr) {
+			log(ERROR, "tls_accept() failed");
+			close_client(client_ptr);
+			return;
+		}
+		tls_make_exportable(client_ptr->tls_context, 1);
+	}
+
+	process_client(client_ptr);
 }
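
Restoring a client from serialized state (below) must avoid re-running the handshake logic; it re-registers the fd with epoll in whatever state the client was saved in, and puts it back to sleep if its stream has no new data. Before that, a note on the epoll registration just made: edge triggering (EPOLLET) means readiness is reported only on transitions, which is why process_client() is written to keep reading or writing until it hits EAGAIN. A tiny sketch of the registration step in isolation (generic code, not cubemap's API):

#include <sys/epoll.h>

// Register 'fd' edge-triggered, with 'conn' as the cookie that epoll_wait()
// hands back; mirrors ev.data.ptr = client_ptr above.
bool watch_connection(int epoll_fd, int fd, void *conn, bool want_write)
{
	epoll_event ev;
	ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
	if (want_write) {
		ev.events |= EPOLLOUT;  // E.g., a TLS handshake that needs to send.
	}
	ev.data.ptr = conn;
	return epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev) == 0;
}
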
+
+void Server::add_client_from_serialized(const ClientProto &client)
+{
+	lock_guard<mutex> lock(mu);
+	Stream *stream;
+	int stream_index = lookup_stream_by_url(client.url());
+	if (stream_index == -1) {
+		assert(client.state() != Client::SENDING_DATA);
+		stream = nullptr;
+	} else {
+		stream = streams[stream_index].get();
+	}
+	auto inserted = clients.insert(make_pair(client.sock(), Client(client, stream)));
+	assert(inserted.second == true);  // Should not already exist.
+	Client *client_ptr = &inserted.first->second;
+
+	// Connection timestamps must be nondecreasing.
+	assert(clients_ordered_by_connect_time.empty() ||
+	       !is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first));
+	clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, client.sock()));
+
+	// Start listening on data from this socket.
+	epoll_event ev;
+	if (client.state() == Client::READING_REQUEST) {
+		// See the corresponding comment in Server::add_client().
+		if (client.has_tls_context()) {
+			ev.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP;
+		} else {
+			ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
+		}
+	} else {
+		// If we don't have more data for this client, we'll be putting it into
+		// the sleeping array again soon.
+		ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
+	}
+	ev.data.ptr = client_ptr;
+	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
+		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
+		exit(1);
+	}
+
+	if (client_ptr->state == Client::WAITING_FOR_KEYFRAME ||
+	    client_ptr->state == Client::PREBUFFERING ||
+	    (client_ptr->state == Client::SENDING_DATA &&
+	     client_ptr->stream_pos == client_ptr->stream->bytes_received)) {
+		client_ptr->stream->put_client_to_sleep(client_ptr);
+	} else {
+		process_client(client_ptr);
+	}
+}
+
+int Server::lookup_stream_by_url(const string &url) const
+{
+	map<string, int>::const_iterator stream_url_it = stream_url_map.find(url);
+	if (stream_url_it == stream_url_map.end()) {
+		return -1;
+	}
+	return stream_url_it->second;
+}
+
+int Server::add_stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Stream::Encoding encoding, Stream::Encoding src_encoding)
+{
+	lock_guard<mutex> lock(mu);
+	stream_url_map.insert(make_pair(url, streams.size()));
+	streams.emplace_back(new Stream(url, backlog_size, prebuffering_bytes, encoding, src_encoding));
+	return streams.size() - 1;
+}
+
+int Server::add_stream_from_serialized(const StreamProto &stream, int data_fd)
+{
+	lock_guard<mutex> lock(mu);
+	stream_url_map.insert(make_pair(stream.url(), streams.size()));
+	streams.emplace_back(new Stream(stream, data_fd));
+	return streams.size() - 1;
 }
 
-void Server::add_stream(const string &stream_id)
+void Server::set_backlog_size(int stream_index, size_t new_size)
+{
+	lock_guard<mutex> lock(mu);
+	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+	streams[stream_index]->set_backlog_size(new_size);
+}
+
+void Server::set_prebuffering_bytes(int stream_index, size_t new_amount)
+{
+	lock_guard<mutex> lock(mu);
+	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+	streams[stream_index]->prebuffering_bytes = new_amount;
+}
+
+void Server::set_encoding(int stream_index, Stream::Encoding encoding)
+{
+	lock_guard<mutex> lock(mu);
+	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+	streams[stream_index]->encoding = encoding;
+}
+
+void Server::set_src_encoding(int stream_index, Stream::Encoding encoding)
 {
-	// TODO
+	lock_guard<mutex> lock(mu);
+	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+	streams[stream_index]->src_encoding = encoding;
 }
 
-void Server::set_header(const string &stream_id, const string &header)
+void Server::set_header(int stream_index, const string &http_header, const string &stream_header)
 {
-	// TODO
-	printf("got header! %lu bytes\n", header.size());
+	lock_guard<mutex> lock(mu);
+	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+	streams[stream_index]->http_header = http_header;
+
+	if (stream_header != streams[stream_index]->stream_header) {
+		// We cannot start at any of the older starting points anymore,
+		// since they'd get the wrong header for the stream (not to mention
+		// that a changed header probably means the stream restarted,
+		// which means any client starting on the old one would probably
+		// stop playing properly at the change point). The next block
+		// should be a suitable starting point (if not, something is
+		// pretty strange), so it will fill up again soon enough.
+		streams[stream_index]->suitable_starting_points.clear();
+	}
+	streams[stream_index]->stream_header = stream_header;
 }
 
-void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+void Server::set_pacing_rate(int stream_index, uint32_t pacing_rate)
 {
-	// TODO
+	lock_guard<mutex> lock(mu);
+	assert(clients.empty());
+	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+	streams[stream_index]->pacing_rate = pacing_rate;
+}
+
+void Server::add_gen204(const std::string &url, const std::string &allow_origin)
+{
+	lock_guard<mutex> lock(mu);
+	assert(clients.empty());
+	ping_url_map[url] = allow_origin;
+}
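
For orientation, here is how a configuration layer might drive the setters above. This is hypothetical driver code; the URL, sizes, and the surrounding function are illustrative, not taken from the patch:

#include "server.h"
#include "stream.h"

void setup_streams(Server *server)
{
	// One stream with a 10 MB backlog and 100 kB of prebuffering,
	// served raw but received as Metacube from the encoder.
	int stream_index = server->add_stream("/test.ts",
	                                      10 * 1024 * 1024,  // backlog_size
	                                      100 * 1024,        // prebuffering_bytes
	                                      Stream::STREAM_ENCODING_RAW,
	                                      Stream::STREAM_ENCODING_METACUBE);
	server->set_pacing_rate(stream_index, ~0U);  // ~0U means no pacing (see parse_request()).
	server->add_gen204("/ping", "*");            // 204 endpoint with permissive CORS.
}
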
+
+void Server::create_tls_context_for_acceptor(const Acceptor *acceptor)
+{
+	assert(acceptor->is_tls());
+
+	bool is_server = true;
+	TLSContext *server_context = tls_create_context(is_server, TLS_V12);
+
+	const string &cert = acceptor->get_certificate_chain();
+	int num_cert = tls_load_certificates(server_context, reinterpret_cast<const unsigned char *>(cert.data()), cert.size());
+	assert(num_cert > 0);  // Should have been checked by config earlier.
+
+	const string &key = acceptor->get_private_key();
+	int num_key = tls_load_private_key(server_context, reinterpret_cast<const unsigned char *>(key.data()), key.size());
+	assert(num_key > 0);  // Should have been checked by config earlier.
+
+	tls_server_contexts.insert(make_pair(acceptor, server_context));
+}
+
+void Server::add_data_deferred(int stream_index, const char *data, size_t bytes, uint16_t metacube_flags)
+{
+	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+	streams[stream_index]->add_data_deferred(data, bytes, metacube_flags);
+}
+
+// See the .h file for postconditions after this function.
+void Server::process_client(Client *client)
+{
+	switch (client->state) {
+	case Client::READING_REQUEST: {
+		if (client->tls_context != nullptr) {
+			if (send_pending_tls_data(client)) {
+				// send_pending_tls_data() hit postconditions #1 or #4.
+				return;
+			}
+		}
+
+read_request_again:
+		// Try to read more of the request.
+		char buf[1024];
+		int ret;
+		if (client->tls_context == nullptr) {
+			ret = read_nontls_data(client, buf, sizeof(buf));
+			if (ret == -1) {
+				// read_nontls_data() hit postconditions #1 or #2.
+				return;
+			}
+		} else {
+			ret = read_tls_data(client, buf, sizeof(buf));
+			if (ret == -1) {
+				// read_tls_data() hit postconditions #1, #2 or #4.
+				return;
+			}
+		}
+
+		RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);
+
+		switch (status) {
+		case RP_OUT_OF_SPACE:
+			log(WARNING, "[%s] Client sent overlong request!", client->remote_addr.c_str());
+			close_client(client);
+			return;
+		case RP_NOT_FINISHED_YET:
+			// OK, we don't have the entire header yet. Fine; we'll get it later.
+			// See if there's more data for us.
+			goto read_request_again;
+		case RP_EXTRA_DATA:
+			log(WARNING, "[%s] Junk data after request!", client->remote_addr.c_str());
+			close_client(client);
+			return;
+		case RP_FINISHED:
+			break;
+		}
+
+		assert(status == RP_FINISHED);
+
+		if (client->tls_context && !client->in_ktls_mode && tls_established(client->tls_context)) {
+			// We're ready to enter kTLS mode, unless we still have some
+			// handshake data to send (which then must be sent as non-kTLS).
+			if (send_pending_tls_data(client)) {
+				// send_pending_tls_data() hit postconditions #1 or #4.
+				return;
+			}
+			ret = tls_make_ktls(client->tls_context, client->sock);
+			if (ret < 0) {
+				log_tls_error("tls_make_ktls", ret);
+				close_client(client);
+				return;
+			}
+			client->in_ktls_mode = true;
+		}
+
+		int error_code = parse_request(client);
+		if (error_code == 200) {
+			construct_header(client);
+		} else if (error_code == 204) {
+			construct_204(client);
+		} else {
+			construct_error(client, error_code);
+		}
+
+		// We've changed states, so fall through.
+		assert(client->state == Client::SENDING_SHORT_RESPONSE ||
+		       client->state == Client::SENDING_HEADER);
+	}
+	case Client::SENDING_SHORT_RESPONSE:
+	case Client::SENDING_HEADER: {
+sending_header_or_short_response_again:
+		int ret;
+		do {
+			ret = write(client->sock,
+			            client->header_or_short_response.data() + client->header_or_short_response_bytes_sent,
+			            client->header_or_short_response.size() - client->header_or_short_response_bytes_sent);
+		} while (ret == -1 && errno == EINTR);
+
+		if (ret == -1 && errno == EAGAIN) {
+			// We're out of socket space, so now we're at the “low edge” of epoll's
+			// edge triggering. epoll will tell us when there is more room, so for now,
+			// just return.
+			// This is postcondition #4.
+			return;
+		}
+
+		if (ret == -1) {
+			// Error! Postcondition #1.
+			log_perror("write");
+			close_client(client);
+			return;
+		}
+
+		client->header_or_short_response_bytes_sent += ret;
+		assert(client->header_or_short_response_bytes_sent <= client->header_or_short_response.size());
+
+		if (client->header_or_short_response_bytes_sent < client->header_or_short_response.size()) {
+			// We haven't sent all yet. Fine; go another round.
+			goto sending_header_or_short_response_again;
+		}
+
+		// We're done sending the header or error! Clear it to release some memory.
+		client->header_or_short_response.clear();
+
+		if (client->state == Client::SENDING_SHORT_RESPONSE) {
+			// We're done sending the error, so now close.
+			// This is postcondition #1.
+			close_client(client);
+			return;
+		}
+
+		Stream *stream = client->stream;
+		if (client->stream_pos == size_t(-2)) {
+			// Start sending from the beginning of the backlog.
+			client->stream_pos = max<ssize_t>(
+			    stream->bytes_received - stream->backlog_size,
+			    0);
+			client->state = Client::SENDING_DATA;
+			goto sending_data;
+		} else if (stream->prebuffering_bytes == 0) {
+			// Start sending from the first keyframe we get. In other
+			// words, we won't send any of the backlog, but we'll start
+			// sending immediately as we get the next keyframe block.
+			// Note that this is functionally identical to the next if branch,
+			// except that we save a binary search.
+			client->stream_pos = stream->bytes_received;
+			client->state = Client::WAITING_FOR_KEYFRAME;
+		} else {
+			// We're not going to send anything to the client before we have
+			// N bytes. However, this wait might be boring; we can just as well
+			// use it to send older data if we have it. We use lower_bound()
+			// so that we are conservative and never add extra latency over just
+			// waiting (assuming CBR or nearly so); otherwise, we could want e.g.
+			// 100 kB prebuffer but end up sending a 10 MB GOP.
+			deque<size_t>::const_iterator starting_point_it =
+				lower_bound(stream->suitable_starting_points.begin(),
+				            stream->suitable_starting_points.end(),
+				            stream->bytes_received - stream->prebuffering_bytes);
+			if (starting_point_it == stream->suitable_starting_points.end()) {
+				// None found. Just put us at the end, and then wait for the
+				// first keyframe to appear.
+				client->stream_pos = stream->bytes_received;
+				client->state = Client::WAITING_FOR_KEYFRAME;
+			} else {
+				client->stream_pos = *starting_point_it;
+				client->state = Client::PREBUFFERING;
+				goto prebuffering;
+			}
+		}
+		// Fall through.
+	}
+	case Client::WAITING_FOR_KEYFRAME: {
+		Stream *stream = client->stream;
+		if (stream->suitable_starting_points.empty() ||
+		    client->stream_pos > stream->suitable_starting_points.back()) {
+			// We haven't received a keyframe since this stream started waiting,
+			// so keep on waiting for one.
+			// This is postcondition #3.
+			stream->put_client_to_sleep(client);
+			return;
+		}
+		client->stream_pos = stream->suitable_starting_points.back();
+		client->state = Client::PREBUFFERING;
+		// Fall through.
+	}
+	case Client::PREBUFFERING: {
+prebuffering:
+		Stream *stream = client->stream;
+		size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+		assert(bytes_to_send <= stream->backlog_size);
+		if (bytes_to_send < stream->prebuffering_bytes) {
+			// We don't have enough bytes buffered to start this client yet.
+			// This is postcondition #3.
+			stream->put_client_to_sleep(client);
+			return;
+		}
+		client->state = Client::SENDING_DATA;
+		// Fall through.
+	}
+	case Client::SENDING_DATA: {
+sending_data:
+		skip_lost_data(client);
+		Stream *stream = client->stream;
+
+sending_data_again:
+		size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+		assert(bytes_to_send <= stream->backlog_size);
+		if (bytes_to_send == 0) {
+			return;
+		}
+
+		// See if we need to split across the circular buffer.
+		bool more_data = false;
+		if ((client->stream_pos % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
+			bytes_to_send = stream->backlog_size - (client->stream_pos % stream->backlog_size);
+			more_data = true;
+		}
+
+		ssize_t ret;
+		do {
+			off_t offset = client->stream_pos % stream->backlog_size;
+			ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
+		} while (ret == -1 && errno == EINTR);
+
+		if (ret == -1 && errno == EAGAIN) {
+			// We're out of socket space, so return; epoll will wake us up
+			// when there is more room.
+			// This is postcondition #4.
+			return;
+		}
+		if (ret == -1) {
+			// Error, close; postcondition #1.
+			log_perror("sendfile");
+			close_client(client);
+			return;
+		}
+		client->stream_pos += ret;
+		client->bytes_sent += ret;
+
+		if (client->stream_pos == stream->bytes_received) {
+			// We don't have any more data for this client, so put it to sleep.
+			// This is postcondition #3.
+			stream->put_client_to_sleep(client);
+		} else if (more_data && size_t(ret) == bytes_to_send) {
+			goto sending_data_again;
+		}
+		break;
+	}
+	default:
+		assert(false);
+	}
+}
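
The SENDING_DATA case above treats the backlog as a file-backed ring buffer: stream_pos grows without bound, and the file offset is always stream_pos modulo backlog_size, so a pending range can straddle the wrap point at most once. A self-contained sketch of that split, with illustrative numbers:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

int main()
{
	const size_t backlog_size = 1000;
	size_t bytes_received = 2350;  // Total bytes ever received on the stream.
	size_t stream_pos = 1800;      // The client has been sent everything up to here.

	size_t bytes_to_send = bytes_received - stream_pos;  // 550 bytes pending.
	assert(bytes_to_send <= backlog_size);

	// First chunk: from offset 800 up to the end of the buffer (200 bytes).
	size_t offset = stream_pos % backlog_size;                            // 800
	size_t first_chunk = std::min(bytes_to_send, backlog_size - offset);  // 200
	printf("send %zu bytes from offset %zu\n", first_chunk, offset);

	// Second chunk (after wraparound): the remaining 350 bytes from offset 0.
	stream_pos += first_chunk;
	bytes_to_send -= first_chunk;
	if (bytes_to_send > 0) {
		printf("send %zu bytes from offset %zu\n", bytes_to_send, stream_pos % backlog_size);
	}
}
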
+
+bool Server::send_pending_tls_data(Client *client)
+{
+	// See if there's data from the TLS library to write.
+	if (client->tls_data_to_send == nullptr) {
+		client->tls_data_to_send = tls_get_write_buffer(client->tls_context, &client->tls_data_left_to_send);
+		if (client->tls_data_to_send == nullptr) {
+			// Really no data to send.
+			return false;
+		}
+	}
+
+send_data_again:
+	int ret;
+	do {
+		ret = write(client->sock, client->tls_data_to_send, client->tls_data_left_to_send);
+	} while (ret == -1 && errno == EINTR);
+	assert(ret < 0 || size_t(ret) <= client->tls_data_left_to_send);
+
+	if (ret == -1 && errno == EAGAIN) {
+		// We're out of socket space, so now we're at the “low edge” of epoll's
+		// edge triggering. epoll will tell us when there is more room, so for now,
+		// just return.
+		// This is postcondition #4.
+		return true;
+	}
+	if (ret == -1) {
+		// Error! Postcondition #1.
+		log_perror("write");
+		close_client(client);
+		return true;
+	}
+	if (ret > 0 && size_t(ret) == client->tls_data_left_to_send) {
+		// All data has been sent, so we don't need to go to sleep.
+		tls_buffer_clear(client->tls_context);
+		client->tls_data_to_send = nullptr;
+		return false;
+	}
+
+	// More data to send, so try again.
+	client->tls_data_to_send += ret;
+	client->tls_data_left_to_send -= ret;
+	goto send_data_again;
+}
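
send_pending_tls_data() is one instance of the standard nonblocking-write loop: retry on EINTR, stop and wait for EPOLLOUT on EAGAIN, and advance the cursor after short writes. The bare pattern, extracted into a generic helper (a sketch, not cubemap API):

#include <errno.h>
#include <unistd.h>

// Returns 0 when everything was written, 1 to wait for EPOLLOUT, -1 on error.
int write_all_nonblocking(int fd, const char **data, size_t *len)
{
	while (*len > 0) {
		ssize_t ret;
		do {
			ret = write(fd, *data, *len);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			return 1;   // Out of socket space; epoll will say when to resume.
		}
		if (ret == -1) {
			return -1;  // Real error; the caller should close the connection.
		}
		*data += ret;   // Short write: advance past what the kernel accepted.
		*len -= ret;
	}
	return 0;
}
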
+
+int Server::read_nontls_data(Client *client, char *buf, size_t max_size)
+{
+	int ret;
+	do {
+		ret = read(client->sock, buf, max_size);
+	} while (ret == -1 && errno == EINTR);
+
+	if (ret == -1 && errno == EAGAIN) {
+		// No more data right now. Nothing to do.
+		// This is postcondition #2.
+		return -1;
+	}
+	if (ret == -1) {
+		log_perror("read");
+		close_client(client);
+		return -1;
+	}
+	if (ret == 0) {
+		// OK, the socket is closed.
+		close_client(client);
+		return -1;
+	}
+
+	return ret;
+}
+
+int Server::read_tls_data(Client *client, char *buf, size_t max_size)
+{
+read_again:
+	int ret;
+	do {
+		ret = read(client->sock, buf, max_size);
+	} while (ret == -1 && errno == EINTR);
+
+	if (ret == -1 && errno == EAGAIN) {
+		// No more data right now. Nothing to do.
+		// This is postcondition #2.
+		return -1;
+	}
+	if (ret == -1) {
+		log_perror("read");
+		close_client(client);
+		return -1;
+	}
+	if (ret == 0) {
+		// OK, the socket is closed.
+		close_client(client);
+		return -1;
+	}
+
+	// Give it to the TLS library.
+	int err = tls_consume_stream(client->tls_context, reinterpret_cast<const unsigned char *>(buf), ret, nullptr);
+	if (err < 0) {
+		log_tls_error("tls_consume_stream", err);
+		close_client(client);
+		return -1;
+	}
+	if (err == 0) {
+		// Not consumed any data. See if we can read more.
+		goto read_again;
+	}
+
+	// Read any decrypted data available for us. (We can reuse buf, since it's free now.)
+	ret = tls_read(client->tls_context, reinterpret_cast<unsigned char *>(buf), max_size);
+	if (ret == 0) {
+		// No decrypted data for us yet, but there might be some more handshaking
+		// to send. Do that if needed, then look for more data.
+		if (send_pending_tls_data(client)) {
+			// send_pending_tls_data() hit postconditions #1 or #4.
+			return -1;
+		}
+		goto read_again;
+	}
+	if (ret < 0) {
+		log_tls_error("tls_read", ret);
+		close_client(client);
+		return -1;
+	}
+
+	assert(ret > 0);
+	return ret;
+}
+
+// See if there's some data we've lost. Ideally, we should drop to a block boundary,
+// but resync will be the mux's problem.
+void Server::skip_lost_data(Client *client)
+{
+	Stream *stream = client->stream;
+	if (stream == nullptr) {
+		return;
+	}
+	size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+	if (bytes_to_send > stream->backlog_size) {
+		size_t bytes_lost = bytes_to_send - stream->backlog_size;
+		client->stream_pos = stream->bytes_received - stream->backlog_size;
+		client->bytes_lost += bytes_lost;
+		++client->num_loss_events;
+	}
+}
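
skip_lost_data() in concrete numbers: with a 1000-byte backlog, a client at stream_pos 500 on a stream that has received 1800 bytes has had bytes 500 through 800 overwritten already, so it is moved forward and the loss is recorded. The same arithmetic, standalone:

#include <cstddef>
#include <cstdio>

int main()
{
	const size_t backlog_size = 1000;
	size_t bytes_received = 1800;
	size_t stream_pos = 500;
	size_t bytes_lost = 0, num_loss_events = 0;

	size_t bytes_to_send = bytes_received - stream_pos;  // 1300 > backlog_size.
	if (bytes_to_send > backlog_size) {
		bytes_lost += bytes_to_send - backlog_size;  // 300 bytes skipped.
		stream_pos = bytes_received - backlog_size;  // Jump to 800, the oldest valid byte.
		++num_loss_events;
	}
	printf("pos=%zu lost=%zu events=%zu\n", stream_pos, bytes_lost, num_loss_events);
}
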
+
+int Server::parse_request(Client *client)
+{
+	vector<string> lines = split_lines(client->request);
+	if (lines.empty()) {
+		return 400;  // Bad request (empty).
+	}
+
+	// Parse the headers, for logging purposes.
+	// TODO: Case-insensitivity.
+	multimap<string, string> headers = extract_headers(lines, client->remote_addr);
+	multimap<string, string>::const_iterator referer_it = headers.find("Referer");
+	if (referer_it != headers.end()) {
+		client->referer = referer_it->second;
+	}
+	multimap<string, string>::const_iterator user_agent_it = headers.find("User-Agent");
+	if (user_agent_it != headers.end()) {
+		client->user_agent = user_agent_it->second;
+	}
+
+	vector<string> request_tokens = split_tokens(lines[0]);
+	if (request_tokens.size() < 2) {
+		return 400;  // Bad request (empty).
+	}
+	if (request_tokens[0] != "GET") {
+		return 400;  // Should maybe be 405 instead?
+	}
+
+	string url = request_tokens[1];
+	client->url = url;
+	if (url.size() > 8 && url.find("?backlog") == url.size() - 8) {
+		client->stream_pos = -2;
+		url = url.substr(0, url.size() - 8);
+	} else {
+		client->stream_pos = -1;
+	}
+
+	map<string, int>::const_iterator stream_url_map_it = stream_url_map.find(url);
+	if (stream_url_map_it == stream_url_map.end()) {
+		map<string, string>::const_iterator ping_url_map_it = ping_url_map.find(url);
+		if (ping_url_map_it == ping_url_map.end()) {
+			return 404;  // Not found.
+		} else {
+			return 204;  // No error.
+		}
+	}
+
+	Stream *stream = streams[stream_url_map_it->second].get();
+	if (stream->http_header.empty()) {
+		return 503;  // Service unavailable.
+	}
+
+	client->stream = stream;
+	if (setsockopt(client->sock, SOL_SOCKET, SO_MAX_PACING_RATE, &client->stream->pacing_rate, sizeof(client->stream->pacing_rate)) == -1) {
+		if (client->stream->pacing_rate != ~0U) {
+			log_perror("setsockopt(SO_MAX_PACING_RATE)");
+		}
+	}
+	client->request.clear();
+
+	return 200;  // OK!
+}
+
+void Server::construct_header(Client *client)
+{
+	Stream *stream = client->stream;
+	if (stream->encoding == Stream::STREAM_ENCODING_RAW) {
+		client->header_or_short_response = stream->http_header +
+			"\r\n" +
+			stream->stream_header;
+	} else if (stream->encoding == Stream::STREAM_ENCODING_METACUBE) {
+		client->header_or_short_response = stream->http_header +
+			"Content-encoding: metacube\r\n" +
+			"\r\n";
+		if (!stream->stream_header.empty()) {
+			metacube2_block_header hdr;
+			memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
+			hdr.size = htonl(stream->stream_header.size());
+			hdr.flags = htons(METACUBE_FLAGS_HEADER);
+			hdr.csum = htons(metacube2_compute_crc(&hdr));
+			client->header_or_short_response.append(
+				string(reinterpret_cast<char *>(&hdr), sizeof(hdr)));
+		}
+		client->header_or_short_response.append(stream->stream_header);
+	} else {
+		assert(false);
+	}
+
+	// Switch states.
+	client->state = Client::SENDING_HEADER;
+	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
+}
+
+void Server::construct_error(Client *client, int error_code)
+{
+	char error[256];
+	snprintf(error, 256, "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
+		error_code);
+	client->header_or_short_response = error;
+
+	// Switch states.
+	client->state = Client::SENDING_SHORT_RESPONSE;
+	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
+}
+
+void Server::construct_204(Client *client)
+{
+	map<string, string>::const_iterator ping_url_map_it = ping_url_map.find(client->url);
+	assert(ping_url_map_it != ping_url_map.end());
+
+	if (ping_url_map_it->second.empty()) {
+		client->header_or_short_response =
+			"HTTP/1.0 204 No Content\r\n"
+			"\r\n";
+	} else {
+		char response[256];
+		snprintf(response, 256,
+			"HTTP/1.0 204 No Content\r\n"
+			"Access-Control-Allow-Origin: %s\r\n"
+			"\r\n",
+			ping_url_map_it->second.c_str());
+		client->header_or_short_response = response;
+	}
+
+	// Switch states.
+	client->state = Client::SENDING_SHORT_RESPONSE;
+	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
+}
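
construct_header() frames the stream header as a Metacube2 block: an 8-byte sync marker, big-endian size and flags, and a checksum computed over the header fields themselves. The same framing as a small helper, assuming metacube2.h from this repository:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <string>

#include "metacube2.h"

std::string frame_metacube2_block(const std::string &payload, uint16_t flags)
{
	metacube2_block_header hdr;
	memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
	hdr.size = htonl(payload.size());            // Payload length, network byte order.
	hdr.flags = htons(flags);                    // E.g., METACUBE_FLAGS_HEADER.
	hdr.csum = htons(metacube2_compute_crc(&hdr));  // Checksum over the header fields.

	std::string block(reinterpret_cast<char *>(&hdr), sizeof(hdr));
	block += payload;
	return block;
}
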
+
+template<class T>
+void delete_from(vector<T> *v, T elem)
+{
+	typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
+	v->erase(new_end, v->end());
+}
+
+void Server::close_client(Client *client)
+{
+	if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, nullptr) == -1) {
+		log_perror("epoll_ctl(EPOLL_CTL_DEL)");
+		exit(1);
+	}
+
+	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
+	if (client->stream != nullptr) {
+		delete_from(&client->stream->sleeping_clients, client);
+		delete_from(&client->stream->to_process, client);
+	}
+
+	if (client->tls_context) {
+		tls_destroy_context(client->tls_context);
+	}
+
+	// Log to access_log.
+	access_log->write(client->get_stats());
+
+	// Bye-bye!
+	safe_close(client->sock);
+
+	clients.erase(client->sock);
+}
+
+void Server::change_epoll_events(Client *client, uint32_t events)
+{
+	epoll_event ev;
+	ev.events = events;
+	ev.data.ptr = client;
+
+	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
+		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
+		exit(1);
+	}
+}
+
+void Server::process_queued_data()
+{
+	{
+		lock_guard<mutex> lock(queued_clients_mutex);
+
+		for (const pair<int, Acceptor *> &id_and_acceptor : queued_add_clients) {
+			add_client(id_and_acceptor.first, id_and_acceptor.second);
+		}
+		queued_add_clients.clear();
+	}
+
+	for (unique_ptr<Stream> &stream : streams) {
+		stream->process_queued_data();
+	}
 }
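
delete_from() above is the classic erase-remove idiom: remove() compacts the surviving elements to the front of the vector and erase() trims the leftover tail. A small standalone usage example:

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
	std::vector<int> v = {1, 2, 3, 2, 4};
	// Equivalent to delete_from(&v, 2) above:
	v.erase(std::remove(v.begin(), v.end(), 2), v.end());
	assert((v == std::vector<int>{1, 3, 4}));
}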