+#include <assert.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <pthread.h>
+#include <stdint.h>
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
-#include <stdint.h>
-#include <assert.h>
-#include <arpa/inet.h>
-#include <curl/curl.h>
+#include <sys/epoll.h>
+#include <sys/sendfile.h>
#include <sys/socket.h>
-#include <pthread.h>
#include <sys/types.h>
-#include <sys/ioctl.h>
-#include <sys/epoll.h>
-#include <errno.h>
-#include <vector>
-#include <string>
-#include <map>
+#include <unistd.h>
#include <algorithm>
+#include <deque>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
-#include "metacube.h"
-#include "server.h"
+#include "tlse.h"
+
+#include "acceptor.h"
+#include "accesslog.h"
+#include "log.h"
+#include "metacube2.h"
#include "mutexlock.h"
+#include "parse.h"
+#include "server.h"
+#include "state.pb.h"
+#include "stream.h"
+#include "util.h"
+
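+// Not all kernel headers define this option yet; 47 is its value in the Linux ABI.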
+#ifndef SO_MAX_PACING_RATE
+#define SO_MAX_PACING_RATE 47
+#endif
using namespace std;
-Client::Client(int sock)
- : state(Client::READING_REQUEST),
- header_bytes_sent(0),
- bytes_sent(0)
+extern AccessLogThread *access_log;
+
+namespace {
+
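+// Helpers for comparing timespecs; clients are kept ordered by connect time.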
+inline bool is_equal(timespec a, timespec b)
{
- request.reserve(1024);
+ return a.tv_sec == b.tv_sec &&
+ a.tv_nsec == b.tv_nsec;
}
+inline bool is_earlier(timespec a, timespec b)
+{
+ if (a.tv_sec != b.tv_sec)
+ return a.tv_sec < b.tv_sec;
+ return a.tv_nsec < b.tv_nsec;
+}
+
+} // namespace
+
Server::Server()
{
pthread_mutex_init(&mutex, NULL);
+ pthread_mutex_init(&queued_clients_mutex, NULL);
epoll_fd = epoll_create(1024); // Size argument is ignored.
if (epoll_fd == -1) {
- perror("epoll_fd");
+ log_perror("epoll_fd");
exit(1);
}
}
-void Server::run()
+Server::~Server()
{
- should_stop = false;
-
- // Joinable is already the default, but it's good to be certain.
- pthread_attr_t attr;
- pthread_attr_init(&attr);
- pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
- pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
-}
-
-void Server::stop()
-{
- {
- MutexLock lock(&mutex);
- should_stop = true;
+ for (size_t i = 0; i < streams.size(); ++i) {
+ delete streams[i];
}
- if (pthread_join(worker_thread, NULL) == -1) {
- perror("pthread_join");
- exit(1);
- }
+ safe_close(epoll_fd);
}
-void *Server::do_work_thunk(void *arg)
+vector<ClientStats> Server::get_client_stats() const
{
- Server *server = static_cast<Server *>(arg);
- server->do_work();
- return NULL;
+ vector<ClientStats> ret;
+
+ MutexLock lock(&mutex);
+ for (map<int, Client>::const_iterator client_it = clients.begin();
+ client_it != clients.end();
+ ++client_it) {
+ ret.push_back(client_it->second.get_stats());
+ }
+ return ret;
}
void Server::do_work()
{
- for ( ;; ) {
- int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
- if (nfds == -1) {
- perror("epoll_wait");
+ while (!should_stop()) {
+ // Wait until there's activity on at least one of the fds,
+ // or 20 ms (about one frame at 50 fps) has elapsed.
+ //
+ // We could in theory wait forever and rely on wakeup()
+ // from add_client_deferred() and add_data_deferred(),
+ // but wakeup is a pretty expensive operation, and the
+ // two threads might end up fighting over a lock, so it's
+ // seemingly (much) more efficient to just have a timeout here.
+ int nfds = epoll_pwait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS, &sigset_without_usr1_block);
+ if (nfds == -1 && errno != EINTR) {
+ log_perror("epoll_wait");
exit(1);
}
MutexLock lock(&mutex); // We release the mutex between iterations.
- if (should_stop) {
- return;
- }
-
+ process_queued_data();
+
+ // Process each client where we have socket activity.
for (int i = 0; i < nfds; ++i) {
- int fd = events[i].data.fd;
- assert(clients.count(fd) != 0);
- Client *client = &clients[fd];
+ Client *client = reinterpret_cast<Client *>(events[i].data.ptr);
if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
close_client(client);
continue;
}

process_client(client);
}
+
+ // Process each client where its stream has new data,
+ // even if there was no socket activity.
+ for (size_t i = 0; i < streams.size(); ++i) {
+ vector<Client *> to_process;
+ swap(streams[i]->to_process, to_process);
+ for (size_t i = 0; i < to_process.size(); ++i) {
+ process_client(to_process[i]);
+ }
+ }
+
+ // Finally, go through each client to see if it's timed out
+ // in the READING_REQUEST state. (Seemingly there are clients
+ // that can hold sockets up for days at a time without sending
+ // anything at all.)
+ timespec timeout_time;
+ if (clock_gettime(CLOCK_MONOTONIC_COARSE, &timeout_time) == -1) {
+ log_perror("clock_gettime(CLOCK_MONOTONIC_COARSE)");
+ continue;
+ }
+ timeout_time.tv_sec -= REQUEST_READ_TIMEOUT_SEC;
+ while (!clients_ordered_by_connect_time.empty()) {
+ const pair<timespec, int> &connect_time_and_fd = clients_ordered_by_connect_time.front();
+
+ // See if we have reached the end of clients to process.
+ if (is_earlier(timeout_time, connect_time_and_fd.first)) {
+ break;
+ }
+
+ // If this client doesn't exist anymore, just ignore it
+ // (it was deleted earlier).
+ map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
+ if (client_it == clients.end()) {
+ clients_ordered_by_connect_time.pop();
+ continue;
+ }
+ Client *client = &client_it->second;
+ if (!is_equal(client->connect_time, connect_time_and_fd.first)) {
+ // Another client has taken this fd in the meantime.
+ clients_ordered_by_connect_time.pop();
+ continue;
+ }
+
+ if (client->state != Client::READING_REQUEST) {
+ // Only READING_REQUEST can time out.
+ clients_ordered_by_connect_time.pop();
+ continue;
+ }
+
+ // OK, it timed out.
+ close_client(client);
+ clients_ordered_by_connect_time.pop();
+ }
}
}
-
-void Server::add_client(int sock)
+
+CubemapStateProto Server::serialize()
{
- MutexLock lock(&mutex);
- clients.insert(make_pair(sock, Client(sock)));
+ // We don't serialize anything queued, so empty the queues.
+ process_queued_data();
+
+ // Set all clients in a consistent state before serializing
+ // (ie., they have no remaining lost data). Otherwise, increasing
+ // the backlog could take clients into a newly valid area of the backlog,
+ // sending a stream of zeros instead of skipping the data as it should.
+ //
+ // TODO: Do this when clients are added back from serialized state instead;
+ // it would probably be less wasteful.
+ for (map<int, Client>::iterator client_it = clients.begin();
+ client_it != clients.end();
+ ++client_it) {
+ skip_lost_data(&client_it->second);
+ }
+
+ CubemapStateProto serialized;
+ for (map<int, Client>::const_iterator client_it = clients.begin();
+ client_it != clients.end();
+ ++client_it) {
+ serialized.add_clients()->MergeFrom(client_it->second.serialize());
+ }
+ for (size_t i = 0; i < streams.size(); ++i) {
+ serialized.add_streams()->MergeFrom(streams[i]->serialize());
+ }
+ return serialized;
+}
+
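+// Queues a client for later addition; may be called from any thread
+// (typically an acceptor thread). The server thread picks the client up
+// in process_queued_data().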
+void Server::add_client_deferred(int sock, Acceptor *acceptor)
+{
+ MutexLock lock(&queued_clients_mutex);
+ queued_add_clients.push_back(std::make_pair(sock, acceptor));
+}
+
+void Server::add_client(int sock, Acceptor *acceptor)
+{
+ const bool is_tls = acceptor->is_tls();
+ pair<map<int, Client>::iterator, bool> ret =
+ clients.insert(make_pair(sock, Client(sock)));
+ assert(ret.second == true); // Should not already exist.
+ Client *client_ptr = &ret.first->second;
+
+ // Connection timestamps must be nondecreasing. I can't find any guarantee
+ // that even the monotonic clock can't go backwards by a small amount
+ // (think switching between CPUs with non-synchronized TSCs), so if
+ // this actually should happen, we hack around it by fudging
+ // connect_time.
+ if (!clients_ordered_by_connect_time.empty() &&
+ is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first)) {
+ client_ptr->connect_time = clients_ordered_by_connect_time.back().first;
+ }
+ clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, sock));
// Start listening on data from this socket.
epoll_event ev;
- ev.events = EPOLLIN | EPOLLRDHUP;
- ev.data.fd = sock;
+ if (is_tls) {
+ // Even in the initial state (READING_REQUEST), TLS needs to
+ // send data for the handshake, and thus might end up needing
+ // to know about EPOLLOUT.
+ ev.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP;
+ } else {
+ // EPOLLOUT will be added once we go out of READING_REQUEST.
+ ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
+ }
+ ev.data.ptr = client_ptr;
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
- perror("epoll_ctl(EPOLL_CTL_ADD)");
+ log_perror("epoll_ctl(EPOLL_CTL_ADD)");
exit(1);
}
+
+ if (is_tls) {
+ assert(tls_server_contexts.count(acceptor));
+ client_ptr->tls_context = tls_accept(tls_server_contexts[acceptor]);
+ if (client_ptr->tls_context == NULL) {
+ log(ERROR, "tls_accept() failed");
+ close_client(client_ptr);
+ return;
+ }
+ tls_make_exportable(client_ptr->tls_context, 1);
+ }
+
+ process_client(client_ptr);
+}
+
+void Server::add_client_from_serialized(const ClientProto &client)
+{
+ MutexLock lock(&mutex);
+ Stream *stream;
+ int stream_index = lookup_stream_by_url(client.url());
+ if (stream_index == -1) {
+ assert(client.state() != Client::SENDING_DATA);
+ stream = NULL;
+ } else {
+ stream = streams[stream_index];
+ }
+ pair<map<int, Client>::iterator, bool> ret =
+ clients.insert(make_pair(client.sock(), Client(client, stream)));
+ assert(ret.second == true); // Should not already exist.
+ Client *client_ptr = &ret.first->second;
+
+ // Connection timestamps must be nondecreasing.
+ assert(clients_ordered_by_connect_time.empty() ||
+ !is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first));
+ clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, client.sock()));
+
+ // Start listening on data from this socket.
+ epoll_event ev;
+ if (client.state() == Client::READING_REQUEST) {
+ // See the corresponding comment in Server::add_client().
+ if (client.has_tls_context()) {
+ ev.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP;
+ } else {
+ ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
+ }
+ } else {
+ // If we don't have more data for this client, we'll be putting it into
+ // the sleeping array again soon.
+ ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
+ }
+ ev.data.ptr = client_ptr;
+ if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
+ log_perror("epoll_ctl(EPOLL_CTL_ADD)");
+ exit(1);
+ }
+
+ if (client_ptr->state == Client::WAITING_FOR_KEYFRAME ||
+ client_ptr->state == Client::PREBUFFERING ||
+ (client_ptr->state == Client::SENDING_DATA &&
+ client_ptr->stream_pos == client_ptr->stream->bytes_received)) {
+ client_ptr->stream->put_client_to_sleep(client_ptr);
+ } else {
+ process_client(client_ptr);
+ }
+}
+
+int Server::lookup_stream_by_url(const string &url) const
+{
+ map<string, int>::const_iterator stream_url_it = stream_url_map.find(url);
+ if (stream_url_it == stream_url_map.end()) {
+ return -1;
+ }
+ return stream_url_it->second;
+}
+
+int Server::add_stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Stream::Encoding encoding, Stream::Encoding src_encoding)
+{
+ MutexLock lock(&mutex);
+ stream_url_map.insert(make_pair(url, streams.size()));
+ streams.push_back(new Stream(url, backlog_size, prebuffering_bytes, encoding, src_encoding));
+ return streams.size() - 1;
+}
+
+int Server::add_stream_from_serialized(const StreamProto &stream, int data_fd)
+{
+ MutexLock lock(&mutex);
+ stream_url_map.insert(make_pair(stream.url(), streams.size()));
+ streams.push_back(new Stream(stream, data_fd));
+ return streams.size() - 1;
}
-void Server::add_stream(const string &stream_id)
+void Server::set_backlog_size(int stream_index, size_t new_size)
{
MutexLock lock(&mutex);
- streams.insert(make_pair(stream_id, Stream()));
+ assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+ streams[stream_index]->set_backlog_size(new_size);
+}
+
+void Server::set_prebuffering_bytes(int stream_index, size_t new_amount)
+{
+ MutexLock lock(&mutex);
+ assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+ streams[stream_index]->prebuffering_bytes = new_amount;
}
-void Server::set_header(const string &stream_id, const string &header)
+void Server::set_encoding(int stream_index, Stream::Encoding encoding)
{
MutexLock lock(&mutex);
- assert(streams.count(stream_id) != 0);
- streams[stream_id].header = header;
+ assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+ streams[stream_index]->encoding = encoding;
+}
+
+void Server::set_src_encoding(int stream_index, Stream::Encoding encoding)
+{
+ MutexLock lock(&mutex);
+ assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+ streams[stream_index]->src_encoding = encoding;
}
-void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+void Server::set_header(int stream_index, const string &http_header, const string &stream_header)
{
- if (bytes == 0) {
- return;
+ MutexLock lock(&mutex);
+ assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+ streams[stream_index]->http_header = http_header;
+
+ if (stream_header != streams[stream_index]->stream_header) {
+ // We cannot start at any of the older starting points anymore,
+ // since they'd get the wrong header for the stream (not to mention
+ // that a changed header probably means the stream restarted,
+ // which means any client starting on the old one would probably
+ // stop playing properly at the change point). Next block
+ // should be a suitable starting point (if not, something is
+ // pretty strange), so it will fill up again soon enough.
+ streams[stream_index]->suitable_starting_points.clear();
}
+ streams[stream_index]->stream_header = stream_header;
+}
+
+void Server::set_pacing_rate(int stream_index, uint32_t pacing_rate)
+{
+ MutexLock lock(&mutex);
+ assert(clients.empty());
+ assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+ streams[stream_index]->pacing_rate = pacing_rate;
+}
+void Server::add_gen204(const string &url, const string &allow_origin)
+{
MutexLock lock(&mutex);
- assert(streams.count(stream_id) != 0);
- Stream *stream = &streams[stream_id];
- size_t pos = stream->data_size % BACKLOG_SIZE;
- stream->data_size += bytes;
+ assert(clients.empty());
+ ping_url_map[url] = allow_origin;
+}
- if (pos + bytes > BACKLOG_SIZE) {
- size_t to_copy = BACKLOG_SIZE - pos;
- memcpy(stream->data + pos, data, to_copy);
- data += to_copy;
- bytes -= to_copy;
- pos = 0;
- }
+void Server::create_tls_context_for_acceptor(const Acceptor *acceptor)
+{
+ assert(acceptor->is_tls());
+
+ bool is_server = true;
+ TLSContext *server_context = tls_create_context(is_server, TLS_V12);
- memcpy(stream->data + pos, data, bytes);
- wake_up_all_clients();
+ const string &cert = acceptor->get_certificate_chain();
+ int num_cert = tls_load_certificates(server_context, reinterpret_cast<const unsigned char *>(cert.data()), cert.size());
+ assert(num_cert > 0); // Should have been checked by config earlier.
+
+ const string &key = acceptor->get_private_key();
+ int num_key = tls_load_private_key(server_context, reinterpret_cast<const unsigned char *>(key.data()), key.size());
+ assert(num_key > 0); // Should have been checked by config earlier.
+
+ tls_server_contexts.insert(make_pair(acceptor, server_context));
}
-
+
+void Server::add_data_deferred(int stream_index, const char *data, size_t bytes, uint16_t metacube_flags)
+{
+ assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+ streams[stream_index]->add_data_deferred(data, bytes, metacube_flags);
+}
+
+// See the .h file for postconditions after this function.
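+// Briefly, the postconditions referenced by number below are:
+//  #1: The client was closed (and deleted).
+//  #2: We are still waiting for more data from the client.
+//  #3: We have sent everything we have, and put the client to sleep on its stream.
+//  #4: The socket buffer is full; epoll will wake us up when there is more room.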
void Server::process_client(Client *client)
{
switch (client->state) {
case Client::READING_REQUEST: {
+ if (client->tls_context != NULL) {
+ if (send_pending_tls_data(client)) {
+ // send_pending_tls_data() hit postconditions #1 or #4.
+ return;
+ }
+ }
+
+read_request_again:
// Try to read more of the request.
char buf[1024];
- int ret = read(client->sock, buf, sizeof(buf));
- if (ret == -1) {
- perror("read");
+ int ret;
+ if (client->tls_context == NULL) {
+ ret = read_nontls_data(client, buf, sizeof(buf));
+ if (ret == -1) {
+ // read_nontls_data() hit postconditions #1 or #2.
+ return;
+ }
+ } else {
+ ret = read_tls_data(client, buf, sizeof(buf));
+ if (ret == -1) {
+ // read_tls_data() hit postconditions #1, #2 or #4.
+ return;
+ }
+ }
+
+ RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);
+
+ switch (status) {
+ case RP_OUT_OF_SPACE:
+ log(WARNING, "[%s] Client sent overlong request!", client->remote_addr.c_str());
close_client(client);
return;
- }
- if (ret == 0) {
- // No data? This really means that we were triggered for something else than
- // POLLIN (which suggests a logic error in epoll).
- fprintf(stderr, "WARNING: fd %d returned unexpectedly 0 bytes!\n", client->sock);
+ case RP_NOT_FINISHED_YET:
+ // OK, we don't have the entire header yet. Fine; we'll get it later.
+ // See if there's more data for us.
+ goto read_request_again;
+ case RP_EXTRA_DATA:
+ log(WARNING, "[%s] Junk data after request!", client->remote_addr.c_str());
close_client(client);
return;
+ case RP_FINISHED:
+ break;
}
- // Guard against overlong requests gobbling up all of our space.
- if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
- fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
- close_client(client);
- return;
- }
+ assert(status == RP_FINISHED);
- // See if we have \r\n\r\n anywhere in the request. We start three bytes
- // before what we just appended, in case we just got the final character.
- size_t existing_req_bytes = client->request.size();
- client->request.append(string(buf, buf + ret));
-
- size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
- const char *ptr = reinterpret_cast<char *>(
- memmem(client->request.data() + start_at, client->request.size() - start_at,
- "\r\n\r\n", 4));
- if (ptr == NULL) {
- // OK, we don't have the entire header yet. Fine; we'll get it later.
- return;
+ if (client->tls_context && !client->in_ktls_mode && tls_established(client->tls_context)) {
+ // We're ready to enter kTLS mode, unless we still have some
+ // handshake data to send (which then must be sent as non-kTLS).
+ if (send_pending_tls_data(client)) {
+ // send_pending_tls_data() hit postconditions #1 or #4.
+ return;
+ }
+ ret = tls_make_ktls(client->tls_context, client->sock);
+ if (ret < 0) {
+ log_tls_error("tls_make_ktls", ret);
+ close_client(client);
+ return;
+ }
+ client->in_ktls_mode = true;
}
- if (ptr != client->request.data() + client->request.size() - 4) {
- fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
- close_client(client);
- return;
+ int error_code = parse_request(client);
+ if (error_code == 200) {
+ construct_header(client);
+ } else if (error_code == 204) {
+ construct_204(client);
+ } else {
+ construct_error(client, error_code);
}
- parse_request(client);
- break;
+ // We've changed states, so fall through.
+ assert(client->state == Client::SENDING_SHORT_RESPONSE ||
+ client->state == Client::SENDING_HEADER);
}
+ case Client::SENDING_SHORT_RESPONSE:
case Client::SENDING_HEADER: {
- int ret = write(client->sock,
- client->header.data() + client->header_bytes_sent,
- client->header.size() - client->header_bytes_sent);
+sending_header_or_short_response_again:
+ int ret;
+ do {
+ ret = write(client->sock,
+ client->header_or_short_response.data() + client->header_or_short_response_bytes_sent,
+ client->header_or_short_response.size() - client->header_or_short_response_bytes_sent);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno == EAGAIN) {
+ // We're out of socket space, so now we're at the “low edge” of epoll's
+ // edge triggering. epoll will tell us when there is more room, so for now,
+ // just return.
+ // This is postcondition #4.
+ return;
+ }
+
if (ret == -1) {
- perror("write");
+ // Error! Postcondition #1.
+ log_perror("write");
close_client(client);
return;
}
- client->header_bytes_sent += ret;
- assert(client->header_bytes_sent <= client->header.size());
+ client->header_or_short_response_bytes_sent += ret;
+ assert(client->header_or_short_response_bytes_sent <= client->header_or_short_response.size());
- if (client->header_bytes_sent < client->header.size()) {
- // We haven't sent all yet. Fine; we'll do that later.
- return;
+ if (client->header_or_short_response_bytes_sent < client->header_or_short_response.size()) {
+ // We haven't sent all yet. Fine; go another round.
+ goto sending_header_or_short_response_again;
}
- // We're done sending the header! Clear the entire header to release some memory.
- client->header.clear();
+ // We're done sending the header or error! Clear it to release some memory.
+ client->header_or_short_response.clear();
- // Start sending from the end. In other words, we won't send any of the backlog,
- // but we'll start sending immediately as we get data.
+ if (client->state == Client::SENDING_SHORT_RESPONSE) {
+ // We're done sending the error, so now close.
+ // This is postcondition #1.
+ close_client(client);
+ return;
+ }
+
+ Stream *stream = client->stream;
+ if (client->stream_pos == size_t(-2)) {
+ // Start sending from the beginning of the backlog.
+ client->stream_pos = max<ssize_t>(
+ stream->bytes_received - stream->backlog_size,
+ 0);
+ client->state = Client::SENDING_DATA;
+ goto sending_data;
+ } else if (stream->prebuffering_bytes == 0) {
+ // Start sending from the first keyframe we get. In other
+ // words, we won't send any of the backlog, but we'll start
+ // sending immediately as we get the next keyframe block.
+ // Note that this is functionally identical to the next if branch,
+ // except that we save a binary search.
+ client->stream_pos = stream->bytes_received;
+ client->state = Client::WAITING_FOR_KEYFRAME;
+ } else {
+ // We're not going to send anything to the client before we have
+ // N bytes. However, this wait might be boring; we can just as well
+ // use it to send older data if we have it. We use lower_bound()
+ // so that we are conservative and never add extra latency over just
+ // waiting (assuming CBR or nearly so); otherwise, we could want e.g.
+ // 100 kB prebuffer but end up sending a 10 MB GOP.
+ deque<size_t>::const_iterator starting_point_it =
+ lower_bound(stream->suitable_starting_points.begin(),
+ stream->suitable_starting_points.end(),
+ stream->bytes_received - stream->prebuffering_bytes);
+ if (starting_point_it == stream->suitable_starting_points.end()) {
+ // None found. Just put us at the end, and then wait for the
+ // first keyframe to appear.
+ client->stream_pos = stream->bytes_received;
+ client->state = Client::WAITING_FOR_KEYFRAME;
+ } else {
+ client->stream_pos = *starting_point_it;
+ client->state = Client::PREBUFFERING;
+ goto prebuffering;
+ }
+ }
+ // Fall through.
+ }
+ case Client::WAITING_FOR_KEYFRAME: {
+ Stream *stream = client->stream;
+ if (stream->suitable_starting_points.empty() ||
+ client->stream_pos > stream->suitable_starting_points.back()) {
+ // We haven't received a keyframe since this client started waiting,
+ // so keep on waiting for one.
+ // This is postcondition #3.
+ stream->put_client_to_sleep(client);
+ return;
+ }
+ client->stream_pos = stream->suitable_starting_points.back();
+ client->state = Client::PREBUFFERING;
+ // Fall through.
+ }
+ case Client::PREBUFFERING: {
+prebuffering:
+ Stream *stream = client->stream;
+ size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+ assert(bytes_to_send <= stream->backlog_size);
+ if (bytes_to_send < stream->prebuffering_bytes) {
+ // We don't have enough bytes buffered to start this client yet.
+ // This is postcondition #3.
+ stream->put_client_to_sleep(client);
+ return;
+ }
client->state = Client::SENDING_DATA;
- client->bytes_sent = streams[client->stream_id].data_size;
- break;
+ // Fall through.
}
case Client::SENDING_DATA: {
- // See if there's some data we've lost. Ideally, we should drop to a block boundary,
- // but resync will be the mux's problem.
- const Stream &stream = streams[client->stream_id];
- size_t bytes_to_send = stream.data_size - client->bytes_sent;
- if (bytes_to_send > BACKLOG_SIZE) {
- fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
- client->sock,
- (long long int)(bytes_to_send - BACKLOG_SIZE));
- client->bytes_sent = streams[client->stream_id].data_size - BACKLOG_SIZE;
- bytes_to_send = BACKLOG_SIZE;
+sending_data:
+ skip_lost_data(client);
+ Stream *stream = client->stream;
+
+sending_data_again:
+ size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+ assert(bytes_to_send <= stream->backlog_size);
+ if (bytes_to_send == 0) {
+ return;
}
// See if we need to split across the circular buffer.
- ssize_t ret;
- if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
- size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);
-
- iovec iov[2];
- iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
- iov[0].iov_len = bytes_first_part;
-
- iov[1].iov_base = const_cast<char *>(stream.data);
- iov[1].iov_len = bytes_to_send - bytes_first_part;
+ bool more_data = false;
+ if ((client->stream_pos % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
+ bytes_to_send = stream->backlog_size - (client->stream_pos % stream->backlog_size);
+ more_data = true;
+ }
- ret = writev(client->sock, iov, 2);
- } else {
- ret = write(client->sock,
- stream.data + (client->bytes_sent % BACKLOG_SIZE),
- bytes_to_send);
+ ssize_t ret;
+ do {
+ off_t offset = client->stream_pos % stream->backlog_size;
+ ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno == EAGAIN) {
+ // We're out of socket space, so return; epoll will wake us up
+ // when there is more room.
+ // This is postcondition #4.
+ return;
}
if (ret == -1) {
- perror("write/writev");
+ // Error, close; postcondition #1.
+ log_perror("sendfile");
close_client(client);
return;
}
+ client->stream_pos += ret;
client->bytes_sent += ret;
- if (client->bytes_sent == stream.data_size) {
+ if (client->stream_pos == stream->bytes_received) {
// We don't have any more data for this client, so put it to sleep.
- put_client_to_sleep(client);
+ // This is postcondition #3.
+ stream->put_client_to_sleep(client);
+ } else if (more_data && size_t(ret) == bytes_to_send) {
+ goto sending_data_again;
}
break;
}
}
}
-void Server::parse_request(Client *client)
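+// Tries to send any data the TLS library has queued up. Returns true if the
+// caller should stop processing this client (we hit postcondition #1 or #4).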
+bool Server::send_pending_tls_data(Client *client)
+{
+ // See if there's data from the TLS library to write.
+ if (client->tls_data_to_send == NULL) {
+ client->tls_data_to_send = tls_get_write_buffer(client->tls_context, &client->tls_data_left_to_send);
+ if (client->tls_data_to_send == NULL) {
+ // Really no data to send.
+ return false;
+ }
+ }
+
+send_data_again:
+ int ret;
+ do {
+ ret = write(client->sock, client->tls_data_to_send, client->tls_data_left_to_send);
+ } while (ret == -1 && errno == EINTR);
+ assert(ret < 0 || size_t(ret) <= client->tls_data_left_to_send);
+
+ if (ret == -1 && errno == EAGAIN) {
+ // We're out of socket space, so now we're at the “low edge” of epoll's
+ // edge triggering. epoll will tell us when there is more room, so for now,
+ // just return.
+ // This is postcondition #4.
+ return true;
+ }
+ if (ret == -1) {
+ // Error! Postcondition #1.
+ log_perror("write");
+ close_client(client);
+ return true;
+ }
+ if (ret > 0 && size_t(ret) == client->tls_data_left_to_send) {
+ // All data has been sent, so we don't need to go to sleep.
+ tls_buffer_clear(client->tls_context);
+ client->tls_data_to_send = NULL;
+ return false;
+ }
+
+ // More data to send, so try again.
+ client->tls_data_to_send += ret;
+ client->tls_data_left_to_send -= ret;
+ goto send_data_again;
+}
+
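+// Reads at most max_size bytes of plaintext from a non-TLS socket.
+// Returns -1 if the caller should stop (postcondition #1 or #2);
+// otherwise, the number of bytes read.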
+int Server::read_nontls_data(Client *client, char *buf, size_t max_size)
{
- // TODO: Actually parse the request. :-)
- client->stream_id = "stream";
+ int ret;
+ do {
+ ret = read(client->sock, buf, max_size);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno == EAGAIN) {
+ // No more data right now. Nothing to do.
+ // This is postcondition #2.
+ return -1;
+ }
+ if (ret == -1) {
+ log_perror("read");
+ close_client(client);
+ return -1;
+ }
+ if (ret == 0) {
+ // OK, the socket is closed.
+ close_client(client);
+ return -1;
+ }
+
+ return ret;
+}
+
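+// Like read_nontls_data(), but runs the incoming bytes through the TLS
+// library first. Returns -1 if the caller should stop (postcondition #1,
+// #2 or #4); otherwise, the number of decrypted bytes read.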
+int Server::read_tls_data(Client *client, char *buf, size_t max_size)
+{
+read_again:
+ int ret;
+ do {
+ ret = read(client->sock, buf, max_size);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno == EAGAIN) {
+ // No more data right now. Nothing to do.
+ // This is postcondition #2.
+ return -1;
+ }
+ if (ret == -1) {
+ log_perror("read");
+ close_client(client);
+ return -1;
+ }
+ if (ret == 0) {
+ // OK, the socket is closed.
+ close_client(client);
+ return -1;
+ }
+
+ // Give it to the TLS library.
+ int err = tls_consume_stream(client->tls_context, reinterpret_cast<const unsigned char *>(buf), ret, NULL);
+ if (err < 0) {
+ log_tls_error("tls_consume_stream", err);
+ close_client(client);
+ return -1;
+ }
+ if (err == 0) {
+ // Not consumed any data. See if we can read more.
+ goto read_again;
+ }
+
+ // Read any decrypted data available for us. (We can reuse buf, since it's free now.)
+ ret = tls_read(client->tls_context, reinterpret_cast<unsigned char *>(buf), max_size);
+ if (ret == 0) {
+ // No decrypted data for us yet, but there might be some more handshaking
+ // to send. Do that if needed, then look for more data.
+ if (send_pending_tls_data(client)) {
+ // send_pending_tls_data() hit postconditions #1 or #4.
+ return -1;
+ }
+ goto read_again;
+ }
+ if (ret < 0) {
+ log_tls_error("tls_read", ret);
+ close_client(client);
+ return -1;
+ }
+
+ assert(ret > 0);
+ return ret;
+}
+
+// See if there's some data we've lost. Ideally, we should drop to a block boundary,
+// but resync will be the mux's problem.
+void Server::skip_lost_data(Client *client)
+{
+ Stream *stream = client->stream;
+ if (stream == NULL) {
+ return;
+ }
+ size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+ if (bytes_to_send > stream->backlog_size) {
+ size_t bytes_lost = bytes_to_send - stream->backlog_size;
+ client->stream_pos = stream->bytes_received - stream->backlog_size;
+ client->bytes_lost += bytes_lost;
+ ++client->num_loss_events;
+ }
+}
+
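+// Parses the complete HTTP request in client->request, and sets up the
+// client's stream and start position accordingly. Returns the HTTP status
+// code to send back (200 means we found a stream and should send its header).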
+int Server::parse_request(Client *client)
+{
+ vector<string> lines = split_lines(client->request);
+ if (lines.empty()) {
+ return 400; // Bad request (empty).
+ }
+
+ // Parse the headers, for logging purposes.
+ // TODO: Case-insensitivity.
+ multimap<string, string> headers = extract_headers(lines, client->remote_addr);
+ multimap<string, string>::const_iterator referer_it = headers.find("Referer");
+ if (referer_it != headers.end()) {
+ client->referer = referer_it->second;
+ }
+ multimap<string, string>::const_iterator user_agent_it = headers.find("User-Agent");
+ if (user_agent_it != headers.end()) {
+ client->user_agent = user_agent_it->second;
+ }
+
+ vector<string> request_tokens = split_tokens(lines[0]);
+ if (request_tokens.size() < 2) {
+ return 400; // Bad request (malformed request line).
+ }
+ if (request_tokens[0] != "GET") {
+ return 400; // Should maybe be 405 instead?
+ }
+
+ string url = request_tokens[1];
+ client->url = url;
+ if (url.size() > 8 && url.find("?backlog") == url.size() - 8) {
+ client->stream_pos = -2;
+ url = url.substr(0, url.size() - 8);
+ } else {
+ client->stream_pos = -1;
+ }
+
+ map<string, int>::const_iterator stream_url_map_it = stream_url_map.find(url);
+ if (stream_url_map_it == stream_url_map.end()) {
+ map<string, string>::const_iterator ping_url_map_it = ping_url_map.find(url);
+ if (ping_url_map_it == ping_url_map.end()) {
+ return 404; // Not found.
+ } else {
+ return 204; // No error.
+ }
+ }
+
+ Stream *stream = streams[stream_url_map_it->second];
+ if (stream->http_header.empty()) {
+ return 503; // Service unavailable.
+ }
+
+ client->stream = stream;
+ if (setsockopt(client->sock, SOL_SOCKET, SO_MAX_PACING_RATE, &client->stream->pacing_rate, sizeof(client->stream->pacing_rate)) == -1) {
+ if (client->stream->pacing_rate != ~0U) {
+ log_perror("setsockopt(SO_MAX_PACING_RATE)");
+ }
+ }
client->request.clear();
- // Construct the header.
- client->header = "HTTP/1.0 200 OK\r\n Content-type: video/x-flv\r\nCache-Control: no-cache\r\nContent-type: todo/fixme\r\n\r\n" +
- streams[client->stream_id].header;
+ return 200; // OK!
+}
+
+void Server::construct_header(Client *client)
+{
+ Stream *stream = client->stream;
+ if (stream->encoding == Stream::STREAM_ENCODING_RAW) {
+ client->header_or_short_response = stream->http_header +
+ "\r\n" +
+ stream->stream_header;
+ } else if (stream->encoding == Stream::STREAM_ENCODING_METACUBE) {
+ client->header_or_short_response = stream->http_header +
+ "Content-encoding: metacube\r\n" +
+ "\r\n";
+ if (!stream->stream_header.empty()) {
+ metacube2_block_header hdr;
+ memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
+ hdr.size = htonl(stream->stream_header.size());
+ hdr.flags = htons(METACUBE_FLAGS_HEADER);
+ hdr.csum = htons(metacube2_compute_crc(&hdr));
+ client->header_or_short_response.append(
+ string(reinterpret_cast<char *>(&hdr), sizeof(hdr)));
+ }
+ client->header_or_short_response.append(stream->stream_header);
+ } else {
+ assert(false);
+ }
// Switch states.
client->state = Client::SENDING_HEADER;
+ change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
+}
+
+void Server::construct_error(Client *client, int error_code)
+{
+ char error[256];
+ snprintf(error, sizeof(error), "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
+ error_code);
+ client->header_or_short_response = error;
- epoll_event ev;
- ev.events = EPOLLOUT | EPOLLRDHUP;
- ev.data.fd = client->sock;
+ // Switch states.
+ client->state = Client::SENDING_SHORT_RESPONSE;
+ change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
+}
- if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
- perror("epoll_ctl(EPOLL_CTL_MOD)");
- exit(1);
+void Server::construct_204(Client *client)
+{
+ map<string, string>::const_iterator ping_url_map_it = ping_url_map.find(client->url);
+ assert(ping_url_map_it != ping_url_map.end());
+
+ if (ping_url_map_it->second.empty()) {
+ client->header_or_short_response =
+ "HTTP/1.0 204 No Content\r\n"
+ "\r\n";
+ } else {
+ char response[256];
+ snprintf(response, sizeof(response),
+ "HTTP/1.0 204 No Content\r\n"
+ "Access-Control-Allow-Origin: %s\r\n"
+ "\r\n",
+ ping_url_map_it->second.c_str());
+ client->header_or_short_response = response;
}
+
+ // Switch states.
+ client->state = Client::SENDING_SHORT_RESPONSE;
+ change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
+}
+
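+// Removes all occurrences of <elem> from the vector, in linear time.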
+template<class T>
+void delete_from(vector<T> *v, T elem)
+{
+ typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
+ v->erase(new_end, v->end());
}
void Server::close_client(Client *client)
{
if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
- perror("epoll_ctl(EPOLL_CTL_DEL)");
+ log_perror("epoll_ctl(EPOLL_CTL_DEL)");
exit(1);
}
// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
- vector<int>::iterator new_end =
- remove(sleeping_clients.begin(), sleeping_clients.end(), client->sock);
- sleeping_clients.erase(new_end, sleeping_clients.end());
-
+ if (client->stream != NULL) {
+ delete_from(&client->stream->sleeping_clients, client);
+ delete_from(&client->stream->to_process, client);
+ }
+
+ if (client->tls_context) {
+ tls_destroy_context(client->tls_context);
+ }
+
+ // Log to access_log.
+ access_log->write(client->get_stats());
+
// Bye-bye!
- close(client->sock);
+ safe_close(client->sock);
+
clients.erase(client->sock);
}
-
-void Server::put_client_to_sleep(Client *client)
+
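+// Replaces the epoll event mask for this client's socket.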
+void Server::change_epoll_events(Client *client, uint32_t events)
{
epoll_event ev;
- ev.events = EPOLLRDHUP;
- ev.data.fd = client->sock;
+ ev.events = events;
+ ev.data.ptr = client;
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
- perror("epoll_ctl(EPOLL_CTL_MOD)");
+ log_perror("epoll_ctl(EPOLL_CTL_MOD)");
exit(1);
}
-
- sleeping_clients.push_back(client->sock);
}
-void Server::wake_up_all_clients()
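+// Runs on the server thread: adds any clients queued up by add_client_deferred(),
+// then lets each stream process the data queued up by add_data_deferred().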
+void Server::process_queued_data()
{
- for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
- epoll_event ev;
- ev.events = EPOLLOUT | EPOLLRDHUP;
- ev.data.fd = sleeping_clients[i];
- if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
- perror("epoll_ctl(EPOLL_CTL_MOD)");
- exit(1);
+ {
+ MutexLock lock(&queued_clients_mutex);
+
+ for (size_t i = 0; i < queued_add_clients.size(); ++i) {
+ add_client(queued_add_clients[i].first, queued_add_clients[i].second);
}
+ queued_add_clients.clear();
+ }
+
+ for (size_t i = 0; i < streams.size(); ++i) {
+ streams[i]->process_queued_data();
}
- sleeping_clients.clear();
}