extern AccessLogThread *access_log;
+namespace {
+
+inline bool is_equal(timespec a, timespec b)
+{
+ return a.tv_sec == b.tv_sec &&
+ a.tv_nsec == b.tv_nsec;
+}
+
+inline bool is_earlier(timespec a, timespec b)
+{
+ if (a.tv_sec != b.tv_sec)
+ return a.tv_sec < b.tv_sec;
+ return a.tv_nsec < b.tv_nsec;
+}
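+
+// (For example, is_earlier() orders {1, 999999999} before {2, 0}:
+// tv_sec is compared first, and tv_nsec only breaks ties.)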
+
+} // namespace
+
Server::Server()
{
pthread_mutex_init(&mutex, NULL);
process_queued_data();
+ // Process each client where we have socket activity.
for (int i = 0; i < nfds; ++i) {
Client *client = reinterpret_cast<Client *>(events[i].data.u64);
process_client(client);
}
+ // Process each client where its stream has new data,
+ // even if there was no socket activity.
for (size_t i = 0; i < streams.size(); ++i) {
vector<Client *> to_process;
swap(streams[i]->to_process, to_process);
		for (size_t j = 0; j < to_process.size(); ++j) {
			process_client(to_process[j]);
		}
}
}
+
+ // Finally, go through each client to see if it's timed out
+ // in the READING_REQUEST state. (Seemingly there are clients
+	// that can hold sockets open for days at a time without sending
+ // anything at all.)
+ timespec timeout_time;
+ if (clock_gettime(CLOCK_MONOTONIC_COARSE, &timeout_time) == -1) {
+ log_perror("clock_gettime(CLOCK_MONOTONIC_COARSE)");
+ continue;
+ }
+ timeout_time.tv_sec -= REQUEST_READ_TIMEOUT_SEC;
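+	// Any client that connected before this adjusted timeout_time has now
+	// been connected for more than REQUEST_READ_TIMEOUT_SEC seconds.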
+ while (!clients_ordered_by_connect_time.empty()) {
+ const pair<timespec, int> &connect_time_and_fd = clients_ordered_by_connect_time.front();
+
+ // See if we have reached the end of clients to process.
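+		// Since entries are pushed in nondecreasing connect_time order,
+		// once we see one that hasn't timed out, neither has anything
+		// after it.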
+ if (is_earlier(timeout_time, connect_time_and_fd.first)) {
+ break;
+ }
+
+ // If this client doesn't exist anymore, just ignore it
+ // (it was deleted earlier).
+ map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
+ if (client_it == clients.end()) {
+ clients_ordered_by_connect_time.pop();
+ continue;
+ }
+ Client *client = &client_it->second;
+ if (!is_equal(client->connect_time, connect_time_and_fd.first)) {
+ // Another client has taken this fd in the meantime.
+ clients_ordered_by_connect_time.pop();
+ continue;
+ }
+
+ if (client->state != Client::READING_REQUEST) {
+ // Only READING_REQUEST can time out.
+ clients_ordered_by_connect_time.pop();
+ continue;
+ }
+
+ // OK, it timed out.
+ close_client(client);
+ clients_ordered_by_connect_time.pop();
+ }
}
}
assert(ret.second == true); // Should not already exist.
Client *client_ptr = &ret.first->second;
+ // Connection timestamps must be nondecreasing. I can't find any guarantee
+ // that even the monotonic clock can't go backwards by a small amount
+ // (think switching between CPUs with non-synchronized TSCs), so if
+ // this actually should happen, we hack around it by fudging
+ // connect_time.
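+	// (E.g., if the clock steps back two milliseconds between two accepted
+	// connections, the later client simply inherits the earlier client's
+	// connect_time.)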
+ if (!clients_ordered_by_connect_time.empty() &&
+ is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first)) {
+ client_ptr->connect_time = clients_ordered_by_connect_time.back().first;
+ }
+ clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, sock));
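+	// (The kernel may later reuse this fd for a new connection; the
+	// timeout scan guards against that by checking that connect_time
+	// still matches before touching the client.)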
+
// Start listening on data from this socket.
epoll_event ev;
ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
assert(ret.second == true); // Should not already exist.
Client *client_ptr = &ret.first->second;
+ // Connection timestamps must be nondecreasing.
+ assert(clients_ordered_by_connect_time.empty() ||
+ !is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first));
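+	// (No fudging here, unlike above: serialized clients were already
+	// pushed in nondecreasing order before they were written out, so the
+	// order should survive the round trip.)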
+ clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, client.sock()));
+
// Start listening on data from this socket.
epoll_event ev;
if (client.state() == Client::READING_REQUEST) {
}
if (client_ptr->state == Client::WAITING_FOR_KEYFRAME ||
+ client_ptr->state == Client::PREBUFFERING ||
(client_ptr->state == Client::SENDING_DATA &&
client_ptr->stream_pos == client_ptr->stream->bytes_received)) {
client_ptr->stream->put_client_to_sleep(client_ptr);
}
}
-int Server::lookup_stream_by_url(const std::string &url) const
+int Server::lookup_stream_by_url(const string &url) const
{
map<string, int>::const_iterator url_it = url_map.find(url);
if (url_it == url_map.end()) {
		return -1;
	}
	return url_it->second;
}
-int Server::add_stream(const string &url, size_t backlog_size, Stream::Encoding encoding)
+int Server::add_stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Stream::Encoding encoding)
{
MutexLock lock(&mutex);
url_map.insert(make_pair(url, streams.size()));
- streams.push_back(new Stream(url, backlog_size, encoding));
+ streams.push_back(new Stream(url, backlog_size, prebuffering_bytes, encoding));
return streams.size() - 1;
}
assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
streams[stream_index]->set_backlog_size(new_size);
}
+
+void Server::set_prebuffering_bytes(int stream_index, size_t new_amount)
+{
+ MutexLock lock(&mutex);
+ assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+ streams[stream_index]->prebuffering_bytes = new_amount;
+}
void Server::set_encoding(int stream_index, Stream::Encoding encoding)
{
MutexLock lock(&mutex);
assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
streams[stream_index]->http_header = http_header;
+
+ if (stream_header != streams[stream_index]->stream_header) {
+ // We cannot start at any of the older starting points anymore,
+ // since they'd get the wrong header for the stream (not to mention
+ // that a changed header probably means the stream restarted,
+	// which means any client starting from the old points would probably
+	// stop playing properly at the change point). The next block
+	// should be a suitable starting point (if not, something is
+	// pretty strange), so the list will fill up again soon enough.
+ streams[stream_index]->suitable_starting_points.clear();
+ }
streams[stream_index]->stream_header = stream_header;
}
return;
}
- // Start sending from the first keyframe we get. In other
- // words, we won't send any of the backlog, but we'll start
- // sending immediately as we get the next keyframe block.
- // This is postcondition #3.
+ Stream *stream = client->stream;
if (client->stream_pos == size_t(-2)) {
- client->stream_pos = std::min<size_t>(
- client->stream->bytes_received - client->stream->backlog_size,
+ // Start sending from the beginning of the backlog.
+		client->stream_pos = max<ssize_t>(
+		        stream->bytes_received - stream->backlog_size,
0);
client->state = Client::SENDING_DATA;
- } else {
- // client->stream_pos should be -1, but it might not be,
- // if we have clients from an older version.
- client->stream_pos = client->stream->bytes_received;
+ goto sending_data;
+ } else if (stream->prebuffering_bytes == 0) {
+ // Start sending from the first keyframe we get. In other
+ // words, we won't send any of the backlog, but we'll start
+ // sending immediately as we get the next keyframe block.
+		// Note that this is functionally identical to the else branch
+		// below, except that we save a binary search.
+ client->stream_pos = stream->bytes_received;
client->state = Client::WAITING_FOR_KEYFRAME;
+ } else {
+ // We're not going to send anything to the client before we have
+ // N bytes. However, this wait might be boring; we can just as well
+ // use it to send older data if we have it. We use lower_bound()
+ // so that we are conservative and never add extra latency over just
+ // waiting (assuming CBR or nearly so); otherwise, we could want e.g.
+ // 100 kB prebuffer but end up sending a 10 MB GOP.
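+		// (Example: with suitable starting points {1000, 5000, 9000},
+		// bytes_received = 10000 and prebuffering_bytes = 3000, the search
+		// target is 7000 and lower_bound() picks 9000; the client gets
+		// 1000 bytes of old data right away and then waits for the
+		// remaining 2000.)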
+		size_t prebuffer_target = 0;
+		if (stream->bytes_received > stream->prebuffering_bytes) {
+			// Guard the subtraction; bytes_received can be smaller than
+			// prebuffering_bytes right after stream start, and size_t
+			// must not wrap around.
+			prebuffer_target = stream->bytes_received - stream->prebuffering_bytes;
+		}
+		deque<size_t>::const_iterator starting_point_it =
+			lower_bound(stream->suitable_starting_points.begin(),
+			            stream->suitable_starting_points.end(),
+			            prebuffer_target);
+ if (starting_point_it == stream->suitable_starting_points.end()) {
+ // None found. Just put us at the end, and then wait for the
+ // first keyframe to appear.
+ client->stream_pos = stream->bytes_received;
+ client->state = Client::WAITING_FOR_KEYFRAME;
+ } else {
+ client->stream_pos = *starting_point_it;
+ client->state = Client::PREBUFFERING;
+ goto prebuffering;
+ }
}
- client->stream->put_client_to_sleep(client);
- return;
+ // Fall through.
}
case Client::WAITING_FOR_KEYFRAME: {
Stream *stream = client->stream;
- if (ssize_t(client->stream_pos) > stream->last_suitable_starting_point) {
+ if (stream->suitable_starting_points.empty() ||
+ client->stream_pos > stream->suitable_starting_points.back()) {
// We haven't received a keyframe since this stream started waiting,
// so keep on waiting for one.
// This is postcondition #3.
stream->put_client_to_sleep(client);
return;
}
- client->stream_pos = stream->last_suitable_starting_point;
+ client->stream_pos = stream->suitable_starting_points.back();
+ client->state = Client::PREBUFFERING;
+ // Fall through.
+ }
+ case Client::PREBUFFERING: {
+prebuffering:
+ Stream *stream = client->stream;
+ size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+ assert(bytes_to_send <= stream->backlog_size);
+ if (bytes_to_send < stream->prebuffering_bytes) {
+ // We don't have enough bytes buffered to start this client yet.
+ // This is postcondition #3.
+ stream->put_client_to_sleep(client);
+ return;
+ }
client->state = Client::SENDING_DATA;
// Fall through.
}
case Client::SENDING_DATA: {
+sending_data:
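+		// skip_lost_data() moves stream_pos forward if the client has
+		// fallen more than backlog_size bytes behind, so that we never
+		// try to send data the backlog has already overwritten.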
skip_lost_data(client);
Stream *stream = client->stream;
return 400; // Bad request (empty).
}
+ // Parse the headers, for logging purposes.
+ // TODO: Case-insensitivity.
+ multimap<string, string> headers = extract_headers(lines, client->remote_addr);
+ multimap<string, string>::const_iterator referer_it = headers.find("Referer");
+ if (referer_it != headers.end()) {
+ client->referer = referer_it->second;
+ }
+ multimap<string, string>::const_iterator user_agent_it = headers.find("User-Agent");
+ if (user_agent_it != headers.end()) {
+ client->user_agent = user_agent_it->second;
+ }
+
vector<string> request_tokens = split_tokens(lines[0]);
if (request_tokens.size() < 2) {
return 400; // Bad request (empty).
}
client->url = request_tokens[1];
+
client->stream = stream;
if (setsockopt(client->sock, SOL_SOCKET, SO_MAX_PACING_RATE, &client->stream->pacing_rate, sizeof(client->stream->pacing_rate)) == -1) {
if (client->stream->pacing_rate != ~0U) {