#include <assert.h>
#include <errno.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <deque>
#include <map>
#include <string>
#include <vector>

#include "accesslog.h"
#include "log.h"
#include "metacube2.h"
#include "mutexlock.h"
#include "parse.h"
#include "server.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47
#endif
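// The fallback #define above: SO_MAX_PACING_RATE (value 47 in the Linux UAPI
// headers) is a relatively recent socket option, so we define it ourselves if
// the system headers are too old to have it. On kernels without support, the
// setsockopt() call in parse_request() will simply fail and be logged (unless
// pacing is disabled with the ~0U sentinel).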
using namespace std;

extern AccessLogThread *access_log;
inline bool is_equal(timespec a, timespec b)
{
	return a.tv_sec == b.tv_sec &&
	       a.tv_nsec == b.tv_nsec;
}

inline bool is_earlier(timespec a, timespec b)
{
	if (a.tv_sec != b.tv_sec)
		return a.tv_sec < b.tv_sec;
	return a.tv_nsec < b.tv_nsec;
}
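// timespec has no built-in comparison operators, so we supply our own. They
// are used to keep clients_ordered_by_connect_time in nondecreasing order,
// so that do_work() can scan it front-to-back when checking for
// request-read timeouts.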
Server::Server()
{
	pthread_mutex_init(&mutex, NULL);
	pthread_mutex_init(&queued_clients_mutex, NULL);

	epoll_fd = epoll_create(1024);  // Size argument is ignored.
	if (epoll_fd == -1) {
		log_perror("epoll_fd");
		exit(1);
	}
}

Server::~Server()
{
	for (size_t i = 0; i < streams.size(); ++i) {
		delete streams[i];
	}

	safe_close(epoll_fd);
}
vector<ClientStats> Server::get_client_stats() const
{
	vector<ClientStats> ret;

	MutexLock lock(&mutex);
	for (map<int, Client>::const_iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		ret.push_back(client_it->second.get_stats());
	}
	return ret;
}
void Server::do_work()
{
	while (!should_stop()) {
		// Wait until there's activity on at least one of the fds,
		// or 20 ms (about one frame at 50 fps) has elapsed.
		//
		// We could in theory wait forever and rely on wakeup()
		// from add_client_deferred() and add_data_deferred(),
		// but wakeup is a pretty expensive operation, and the
		// two threads might end up fighting over a lock, so it's
		// seemingly (much) more efficient to just have a timeout here.
		int nfds = epoll_pwait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS, &sigset_without_usr1_block);
		if (nfds == -1 && errno != EINTR) {
			log_perror("epoll_pwait");
			exit(1);
		}

		MutexLock lock(&mutex);  // We release the mutex between iterations.

		process_queued_data();

		// Process each client where we have socket activity.
		for (int i = 0; i < nfds; ++i) {
			Client *client = reinterpret_cast<Client *>(events[i].data.u64);

			if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
				close_client(client);
				continue;
			}

			process_client(client);
		}

		// Process each client where its stream has new data,
		// even if there was no socket activity.
		for (size_t i = 0; i < streams.size(); ++i) {
			vector<Client *> to_process;
			swap(streams[i]->to_process, to_process);
			for (size_t j = 0; j < to_process.size(); ++j) {
				process_client(to_process[j]);
			}
		}

		// Finally, go through each client to see if it's timed out
		// in the READING_REQUEST state. (Seemingly there are clients
		// that can hold sockets up for days at a time without sending
		// anything at all.)
		timespec timeout_time;
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &timeout_time) == -1) {
			log_perror("clock_gettime(CLOCK_MONOTONIC_COARSE)");
			continue;
		}
		timeout_time.tv_sec -= REQUEST_READ_TIMEOUT_SEC;
		while (!clients_ordered_by_connect_time.empty()) {
			const pair<timespec, int> &connect_time_and_fd = clients_ordered_by_connect_time.front();

			// See if we have reached the end of clients to process.
			if (is_earlier(timeout_time, connect_time_and_fd.first)) {
				break;
			}

			// If this client doesn't exist anymore, just ignore it
			// (it was deleted earlier).
			map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
			if (client_it == clients.end()) {
				clients_ordered_by_connect_time.pop();
				continue;
			}
			Client *client = &client_it->second;
			if (!is_equal(client->connect_time, connect_time_and_fd.first)) {
				// Another client has taken this fd in the meantime.
				clients_ordered_by_connect_time.pop();
				continue;
			}

			if (client->state != Client::READING_REQUEST) {
				// Only READING_REQUEST can time out.
				clients_ordered_by_connect_time.pop();
				continue;
			}

			// OK, it timed out; close it.
			close_client(client);
			clients_ordered_by_connect_time.pop();
		}
	}
}
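// serialize() snapshots the full server state (all clients and all streams)
// into a protobuf. In cubemap, this is used when handing state over to a new
// process during an in-place upgrade, so clients keep their connections
// across the restart.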
CubemapStateProto Server::serialize()
{
	// We don't serialize anything queued, so empty the queues.
	process_queued_data();

	// Set all clients in a consistent state before serializing
	// (ie., they have no remaining lost data). Otherwise, increasing
	// the backlog could take clients into a newly valid area of the backlog,
	// sending a stream of zeros instead of skipping the data as it should.
	//
	// TODO: Do this when clients are added back from serialized state instead;
	// it would probably be less wasteful.
	for (map<int, Client>::iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		skip_lost_data(&client_it->second);
	}

	CubemapStateProto serialized;
	for (map<int, Client>::const_iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		serialized.add_clients()->MergeFrom(client_it->second.serialize());
	}
	for (size_t i = 0; i < streams.size(); ++i) {
		serialized.add_streams()->MergeFrom(streams[i]->serialize());
	}
	return serialized;
}
void Server::add_client_deferred(int sock)
{
	MutexLock lock(&queued_clients_mutex);
	queued_add_clients.push_back(sock);
}
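// New sockets are accepted on a different thread, so add_client_deferred()
// above only queues them under their own mutex; the server thread picks them
// up in process_queued_data(). See the comment at the top of do_work() for
// why there is no explicit wakeup.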
void Server::add_client(int sock)
{
	pair<map<int, Client>::iterator, bool> ret =
		clients.insert(make_pair(sock, Client(sock)));
	assert(ret.second == true);  // Should not already exist.
	Client *client_ptr = &ret.first->second;

	// Connection timestamps must be nondecreasing. I can't find any guarantee
	// that even the monotonic clock can't go backwards by a small amount
	// (think switching between CPUs with non-synchronized TSCs), so if
	// this actually should happen, we hack around it by fudging
	// connect_time.
	if (!clients_ordered_by_connect_time.empty() &&
	    is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first)) {
		client_ptr->connect_time = clients_ordered_by_connect_time.back().first;
	}
	clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, sock));

	// Start listening on data from this socket.
	epoll_event ev;
	ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
		exit(1);
	}

	process_client(client_ptr);
}
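// Note that process_client() is called immediately after registration;
// presumably this is so that any request bytes that arrived before the epoll
// registration are handled right away instead of waiting for the next
// edge-triggered event.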
void Server::add_client_from_serialized(const ClientProto &client)
{
	MutexLock lock(&mutex);
	Stream *stream;
	int stream_index = lookup_stream_by_url(client.url());
	if (stream_index == -1) {
		assert(client.state() != Client::SENDING_DATA);
		stream = NULL;
	} else {
		stream = streams[stream_index];
	}
	pair<map<int, Client>::iterator, bool> ret =
		clients.insert(make_pair(client.sock(), Client(client, stream)));
	assert(ret.second == true);  // Should not already exist.
	Client *client_ptr = &ret.first->second;

	// Connection timestamps must be nondecreasing.
	assert(clients_ordered_by_connect_time.empty() ||
	       !is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first));
	clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, client.sock()));

	// Start listening on data from this socket.
	epoll_event ev;
	if (client.state() == Client::READING_REQUEST) {
		ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
	} else {
		// If we don't have more data for this client, we'll be putting it into
		// the sleeping array again soon.
		ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	}
	ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
		exit(1);
	}

	if (client_ptr->state == Client::WAITING_FOR_KEYFRAME ||
	    client_ptr->state == Client::PREBUFFERING ||
	    (client_ptr->state == Client::SENDING_DATA &&
	     client_ptr->stream_pos == client_ptr->stream->bytes_received)) {
		client_ptr->stream->put_client_to_sleep(client_ptr);
	} else {
		process_client(client_ptr);
	}
}
int Server::lookup_stream_by_url(const string &url) const
{
	map<string, int>::const_iterator url_it = url_map.find(url);
	if (url_it == url_map.end()) {
		return -1;
	}
	return url_it->second;
}
int Server::add_stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Stream::Encoding encoding)
{
	MutexLock lock(&mutex);
	url_map.insert(make_pair(url, streams.size()));
	streams.push_back(new Stream(url, backlog_size, prebuffering_bytes, encoding));
	return streams.size() - 1;
}
int Server::add_stream_from_serialized(const StreamProto &stream, int data_fd)
{
	MutexLock lock(&mutex);
	url_map.insert(make_pair(stream.url(), streams.size()));
	streams.push_back(new Stream(stream, data_fd));
	return streams.size() - 1;
}
void Server::set_backlog_size(int stream_index, size_t new_size)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->set_backlog_size(new_size);
}

void Server::set_prebuffering_bytes(int stream_index, size_t new_amount)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->prebuffering_bytes = new_amount;
}

void Server::set_encoding(int stream_index, Stream::Encoding encoding)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->encoding = encoding;
}
void Server::set_header(int stream_index, const string &http_header, const string &stream_header)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->http_header = http_header;

	if (stream_header != streams[stream_index]->stream_header) {
		// We cannot start at any of the older starting points anymore,
		// since they'd get the wrong header for the stream (not to mention
		// that a changed header probably means the stream restarted,
		// which means any client starting on the old one would probably
		// stop playing properly at the change point). The next block
		// should be a suitable starting point (if not, something is
		// pretty strange), so the list will fill up again soon enough.
		streams[stream_index]->suitable_starting_points.clear();
	}
	streams[stream_index]->stream_header = stream_header;
}
void Server::set_pacing_rate(int stream_index, uint32_t pacing_rate)
{
	MutexLock lock(&mutex);
	assert(clients.empty());
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->pacing_rate = pacing_rate;
}
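// Note: the pacing rate is only applied to a socket via setsockopt() in
// parse_request(), so changing it here would not affect already-connected
// clients; hence the assert(clients.empty()) above.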
void Server::add_data_deferred(int stream_index, const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
{
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->add_data_deferred(data, bytes, suitable_for_stream_start);
}
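// process_client() below is the per-client state machine. The numbered
// postconditions referenced in its comments are, in summary:
//   #1: the client was closed (close_client() was called).
//   #2: there is no more request data to read right now.
//   #3: the client was put to sleep, waiting for new stream data.
//   #4: the socket send buffer is full; epoll will wake us when there is room.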
// See the .h file for postconditions after this function.
void Server::process_client(Client *client)
{
	switch (client->state) {
	case Client::READING_REQUEST: {
read_request_again:
		// Try to read more of the request.
		char buf[1024];
		int ret;
		do {
			ret = read(client->sock, buf, sizeof(buf));
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// No more data right now. Nothing to do.
			// This is postcondition #2.
			return;
		}
		if (ret == -1) {
			log_perror("read");
			close_client(client);
			return;
		}
		if (ret == 0) {
			// OK, the socket is closed.
			close_client(client);
			return;
		}

		RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);

		switch (status) {
		case RP_OUT_OF_SPACE:
			log(WARNING, "[%s] Client sent overlong request!", client->remote_addr.c_str());
			close_client(client);
			return;
		case RP_NOT_FINISHED_YET:
			// OK, we don't have the entire header yet. Fine; we'll get it later.
			// See if there's more data for us.
			goto read_request_again;
		case RP_EXTRA_DATA:
			log(WARNING, "[%s] Junk data after request!", client->remote_addr.c_str());
			close_client(client);
			return;
		case RP_FINISHED:
			break;
		}

		assert(status == RP_FINISHED);

		int error_code = parse_request(client);
		if (error_code == 200) {
			construct_header(client);
		} else {
			construct_error(client, error_code);
		}

		// We've changed states, so fall through.
		assert(client->state == Client::SENDING_SHORT_RESPONSE ||
		       client->state == Client::SENDING_HEADER);
	}
	case Client::SENDING_SHORT_RESPONSE:
	case Client::SENDING_HEADER: {
sending_header_or_short_response_again:
		int ret;
		do {
			ret = write(client->sock,
			            client->header_or_short_response.data() + client->header_or_short_response_bytes_sent,
			            client->header_or_short_response.size() - client->header_or_short_response_bytes_sent);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// We're out of socket space, so now we're at the “low edge” of epoll's
			// edge triggering. epoll will tell us when there is more room, so for now,
			// just return.
			// This is postcondition #4.
			return;
		}

		if (ret == -1) {
			// Error! Postcondition #1.
			log_perror("write");
			close_client(client);
			return;
		}

		client->header_or_short_response_bytes_sent += ret;
		assert(client->header_or_short_response_bytes_sent <= client->header_or_short_response.size());

		if (client->header_or_short_response_bytes_sent < client->header_or_short_response.size()) {
			// We haven't sent all yet. Fine; go another round.
			goto sending_header_or_short_response_again;
		}

		// We're done sending the header or error! Clear it to release some memory.
		client->header_or_short_response.clear();

		if (client->state == Client::SENDING_SHORT_RESPONSE) {
			// We're done sending the error, so now close.
			// This is postcondition #1.
			close_client(client);
			return;
		}

		Stream *stream = client->stream;
		if (client->stream_pos == size_t(-2)) {
			// Start sending from the beginning of the backlog.
			client->stream_pos = min<size_t>(
			    stream->bytes_received - stream->backlog_size,
			    0);
			client->state = Client::SENDING_DATA;
			goto sending_data;
		} else if (stream->prebuffering_bytes == 0) {
			// Start sending from the first keyframe we get. In other
			// words, we won't send any of the backlog, but we'll start
			// sending immediately as we get the next keyframe block.
			// Note that this is functionally identical to the next if branch,
			// except that we save a binary search.
			client->stream_pos = stream->bytes_received;
			client->state = Client::WAITING_FOR_KEYFRAME;
		} else {
			// We're not going to send anything to the client before we have
			// N bytes. However, this wait might be boring; we can just as well
			// use it to send older data if we have it. We use lower_bound()
			// so that we are conservative and never add extra latency over just
			// waiting (assuming CBR or nearly so); otherwise, we could want e.g.
			// 100 kB prebuffer but end up sending a 10 MB GOP.
			deque<size_t>::const_iterator starting_point_it =
				lower_bound(stream->suitable_starting_points.begin(),
				            stream->suitable_starting_points.end(),
				            stream->bytes_received - stream->prebuffering_bytes);
			if (starting_point_it == stream->suitable_starting_points.end()) {
				// None found. Just put us at the end, and then wait for the
				// first keyframe to appear.
				client->stream_pos = stream->bytes_received;
				client->state = Client::WAITING_FOR_KEYFRAME;
			} else {
				client->stream_pos = *starting_point_it;
				client->state = Client::PREBUFFERING;
				goto prebuffering;
			}
		}
		// Fall through.
	}
	case Client::WAITING_FOR_KEYFRAME: {
		Stream *stream = client->stream;
		if (stream->suitable_starting_points.empty() ||
		    client->stream_pos > stream->suitable_starting_points.back()) {
			// We haven't received a keyframe since this client started waiting,
			// so keep on waiting for one.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
			return;
		}
		client->stream_pos = stream->suitable_starting_points.back();
		client->state = Client::PREBUFFERING;
		// Fall through.
	}
	case Client::PREBUFFERING: {
prebuffering:
		Stream *stream = client->stream;
		size_t bytes_to_send = stream->bytes_received - client->stream_pos;
		assert(bytes_to_send <= stream->backlog_size);
		if (bytes_to_send < stream->prebuffering_bytes) {
			// We don't have enough bytes buffered to start this client yet.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
			return;
		}
		client->state = Client::SENDING_DATA;
		// Fall through.
	}
	case Client::SENDING_DATA: {
sending_data:
		skip_lost_data(client);
		Stream *stream = client->stream;

sending_data_again:
		size_t bytes_to_send = stream->bytes_received - client->stream_pos;
		assert(bytes_to_send <= stream->backlog_size);
		if (bytes_to_send == 0) {
			return;
		}

		// See if we need to split across the circular buffer.
		bool more_data = false;
		if ((client->stream_pos % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
			bytes_to_send = stream->backlog_size - (client->stream_pos % stream->backlog_size);
			more_data = true;
		}

		ssize_t ret;
		do {
			off_t offset = client->stream_pos % stream->backlog_size;
			ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// We're out of socket space, so return; epoll will wake us up
			// when there is more room.
			// This is postcondition #4.
			return;
		}
		if (ret == -1) {
			// Error, close; postcondition #1.
			log_perror("sendfile");
			close_client(client);
			return;
		}
		client->stream_pos += ret;
		client->bytes_sent += ret;

		if (client->stream_pos == stream->bytes_received) {
			// We don't have any more data for this client, so put it to sleep.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
		} else if (more_data && size_t(ret) == bytes_to_send) {
			goto sending_data_again;
		}
		break;
	}
	default:
		assert(false);
	}
}
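// A note on the circular-buffer split in SENDING_DATA above, with a
// hypothetical 10 MB backlog as an example: if the client's position within
// the buffer is at 9.5 MB and there is 1 MB available, the first sendfile()
// only covers the last 0.5 MB of the buffer; if it sends all of it, the goto
// wraps around and sends the remaining 0.5 MB from offset 0.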
// See if there's some data we've lost. Ideally, we should drop to a block boundary,
// but resync will be the mux's problem.
void Server::skip_lost_data(Client *client)
{
	Stream *stream = client->stream;
	if (stream == NULL) {
		return;
	}
	size_t bytes_to_send = stream->bytes_received - client->stream_pos;
	if (bytes_to_send > stream->backlog_size) {
		size_t bytes_lost = bytes_to_send - stream->backlog_size;
		client->stream_pos = stream->bytes_received - stream->backlog_size;
		client->bytes_lost += bytes_lost;
		++client->num_loss_events;
	}
}
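// Worked example, assuming a hypothetical 10 MB backlog: if the stream has
// received 25 MB in total and the client is still at position 12 MB, the
// oldest byte still in the backlog is at 15 MB. The client is moved forward
// to 15 MB, 3 MB is added to its loss counter, and one loss event is recorded.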
int Server::parse_request(Client *client)
{
	vector<string> lines = split_lines(client->request);
	if (lines.empty()) {
		return 400;  // Bad request (empty).
	}

	// Parse the headers, for logging purposes.
	// TODO: Case-insensitivity.
	multimap<string, string> headers = extract_headers(lines, client->remote_addr);
	multimap<string, string>::const_iterator referer_it = headers.find("Referer");
	if (referer_it != headers.end()) {
		client->referer = referer_it->second;
	}
	multimap<string, string>::const_iterator user_agent_it = headers.find("User-Agent");
	if (user_agent_it != headers.end()) {
		client->user_agent = user_agent_it->second;
	}

	vector<string> request_tokens = split_tokens(lines[0]);
	if (request_tokens.size() < 2) {
		return 400;  // Bad request (not enough tokens).
	}
	if (request_tokens[0] != "GET") {
		return 400;  // Should maybe be 405 instead?
	}

	string url = request_tokens[1];
	if (url.size() >= 8 && url.find("?backlog") == url.size() - 8) {
		client->stream_pos = -2;
		url = url.substr(0, url.size() - 8);
	} else {
		client->stream_pos = -1;
	}

	map<string, int>::const_iterator url_map_it = url_map.find(url);
	if (url_map_it == url_map.end()) {
		return 404;  // Not found.
	}

	Stream *stream = streams[url_map_it->second];
	if (stream->http_header.empty()) {
		return 503;  // Service unavailable.
	}

	client->url = request_tokens[1];

	client->stream = stream;
	if (setsockopt(client->sock, SOL_SOCKET, SO_MAX_PACING_RATE, &client->stream->pacing_rate, sizeof(client->stream->pacing_rate)) == -1) {
		if (client->stream->pacing_rate != ~0U) {
			log_perror("setsockopt(SO_MAX_PACING_RATE)");
		}
	}
	client->request.clear();

	return 200;  // OK!
}
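// Note the two sentinel values assigned to stream_pos above: size_t(-2) means
// “start from the oldest byte in the backlog” (the ?backlog suffix), and
// size_t(-1) means “start at the current head of the stream”, i.e., prebuffer
// from a recent starting point or wait for the next keyframe. Both are
// resolved in process_client().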
void Server::construct_header(Client *client)
{
	Stream *stream = client->stream;
	if (stream->encoding == Stream::STREAM_ENCODING_RAW) {
		client->header_or_short_response = stream->http_header +
			"\r\n" +
			stream->stream_header;
	} else if (stream->encoding == Stream::STREAM_ENCODING_METACUBE) {
		client->header_or_short_response = stream->http_header +
			"Content-encoding: metacube\r\n" +
			"\r\n";
		if (!stream->stream_header.empty()) {
			metacube2_block_header hdr;
			memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
			hdr.size = htonl(stream->stream_header.size());
			hdr.flags = htons(METACUBE_FLAGS_HEADER);
			hdr.csum = htons(metacube2_compute_crc(&hdr));
			client->header_or_short_response.append(
				string(reinterpret_cast<char *>(&hdr), sizeof(hdr)));
		}
		client->header_or_short_response.append(stream->stream_header);
	} else {
		assert(false);
	}

	client->state = Client::SENDING_HEADER;

	epoll_event ev;
	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = reinterpret_cast<uint64_t>(client);

	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(1);
	}
}
void Server::construct_error(Client *client, int error_code)
{
	char error[256];
	snprintf(error, sizeof(error),
	         "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
	         error_code);
	client->header_or_short_response = error;

	client->state = Client::SENDING_SHORT_RESPONSE;

	epoll_event ev;
	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = reinterpret_cast<uint64_t>(client);

	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(1);
	}
}
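// Remove all instances of elem from the vector v, using the standard
// remove-erase idiom: remove() compacts the kept elements to the front,
// and erase() then trims the tail.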
template<class T>
void delete_from(vector<T> *v, T elem)
{
	typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
	v->erase(new_end, v->end());
}
void Server::close_client(Client *client)
{
	if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_DEL)");
		exit(1);
	}

	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
	if (client->stream != NULL) {
		delete_from(&client->stream->sleeping_clients, client);
		delete_from(&client->stream->to_process, client);
	}

	// Log to access_log.
	access_log->write(client->get_stats());

	// Bye-bye!
	safe_close(client->sock);

	clients.erase(client->sock);
}
void Server::process_queued_data()
{
	{
		MutexLock lock(&queued_clients_mutex);

		for (size_t i = 0; i < queued_add_clients.size(); ++i) {
			add_client(queued_add_clients[i]);
		}
		queued_add_clients.clear();
	}

	for (size_t i = 0; i < streams.size(); ++i) {
		streams[i]->process_queued_data();
	}
}