#include <assert.h>
#include <errno.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include <algorithm>
#include <deque>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "tlse.h"

#include "acceptor.h"
#include "accesslog.h"
#include "log.h"
#include "metacube2.h"
#include "mutexlock.h"
#include "parse.h"
#include "server.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47
#endif

using namespace std;

extern AccessLogThread *access_log;
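
// timespec has no built-in comparison operators, so the two helpers below
// provide the equality and ordering that do_work() needs when matching
// entries in clients_ordered_by_connect_time against each client's
// connect_time.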
inline bool is_equal(timespec a, timespec b)
{
	return a.tv_sec == b.tv_sec &&
	       a.tv_nsec == b.tv_nsec;
}

inline bool is_earlier(timespec a, timespec b)
{
	if (a.tv_sec != b.tv_sec)
		return a.tv_sec < b.tv_sec;
	return a.tv_nsec < b.tv_nsec;
}

Server::Server()
{
	pthread_mutex_init(&mutex, NULL);
	pthread_mutex_init(&queued_clients_mutex, NULL);

	epoll_fd = epoll_create(1024);  // Size argument is ignored.
	if (epoll_fd == -1) {
		log_perror("epoll_fd");
		exit(1);
	}
}

vector<ClientStats> Server::get_client_stats() const
{
	vector<ClientStats> ret;

	MutexLock lock(&mutex);
	for (const auto &fd_and_client : clients) {
		ret.push_back(fd_and_client.second.get_stats());
	}
	return ret;
}

void Server::do_work()
{
	while (!should_stop()) {
		// Wait until there's activity on at least one of the fds,
		// or 20 ms (about one frame at 50 fps) has elapsed.
		//
		// We could in theory wait forever and rely on wakeup()
		// from add_client_deferred() and add_data_deferred(),
		// but wakeup is a pretty expensive operation, and the
		// two threads might end up fighting over a lock, so it's
		// seemingly (much) more efficient to just have a timeout here.
		int nfds = epoll_pwait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS, &sigset_without_usr1_block);
		if (nfds == -1 && errno != EINTR) {
			log_perror("epoll_pwait");
			exit(1);
		}

		MutexLock lock(&mutex);  // We release the mutex between iterations.

		process_queued_data();

		// Process each client where we have socket activity.
		for (int i = 0; i < nfds; ++i) {
			Client *client = reinterpret_cast<Client *>(events[i].data.ptr);

			if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
				close_client(client);
				continue;
			}

			process_client(client);
		}

		// Process each client where its stream has new data,
		// even if there was no socket activity.
		for (unique_ptr<Stream> &stream : streams) {
			vector<Client *> to_process;
			swap(stream->to_process, to_process);
			for (Client *client : to_process) {
				process_client(client);
			}
		}

		// Finally, go through each client to see if it's timed out
		// in the READING_REQUEST state. (Seemingly there are clients
		// that can hold sockets up for days at a time without sending
		// anything at all.)
		timespec timeout_time;
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &timeout_time) == -1) {
			log_perror("clock_gettime(CLOCK_MONOTONIC_COARSE)");
			continue;
		}
		timeout_time.tv_sec -= REQUEST_READ_TIMEOUT_SEC;
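		// The queue is ordered by connect time, so we can stop at the first
		// client that is too young to have timed out. Entries that have gone
		// stale (the fd was closed, or even reused for a newer client) are
		// simply popped and skipped as we encounter them; this keeps the
		// scan amortized O(1) per connection, since every entry is pushed
		// and popped exactly once.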
		while (!clients_ordered_by_connect_time.empty()) {
			const pair<timespec, int> &connect_time_and_fd = clients_ordered_by_connect_time.front();

			// See if we have reached the end of clients to process.
			if (is_earlier(timeout_time, connect_time_and_fd.first)) {
				break;
			}

			// If this client doesn't exist anymore, just ignore it
			// (it was deleted earlier).
			auto client_it = clients.find(connect_time_and_fd.second);
			if (client_it == clients.end()) {
				clients_ordered_by_connect_time.pop();
				continue;
			}
			Client *client = &client_it->second;
			if (!is_equal(client->connect_time, connect_time_and_fd.first)) {
				// Another client has taken this fd in the meantime.
				clients_ordered_by_connect_time.pop();
				continue;
			}

			if (client->state != Client::READING_REQUEST) {
				// Only READING_REQUEST can time out.
				clients_ordered_by_connect_time.pop();
				continue;
			}

			// OK, it timed out.
			close_client(client);
			clients_ordered_by_connect_time.pop();
		}
	}
}

CubemapStateProto Server::serialize()
{
	// We don't serialize anything queued, so empty the queues.
	process_queued_data();

	// Set all clients in a consistent state before serializing
	// (i.e., they have no remaining lost data). Otherwise, increasing
	// the backlog could take clients into a newly valid area of the backlog,
	// sending a stream of zeros instead of skipping the data as it should.
	//
	// TODO: Do this when clients are added back from serialized state instead;
	// it would probably be less wasteful.
	for (auto &fd_and_client : clients) {
		skip_lost_data(&fd_and_client.second);
	}

	CubemapStateProto serialized;
	for (const auto &fd_and_client : clients) {
		serialized.add_clients()->MergeFrom(fd_and_client.second.serialize());
	}
	for (unique_ptr<Stream> &stream : streams) {
		serialized.add_streams()->MergeFrom(stream->serialize());
	}
	return serialized;
}
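
// Acceptor threads cannot safely touch the epoll set or the client map, so
// they queue new sockets here; the worker thread drains the queue under its
// own mutex in process_queued_data().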
void Server::add_client_deferred(int sock, Acceptor *acceptor)
{
	MutexLock lock(&queued_clients_mutex);
	queued_add_clients.push_back(std::make_pair(sock, acceptor));
}

void Server::add_client(int sock, Acceptor *acceptor)
{
	const bool is_tls = acceptor->is_tls();
	auto inserted = clients.insert(make_pair(sock, Client(sock)));
	assert(inserted.second == true);  // Should not already exist.
	Client *client_ptr = &inserted.first->second;

	// Connection timestamps must be nondecreasing. I can't find any guarantee
	// that even the monotonic clock can't go backwards by a small amount
	// (think switching between CPUs with non-synchronized TSCs), so if
	// this actually should happen, we hack around it by fudging
	// connect_time.
	if (!clients_ordered_by_connect_time.empty() &&
	    is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first)) {
		client_ptr->connect_time = clients_ordered_by_connect_time.back().first;
	}
	clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, sock));

	// Start listening on data from this socket.
	epoll_event ev;
	if (is_tls) {
		// Even in the initial state (READING_REQUEST), TLS needs to
		// send data for the handshake, and thus might end up needing
		// to know about EPOLLOUT.
		ev.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP;
	} else {
		// EPOLLOUT will be added once we go out of READING_REQUEST.
		ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
	}
	ev.data.ptr = client_ptr;
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
		exit(1);
	}

	if (is_tls) {
		assert(tls_server_contexts.count(acceptor));
		client_ptr->tls_context = tls_accept(tls_server_contexts[acceptor]);
		if (client_ptr->tls_context == NULL) {
			log(ERROR, "tls_accept() failed");
			close_client(client_ptr);
			return;
		}
		tls_make_exportable(client_ptr->tls_context, 1);
	}
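
	// Process the client once right away: the TLS handshake may have data
	// to send immediately, and the request may already be waiting in the
	// socket buffer.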
	process_client(client_ptr);
}

void Server::add_client_from_serialized(const ClientProto &client)
{
	MutexLock lock(&mutex);
	Stream *stream;
	int stream_index = lookup_stream_by_url(client.url());
	if (stream_index == -1) {
		assert(client.state() != Client::SENDING_DATA);
		stream = NULL;
	} else {
		stream = streams[stream_index].get();
	}
	auto inserted = clients.insert(make_pair(client.sock(), Client(client, stream)));
	assert(inserted.second == true);  // Should not already exist.
	Client *client_ptr = &inserted.first->second;

	// Connection timestamps must be nondecreasing.
	assert(clients_ordered_by_connect_time.empty() ||
	       !is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first));
	clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, client.sock()));

	// Start listening on data from this socket.
	epoll_event ev;
	if (client.state() == Client::READING_REQUEST) {
		// See the corresponding comment in Server::add_client().
		if (client.has_tls_context()) {
			ev.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP;
		} else {
			ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
		}
	} else {
		// If we don't have more data for this client, we'll be putting it into
		// the sleeping array again soon.
		ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	}
	ev.data.ptr = client_ptr;
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
		exit(1);
	}

	if (client_ptr->state == Client::WAITING_FOR_KEYFRAME ||
	    client_ptr->state == Client::PREBUFFERING ||
	    (client_ptr->state == Client::SENDING_DATA &&
	     client_ptr->stream_pos == client_ptr->stream->bytes_received)) {
		client_ptr->stream->put_client_to_sleep(client_ptr);
	} else {
		process_client(client_ptr);
	}
}

int Server::lookup_stream_by_url(const string &url) const
{
	map<string, int>::const_iterator stream_url_it = stream_url_map.find(url);
	if (stream_url_it == stream_url_map.end()) {
		return -1;
	}
	return stream_url_it->second;
}
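
// Typical configuration-time usage of the stream setup API below, as a
// sketch only (the real call sites live in the config handling, not in this
// file; the URL, sizes and header strings here are made up for illustration):
//
//   int idx = server->add_stream("/test.ts", /*backlog_size=*/10 << 20,
//                                /*prebuffering_bytes=*/0,
//                                Stream::STREAM_ENCODING_RAW,
//                                Stream::STREAM_ENCODING_RAW);
//   server->set_header(idx, "HTTP/1.0 200 OK\r\n", "");
//
//   // Later, from the input side:
//   server->add_data_deferred(idx, buf, len, /*metacube_flags=*/0);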

int Server::add_stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Stream::Encoding encoding, Stream::Encoding src_encoding)
{
	MutexLock lock(&mutex);
	stream_url_map.insert(make_pair(url, streams.size()));
	streams.emplace_back(new Stream(url, backlog_size, prebuffering_bytes, encoding, src_encoding));
	return streams.size() - 1;
}

int Server::add_stream_from_serialized(const StreamProto &stream, int data_fd)
{
	MutexLock lock(&mutex);
	stream_url_map.insert(make_pair(stream.url(), streams.size()));
	streams.emplace_back(new Stream(stream, data_fd));
	return streams.size() - 1;
}

void Server::set_backlog_size(int stream_index, size_t new_size)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->set_backlog_size(new_size);
}

void Server::set_prebuffering_bytes(int stream_index, size_t new_amount)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->prebuffering_bytes = new_amount;
}

void Server::set_encoding(int stream_index, Stream::Encoding encoding)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->encoding = encoding;
}

void Server::set_src_encoding(int stream_index, Stream::Encoding encoding)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->src_encoding = encoding;
}

void Server::set_header(int stream_index, const string &http_header, const string &stream_header)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->http_header = http_header;

	if (stream_header != streams[stream_index]->stream_header) {
		// We cannot start at any of the older starting points anymore,
		// since they'd get the wrong header for the stream (not to mention
		// that a changed header probably means the stream restarted,
		// which means any client starting on the old one would probably
		// stop playing properly at the change point). Next block
		// should be a suitable starting point (if not, something is
		// pretty strange), so it will fill up again soon enough.
		streams[stream_index]->suitable_starting_points.clear();
	}
	streams[stream_index]->stream_header = stream_header;
}
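
// The next two setters are intended for configuration time only: in
// particular, the pacing rate is applied to each client's socket at request
// time (see parse_request()), so changing it here would not reach
// already-connected clients. Hence the asserts that no clients exist yet.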
void Server::set_pacing_rate(int stream_index, uint32_t pacing_rate)
{
	MutexLock lock(&mutex);
	assert(clients.empty());
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->pacing_rate = pacing_rate;
}

void Server::add_gen204(const std::string &url, const std::string &allow_origin)
{
	MutexLock lock(&mutex);
	assert(clients.empty());
	ping_url_map[url] = allow_origin;
}

void Server::create_tls_context_for_acceptor(const Acceptor *acceptor)
{
	assert(acceptor->is_tls());

	bool is_server = true;
	TLSContext *server_context = tls_create_context(is_server, TLS_V12);

	const string &cert = acceptor->get_certificate_chain();
	int num_cert = tls_load_certificates(server_context, reinterpret_cast<const unsigned char *>(cert.data()), cert.size());
	assert(num_cert > 0);  // Should have been checked by config earlier.

	const string &key = acceptor->get_private_key();
	int num_key = tls_load_private_key(server_context, reinterpret_cast<const unsigned char *>(key.data()), key.size());
	assert(num_key > 0);  // Should have been checked by config earlier.

	tls_server_contexts.insert(make_pair(acceptor, server_context));
}
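
// Queues data on the given stream; the worker thread actually appends it to
// the backlog in process_queued_data(), which in turn (via the stream) wakes
// any clients sleeping on that stream.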
void Server::add_data_deferred(int stream_index, const char *data, size_t bytes, uint16_t metacube_flags)
{
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->add_data_deferred(data, bytes, metacube_flags);
}

// See the .h file for postconditions after this function.
void Server::process_client(Client *client)
{
	switch (client->state) {
	case Client::READING_REQUEST: {
		if (client->tls_context != NULL) {
			if (send_pending_tls_data(client)) {
				// send_pending_tls_data() hit postconditions #1 or #4.
				return;
			}
		}

read_request_again:
		// Try to read more of the request.
		char buf[1024];
		int ret;
		if (client->tls_context == NULL) {
			ret = read_nontls_data(client, buf, sizeof(buf));
			if (ret == -1) {
				// read_nontls_data() hit postconditions #1 or #2.
				return;
			}
		} else {
			ret = read_tls_data(client, buf, sizeof(buf));
			if (ret == -1) {
				// read_tls_data() hit postconditions #1, #2 or #4.
				return;
			}
		}

		RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);

		switch (status) {
		case RP_OUT_OF_SPACE:
			log(WARNING, "[%s] Client sent overlong request!", client->remote_addr.c_str());
			close_client(client);
			return;
		case RP_NOT_FINISHED_YET:
			// OK, we don't have the entire header yet. Fine; we'll get it later.
			// See if there's more data for us.
			goto read_request_again;
		case RP_EXTRA_DATA:
			log(WARNING, "[%s] Junk data after request!", client->remote_addr.c_str());
			close_client(client);
			return;
		case RP_FINISHED:
			break;
		}

		assert(status == RP_FINISHED);

		if (client->tls_context && !client->in_ktls_mode && tls_established(client->tls_context)) {
			// We're ready to enter kTLS mode, unless we still have some
			// handshake data to send (which then must be sent as non-kTLS).
			if (send_pending_tls_data(client)) {
				// send_pending_tls_data() hit postconditions #1 or #4.
				return;
			}
			ret = tls_make_ktls(client->tls_context, client->sock);
			if (ret < 0) {
				log_tls_error("tls_make_ktls", ret);
				close_client(client);
				return;
			}
			client->in_ktls_mode = true;
		}

		int error_code = parse_request(client);
		if (error_code == 200) {
			construct_header(client);
		} else if (error_code == 204) {
			construct_204(client);
		} else {
			construct_error(client, error_code);
		}

		// We've changed states, so fall through.
		assert(client->state == Client::SENDING_SHORT_RESPONSE ||
		       client->state == Client::SENDING_HEADER);
	}
	case Client::SENDING_SHORT_RESPONSE:
	case Client::SENDING_HEADER: {
sending_header_or_short_response_again:
		int ret;
		do {
			ret = write(client->sock,
			            client->header_or_short_response.data() + client->header_or_short_response_bytes_sent,
			            client->header_or_short_response.size() - client->header_or_short_response_bytes_sent);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// We're out of socket space, so now we're at the “low edge” of epoll's
			// edge triggering. epoll will tell us when there is more room, so for now,
			// just return.
			// This is postcondition #4.
			return;
		}
		if (ret == -1) {
			// Error! Postcondition #1.
			log_perror("write");
			close_client(client);
			return;
		}

		client->header_or_short_response_bytes_sent += ret;
		assert(client->header_or_short_response_bytes_sent <= client->header_or_short_response.size());

		if (client->header_or_short_response_bytes_sent < client->header_or_short_response.size()) {
			// We haven't sent all yet. Fine; go another round.
			goto sending_header_or_short_response_again;
		}

		// We're done sending the header or error! Clear it to release some memory.
		client->header_or_short_response.clear();

		if (client->state == Client::SENDING_SHORT_RESPONSE) {
			// We're done sending the error, so now close.
			// This is postcondition #1.
			close_client(client);
			return;
		}

		Stream *stream = client->stream;
		if (client->stream_pos == size_t(-2)) {
			// Start sending from the beginning of the backlog.
			client->stream_pos = max<ssize_t>(
			    stream->bytes_received - stream->backlog_size,
			    0);
			client->state = Client::SENDING_DATA;
			goto sending_data;
		} else if (stream->prebuffering_bytes == 0) {
			// Start sending from the first keyframe we get. In other
			// words, we won't send any of the backlog, but we'll start
			// sending immediately as we get the next keyframe block.
			// Note that this is functionally identical to the next if branch,
			// except that we save a binary search.
			client->stream_pos = stream->bytes_received;
			client->state = Client::WAITING_FOR_KEYFRAME;
		} else {
			// We're not going to send anything to the client before we have
			// N bytes. However, this wait might be boring; we can just as well
			// use it to send older data if we have it. We use lower_bound()
			// so that we are conservative and never add extra latency over just
			// waiting (assuming CBR or nearly so); otherwise, we could want e.g.
			// 100 kB prebuffer but end up sending a 10 MB GOP.
			deque<size_t>::const_iterator starting_point_it =
				lower_bound(stream->suitable_starting_points.begin(),
				            stream->suitable_starting_points.end(),
				            stream->bytes_received - stream->prebuffering_bytes);
			if (starting_point_it == stream->suitable_starting_points.end()) {
				// None found. Just put us at the end, and then wait for the
				// first keyframe to appear.
				client->stream_pos = stream->bytes_received;
				client->state = Client::WAITING_FOR_KEYFRAME;
			} else {
				client->stream_pos = *starting_point_it;
				client->state = Client::PREBUFFERING;
				goto prebuffering;
			}
		}
		// Fall through.
	}
	case Client::WAITING_FOR_KEYFRAME: {
		Stream *stream = client->stream;
		if (stream->suitable_starting_points.empty() ||
		    client->stream_pos > stream->suitable_starting_points.back()) {
			// We haven't received a keyframe since this stream started waiting,
			// so keep on waiting for one.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
			return;
		}
		client->stream_pos = stream->suitable_starting_points.back();
		client->state = Client::PREBUFFERING;
		// Fall through.
	}
	case Client::PREBUFFERING: {
prebuffering:
		Stream *stream = client->stream;
		size_t bytes_to_send = stream->bytes_received - client->stream_pos;
		assert(bytes_to_send <= stream->backlog_size);
		if (bytes_to_send < stream->prebuffering_bytes) {
			// We don't have enough bytes buffered to start this client yet.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
			return;
		}
		client->state = Client::SENDING_DATA;
		// Fall through.
	}
	case Client::SENDING_DATA: {
sending_data:
		skip_lost_data(client);
		Stream *stream = client->stream;

sending_data_again:
		size_t bytes_to_send = stream->bytes_received - client->stream_pos;
		assert(bytes_to_send <= stream->backlog_size);
		if (bytes_to_send == 0) {
			return;
		}

		// See if we need to split across the circular buffer.
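		// (Example: with a 1 MB backlog, a client at backlog offset 900 kB
		// with 300 kB pending gets the 100 kB up to the end of the buffer
		// now, and the remaining 200 kB on the next round, after wrapping
		// to offset 0.)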
		bool more_data = false;
		if ((client->stream_pos % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
			bytes_to_send = stream->backlog_size - (client->stream_pos % stream->backlog_size);
			more_data = true;
		}

		ssize_t ret;
		do {
			off_t offset = client->stream_pos % stream->backlog_size;
			ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// We're out of socket space, so return; epoll will wake us up
			// when there is more room.
			// This is postcondition #4.
			return;
		}
		if (ret == -1) {
			// Error, close; postcondition #1.
			log_perror("sendfile");
			close_client(client);
			return;
		}
		client->stream_pos += ret;
		client->bytes_sent += ret;

		if (client->stream_pos == stream->bytes_received) {
			// We don't have any more data for this client, so put it to sleep.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
		} else if (more_data && size_t(ret) == bytes_to_send) {
			goto sending_data_again;
		}
		break;
	}
	default:
		assert(false);
	}
}
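
// Returns true if the caller must stop processing this client for now:
// either we filled the socket buffer (postcondition #4), or the client was
// closed on error (postcondition #1). Returns false if there is nothing
// (left) to send, so processing may continue.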
bool Server::send_pending_tls_data(Client *client)
{
	// See if there's data from the TLS library to write.
	if (client->tls_data_to_send == NULL) {
		client->tls_data_to_send = tls_get_write_buffer(client->tls_context, &client->tls_data_left_to_send);
		if (client->tls_data_to_send == NULL) {
			// Really no data to send.
			return false;
		}
	}

send_data_again:
	int ret;
	do {
		ret = write(client->sock, client->tls_data_to_send, client->tls_data_left_to_send);
	} while (ret == -1 && errno == EINTR);
	assert(ret < 0 || size_t(ret) <= client->tls_data_left_to_send);

	if (ret == -1 && errno == EAGAIN) {
		// We're out of socket space, so now we're at the “low edge” of epoll's
		// edge triggering. epoll will tell us when there is more room, so for now,
		// just return.
		// This is postcondition #4.
		return true;
	}
	if (ret == -1) {
		// Error! Postcondition #1.
		log_perror("write");
		close_client(client);
		return true;
	}
	if (ret > 0 && size_t(ret) == client->tls_data_left_to_send) {
		// All data has been sent, so we don't need to go to sleep.
		tls_buffer_clear(client->tls_context);
		client->tls_data_to_send = NULL;
		return false;
	}

	// More data to send, so try again.
	client->tls_data_to_send += ret;
	client->tls_data_left_to_send -= ret;
	goto send_data_again;
}
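
// Reads regular (non-TLS) data from the socket. Returns the number of bytes
// read, or -1 if the caller should stop processing this client for now (no
// more data right now, or the client was closed).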
int Server::read_nontls_data(Client *client, char *buf, size_t max_size)
{
	int ret;
	do {
		ret = read(client->sock, buf, max_size);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1 && errno == EAGAIN) {
		// No more data right now. Nothing to do.
		// This is postcondition #2.
		return -1;
	}
	if (ret == -1) {
		log_perror("read");
		close_client(client);
		return -1;
	}
	if (ret == 0) {
		// OK, the socket is closed.
		close_client(client);
		return -1;
	}

	return ret;
}

int Server::read_tls_data(Client *client, char *buf, size_t max_size)
{
read_again:
	int ret;
	do {
		ret = read(client->sock, buf, max_size);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1 && errno == EAGAIN) {
		// No more data right now. Nothing to do.
		// This is postcondition #2.
		return -1;
	}
	if (ret == -1) {
		log_perror("read");
		close_client(client);
		return -1;
	}
	if (ret == 0) {
		// OK, the socket is closed.
		close_client(client);
		return -1;
	}

	// Give it to the TLS library.
	int err = tls_consume_stream(client->tls_context, reinterpret_cast<const unsigned char *>(buf), ret, nullptr);
	if (err < 0) {
		log_tls_error("tls_consume_stream", err);
		close_client(client);
		return -1;
	}
	if (err == 0) {
		// Not consumed any data. See if we can read more.
		goto read_again;
	}

	// Read any decrypted data available for us. (We can reuse buf, since it's free now.)
	ret = tls_read(client->tls_context, reinterpret_cast<unsigned char *>(buf), max_size);
	if (ret == 0) {
		// No decrypted data for us yet, but there might be some more handshaking
		// to send. Do that if needed, then look for more data.
		if (send_pending_tls_data(client)) {
			// send_pending_tls_data() hit postconditions #1 or #4.
			return -1;
		}
		goto read_again;
	}
	if (ret < 0) {
		log_tls_error("tls_read", ret);
		close_client(client);
		return -1;
	}

	return ret;
}

// See if there's some data we've lost. Ideally, we should drop to a block boundary,
// but resync will be the mux's problem.
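// (Example: with a 10 MB backlog, a client that has fallen 12 MB behind the
// head is moved 2 MB forward to the oldest byte still in the backlog, and
// those 2 MB are counted as lost in its stats.)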
void Server::skip_lost_data(Client *client)
{
	Stream *stream = client->stream;
	if (stream == NULL) {
		return;
	}
	size_t bytes_to_send = stream->bytes_received - client->stream_pos;
	if (bytes_to_send > stream->backlog_size) {
		size_t bytes_lost = bytes_to_send - stream->backlog_size;
		client->stream_pos = stream->bytes_received - stream->backlog_size;
		client->bytes_lost += bytes_lost;
		++client->num_loss_events;
	}
}
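
// Parses the HTTP request in client->request. Returns an HTTP status code:
// 200 means a stream matched (process_client() will call construct_header()),
// 204 a ping URL (construct_204()), and anything else an error
// (construct_error()).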
int Server::parse_request(Client *client)
{
	vector<string> lines = split_lines(client->request);
	if (lines.empty()) {
		return 400;  // Bad request (empty).
	}

	// Parse the headers, for logging purposes.
	// TODO: Case-insensitivity.
	multimap<string, string> headers = extract_headers(lines, client->remote_addr);
	multimap<string, string>::const_iterator referer_it = headers.find("Referer");
	if (referer_it != headers.end()) {
		client->referer = referer_it->second;
	}
	multimap<string, string>::const_iterator user_agent_it = headers.find("User-Agent");
	if (user_agent_it != headers.end()) {
		client->user_agent = user_agent_it->second;
	}

	vector<string> request_tokens = split_tokens(lines[0]);
	if (request_tokens.size() < 2) {
		return 400;  // Bad request (empty).
	}
	if (request_tokens[0] != "GET") {
		return 400;  // Should maybe be 405 instead?
	}

	string url = request_tokens[1];
	if (url.size() > 8 && url.find("?backlog") == url.size() - 8) {
		client->stream_pos = -2;
		url = url.substr(0, url.size() - 8);
	} else {
		client->stream_pos = -1;
	}
	client->url = url;  // construct_204() needs the (stripped) URL later.

	map<string, int>::const_iterator stream_url_map_it = stream_url_map.find(url);
	if (stream_url_map_it == stream_url_map.end()) {
		map<string, string>::const_iterator ping_url_map_it = ping_url_map.find(url);
		if (ping_url_map_it == ping_url_map.end()) {
			return 404;  // Not found.
		}
		return 204;  // No error.
	}

	Stream *stream = streams[stream_url_map_it->second].get();
	if (stream->http_header.empty()) {
		return 503;  // Service unavailable.
	}

	client->stream = stream;
	if (setsockopt(client->sock, SOL_SOCKET, SO_MAX_PACING_RATE, &client->stream->pacing_rate, sizeof(client->stream->pacing_rate)) == -1) {
		if (client->stream->pacing_rate != ~0U) {
			log_perror("setsockopt(SO_MAX_PACING_RATE)");
		}
	}
	client->request.clear();

	return 200;  // OK!
}

void Server::construct_header(Client *client)
{
	Stream *stream = client->stream;
	if (stream->encoding == Stream::STREAM_ENCODING_RAW) {
		client->header_or_short_response = stream->http_header +
			"\r\n" +
			stream->stream_header;
	} else if (stream->encoding == Stream::STREAM_ENCODING_METACUBE) {
		client->header_or_short_response = stream->http_header +
			"Content-encoding: metacube\r\n" +
			"\r\n";
		if (!stream->stream_header.empty()) {
			metacube2_block_header hdr;
			memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
			hdr.size = htonl(stream->stream_header.size());
			hdr.flags = htons(METACUBE_FLAGS_HEADER);
			hdr.csum = htons(metacube2_compute_crc(&hdr));
			client->header_or_short_response.append(
				string(reinterpret_cast<char *>(&hdr), sizeof(hdr)));
		}
		client->header_or_short_response.append(stream->stream_header);
	} else {
		assert(false);
	}

	// Switch states.
	client->state = Client::SENDING_HEADER;
	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
}

void Server::construct_error(Client *client, int error_code)
{
	char error[256];
	snprintf(error, 256, "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
		error_code);
	client->header_or_short_response = error;

	// Switch states.
	client->state = Client::SENDING_SHORT_RESPONSE;
	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
}

void Server::construct_204(Client *client)
{
	map<string, string>::const_iterator ping_url_map_it = ping_url_map.find(client->url);
	assert(ping_url_map_it != ping_url_map.end());

	if (ping_url_map_it->second.empty()) {
		client->header_or_short_response =
			"HTTP/1.0 204 No Content\r\n"
			"\r\n";
	} else {
		char response[256];
		snprintf(response, 256,
			"HTTP/1.0 204 No Content\r\n"
			"Access-Control-Allow-Origin: %s\r\n"
			"\r\n",
			ping_url_map_it->second.c_str());
		client->header_or_short_response = response;
	}

	// Switch states.
	client->state = Client::SENDING_SHORT_RESPONSE;
	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
}

template<class T>
void delete_from(vector<T> *v, T elem)
{
	typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
	v->erase(new_end, v->end());
}

void Server::close_client(Client *client)
{
	if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_DEL)");
		exit(1);
	}

	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
	if (client->stream != NULL) {
		delete_from(&client->stream->sleeping_clients, client);
		delete_from(&client->stream->to_process, client);
	}

	if (client->tls_context) {
		tls_destroy_context(client->tls_context);
	}

	// Log to access_log.
	access_log->write(client->get_stats());

	// Bye-bye!
	safe_close(client->sock);

	clients.erase(client->sock);
}

void Server::change_epoll_events(Client *client, uint32_t events)
{
	epoll_event ev;
	ev.events = events;
	ev.data.ptr = client;

	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(1);
	}
}

void Server::process_queued_data()
{
	{
		MutexLock lock(&queued_clients_mutex);

		for (const pair<int, Acceptor *> &id_and_acceptor : queued_add_clients) {
			add_client(id_and_acceptor.first, id_and_acceptor.second);
		}
		queued_add_clients.clear();
	}

	for (unique_ptr<Stream> &stream : streams) {
		stream->process_queued_data();
	}
}