6 #include <netinet/in.h>
11 #include <sys/ioctl.h>
12 #include <sys/socket.h>
14 #include <sys/types.h>
25 #include "httpinput.h"
27 #include "metacube2.h"
29 #include "serverpool.h"
// Build the value for an HTTP "Host:" header from a host and a port.
// Port 80 (or the symbolic service name "http") is the protocol default;
// for any other port, append ":port" to the host.
// NOTE(review): the branch for the default-port case is elided from this
// view — presumably it returns just <host>; confirm against full source.
40 string host_header(const string &host, const string &port)
42 if (port == "http" || atoi(port.c_str()) == 80) {
// Non-default port: the client must send it explicitly.
45 return host + ":" + port;
51 extern ServerPool *servers;
// Construct a fresh input for the given URL. No connection is made here;
// the state machine in do_work() drives the actual connect.
53 HTTPInput::HTTPInput(const string &url, Input::Encoding encoding)
54 : state(NOT_CONNECTED),
// No traffic seen yet; connect_time of -1 means "not connected", and
// latency is unknown until a Metacube timestamp block arrives.
59 stats.bytes_received = 0;
60 stats.data_bytes_received = 0;
61 stats.metadata_bytes_received = 0;
62 stats.connect_time = -1;
63 stats.latency_sec = HUGE_VAL;
// Reconstruct an input from its serialized protobuf form, picking up an
// already-open socket and mid-stream parser state (used when the process
// re-execs itself and hands live connections to the new binary).
66 HTTPInput::HTTPInput(const InputProto &serialized)
67 : state(State(serialized.state())),
68 url(serialized.url()),
69 encoding(serialized.is_metacube_encoded() ?
70 Input::INPUT_ENCODING_METACUBE :
71 Input::INPUT_ENCODING_RAW),
72 request(serialized.request()),
73 request_bytes_sent(serialized.request_bytes_sent()),
74 response(serialized.response()),
75 http_header(serialized.http_header()),
76 stream_header(serialized.stream_header()),
77 has_metacube_header(serialized.has_metacube_header()),
78 sock(serialized.sock())
80 // Set back the close-on-exec flag for the socket.
81 // (This can't leak into a child, since we haven't been started yet.)
82 fcntl(sock, F_SETFD, 1);
// Restore any buffered-but-unprocessed stream bytes.
84 pending_data.resize(serialized.pending_data().size());
85 memcpy(&pending_data[0], serialized.pending_data().data(), serialized.pending_data().size());
// Re-derive host/port/path from the URL rather than serializing them.
87 string protocol, user;
88 parse_url(url, &protocol, &user, &host, &port, &path); // Don't care if it fails.
91 stats.bytes_received = serialized.bytes_received();
92 stats.data_bytes_received = serialized.data_bytes_received();
93 stats.metadata_bytes_received = serialized.metadata_bytes_received();
94 if (serialized.has_connect_time()) {
95 stats.connect_time = serialized.connect_time();
// Elided else-branch: older serializations without connect_time fall
// back to "connected now".
97 stats.connect_time = time(nullptr);
99 if (serialized.has_latency_sec()) {
100 stats.latency_sec = serialized.latency_sec();
// Elided else-branch: no serialized latency means "unknown".
102 stats.latency_sec = HUGE_VAL;
// Pretend the last verbose connection was an hour ago, so the first
// connection attempt after restart is logged at full verbosity.
105 last_verbose_connection.tv_sec = -3600;
106 last_verbose_connection.tv_nsec = 0;
// Close the input socket and mark the input as disconnected in the stats.
// NOTE(review): the actual close()/state handling is elided from this view;
// only the stats update is visible here.
109 void HTTPInput::close_socket()
// connect_time == -1 is the "not connected" sentinel (see constructor).
116 lock_guard<mutex> lock(stats_mutex);
117 stats.connect_time = -1;
// Serialize the full input state (including the live socket fd) into a
// protobuf, so a re-exec'd process can resume via the deserializing
// constructor above.
120 InputProto HTTPInput::serialize() const
122 // Unset the close-on-exec flag for the socket.
123 // (This can't leak into a child, since there's only one thread left.)
124 fcntl(sock, F_SETFD, 0);
126 InputProto serialized;
127 serialized.set_state(state);
128 serialized.set_url(url);
129 serialized.set_request(request);
130 serialized.set_request_bytes_sent(request_bytes_sent);
131 serialized.set_response(response);
132 serialized.set_http_header(http_header);
133 serialized.set_stream_header(stream_header);
134 serialized.set_pending_data(string(pending_data.begin(), pending_data.end()));
135 serialized.set_has_metacube_header(has_metacube_header);
136 serialized.set_sock(sock);
137 serialized.set_bytes_received(stats.bytes_received);
138 serialized.set_data_bytes_received(stats.data_bytes_received);
// latency_sec is HUGE_VAL when unknown; only serialize a finite value
// (absence of the field signals "unknown" on deserialization).
139 if (isfinite(stats.latency_sec)) {
140 serialized.set_latency_sec(stats.latency_sec);
142 serialized.set_connect_time(stats.connect_time);
// Encoding is stored as a bool in the proto; only two encodings exist.
143 if (encoding == Input::INPUT_ENCODING_METACUBE) {
144 serialized.set_is_metacube_encoded(true);
146 assert(encoding == Input::INPUT_ENCODING_RAW);
147 serialized.set_is_metacube_encoded(false);
// Resolve <host>:<port> and try each returned address in turn until a
// TCP connection succeeds. Returns the connected socket fd, or (in the
// elided error paths) presumably -1 — confirm against full source.
// The connect is done non-blocking so we can honor should_stop().
152 int HTTPInput::lookup_and_connect(const string &host, const string &port)
155 int err = getaddrinfo(host.c_str(), port.c_str(), nullptr, &ai);
157 if (!suppress_logging) {
158 log(WARNING, "[%s] Lookup of '%s' failed (%s).",
159 url.c_str(), host.c_str(), gai_strerror(err));
// Keep the head of the list so we can freeaddrinfo() it later,
// regardless of how far we iterate.
164 addrinfo *base_ai = ai;
166 // Connect to everything in turn until we have a socket.
167 for ( ; ai && !should_stop(); ai = ai->ai_next) {
168 // Now do a non-blocking connect. This is important because we want to be able to be
169 // woken up, even though it's rather cumbersome.
170 int sock = socket(ai->ai_family, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, IPPROTO_TCP);
172 // Could be e.g. EPROTONOSUPPORT. The show must go on.
176 // Do a non-blocking connect.
// Retry if a signal interrupts the call before it gets going.
178 err = connect(sock, ai->ai_addr, ai->ai_addrlen);
179 } while (err == -1 && errno == EINTR);
// EINPROGRESS is the expected result of a non-blocking connect;
// anything else is a real error for this address.
181 if (err == -1 && errno != EINPROGRESS) {
182 log_perror("connect");
187 // Wait for the connect to complete, or an error to happen.
189 bool complete = wait_for_activity(sock, POLLIN | POLLOUT, nullptr);
199 // Check whether it ended in an error or not.
200 socklen_t err_size = sizeof(err);
201 if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &err_size) == -1) {
202 log_perror("getsockopt");
210 // Successful connect.
211 freeaddrinfo(base_ai);
218 // Give the last one as error.
219 if (!suppress_logging) {
220 log(WARNING, "[%s] Connect to '%s' failed (%s)",
221 url.c_str(), host.c_str(), strerror(errno));
223 freeaddrinfo(base_ai);
// Spawn a child process for a pipe: input and return the read end of a
// pipe connected to the child's stdout. The fd lines visible here wire
// the child's stdin to /dev/null and stdout to our pipe.
// NOTE(review): the argv construction is partially elided; the visible
// strdup(path.c_str()) suggests the command line is passed through
// /bin/sh — confirm the exact argv against the full source.
227 int HTTPInput::open_child_process(const string &cmdline)
229 int devnullfd = open("/dev/null", O_RDONLY | O_CLOEXEC);
230 if (devnullfd == -1) {
231 log_perror("/dev/null");
236 if (pipe2(pipefd, O_CLOEXEC) == -1) {
237 log_perror("pipe2()");
242 // Point stdout to us, stdin to /dev/null, and stderr remains where it is
243 // (probably the systemd log). All other file descriptors should be marked
244 // as close-on-exec, and should thus not leak into the child.
245 posix_spawn_file_actions_t actions;
246 posix_spawn_file_actions_init(&actions);
247 posix_spawn_file_actions_adddup2(&actions, devnullfd, 0);
248 posix_spawn_file_actions_adddup2(&actions, pipefd[1], 1);
251 char * const argv[] = {
254 strdup(path.c_str()),
257 int err = posix_spawn(&child_pid, "/bin/sh", &actions, /*attrp=*/nullptr, argv, /*envp=*/nullptr);
258 posix_spawn_file_actions_destroy(&actions);
// Elided error path: log the failing command line on spawn failure.
267 log_perror(cmdline.c_str());
// Parse the upstream server's HTTP response (stored in this->response).
// Accepts only a 200 status; rewrites the headers so they are suitable
// for re-serving to our own clients, then installs the new header on all
// attached streams. Returns false (in the elided error paths) when the
// response is unusable.
273 bool HTTPInput::parse_response(const string &request)
275 vector<string> lines = split_lines(response);
277 if (!suppress_logging) {
278 log(WARNING, "[%s] Empty HTTP response from input.", url.c_str());
// The status line is e.g. "HTTP/1.0 200 OK" — at least two tokens.
283 vector<string> first_line_tokens = split_tokens(lines[0]);
284 if (first_line_tokens.size() < 2) {
285 if (!suppress_logging) {
286 log(WARNING, "[%s] Malformed response line '%s' from input.",
287 url.c_str(), lines[0].c_str());
292 int response = atoi(first_line_tokens[1].c_str());
293 if (response != 200) {
294 if (!suppress_logging) {
295 log(WARNING, "[%s] Non-200 response '%s' from input.",
296 url.c_str(), lines[0].c_str());
301 HTTPHeaderMultimap parameters = extract_headers(lines, url);
303 // Remove “Content-encoding: metacube”.
// We decode Metacube ourselves, so this header must not be passed on.
304 const auto encoding_it = parameters.find("Content-Encoding");
305 if (encoding_it != parameters.end() && encoding_it->second == "metacube") {
306 parameters.erase(encoding_it);
309 // Change “Server: foo” to “Server: metacube/0.1 (reflecting: foo)”
310 // XXX: Use a Via: instead?
311 if (parameters.count("Server") == 0) {
312 parameters.insert(make_pair("Server", SERVER_IDENTIFICATION));
// Elided else-branch: rewrite the existing Server header(s) in place.
314 for (auto &key_and_value : parameters) {
315 if (key_and_value.first != "Server") {
318 key_and_value.second = SERVER_IDENTIFICATION " (reflecting: " + key_and_value.second + ")";
322 // Erase “Connection: close”; we'll set it on the sending side if needed.
323 parameters.erase("Connection");
325 // Construct the new HTTP header.
// Always claim HTTP/1.0 200 OK towards our own clients, whatever the
// upstream's exact status line was.
326 http_header = "HTTP/1.0 200 OK\r\n";
327 for (const auto &key_and_value : parameters) {
328 http_header.append(key_and_value.first + ": " + key_and_value.second + "\r\n");
// Push the rewritten header to every stream fed by this input.
331 for (int stream_index : stream_indices) {
332 servers->set_header(stream_index, http_header, stream_header);
// Main worker loop: a state machine driving connect → send request →
// receive header → receive data, with a 30-second inactivity timeout on
// all socket states and automatic reconnect (with log suppression after
// repeated failures). Runs until should_stop().
338 void HTTPInput::do_work()
340 timespec last_activity;
342 // TODO: Make the timeout persist across restarts.
// If we were deserialized mid-connection, start the activity clock now.
343 if (state == SENDING_REQUEST || state == RECEIVING_HEADER || state == RECEIVING_DATA) {
344 int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
348 while (!should_stop()) {
349 if (state == SENDING_REQUEST || state == RECEIVING_HEADER || state == RECEIVING_DATA) {
350 // Give the socket 30 seconds since last activity before we time out.
351 static const int timeout_secs = 30;
354 int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
357 timespec elapsed = clock_diff(last_activity, now);
358 if (elapsed.tv_sec >= timeout_secs) {
360 if (!suppress_logging) {
361 log(ERROR, "[%s] Timeout after %d seconds, closing.", url.c_str(), elapsed.tv_sec);
363 state = CLOSING_SOCKET;
367 // Basically calculate (30 - (now - last_activity)) = (30 + (last_activity - now)).
368 // Add a second of slack to account for differences between clocks.
369 timespec timeout = clock_diff(now, last_activity);
370 timeout.tv_sec += timeout_secs + 1;
371 assert(timeout.tv_sec > 0 || (timeout.tv_sec >= 0 && timeout.tv_nsec > 0));
// Wait for writability while sending the request, readability otherwise.
373 bool activity = wait_for_activity(sock, (state == SENDING_REQUEST) ? POLLOUT : POLLIN, &timeout);
375 err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
378 // OK. Most likely, should_stop was set, or we have timed out.
// State dispatch (the enclosing switch statement is elided).
384 case NOT_CONNECTED: {
385 // Reap any exited children.
// Non-blocking waitpid loop; the loop structure itself is elided.
388 err = waitpid(-1, &wstatus, WNOHANG);
390 if (errno == EINTR) {
393 if (errno == ECHILD) {
396 log_perror("waitpid");
// Reset all per-connection parser state before (re)connecting.
402 request_bytes_sent = 0;
404 pending_data.clear();
405 has_metacube_header = false;
406 for (int stream_index : stream_indices) {
407 // Don't zero out the header; it might still be of use to HLS clients.
408 servers->set_unavailable(stream_index);
413 string user; // Thrown away.
414 if (!parse_url(url, &protocol, &user, &host, &port, &path)) {
415 if (!suppress_logging) {
416 log(WARNING, "[%s] Failed to parse URL '%s'", url.c_str(), url.c_str());
421 // Remove the brackets around IPv6 address literals.
422 // TODO: See if we can join this with the code in parse_ip_address(),
423 // or maybe even more it into parse_url().
424 if (!host.empty() && host[0] == '[' && host[host.size() - 1] == ']') {
425 host = host.substr(1, host.size() - 2);
429 if (suppress_logging) {
430 // See if there's more than one minute since last time we made a connection
431 // with logging enabled. If so, turn it on again.
433 int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
436 double elapsed = now.tv_sec - last_verbose_connection.tv_sec +
437 1e-9 * (now.tv_nsec - last_verbose_connection.tv_nsec);
438 if (elapsed > 60.0) {
439 suppress_logging = false;
// Remember when we last attempted a connection with logging on.
442 if (!suppress_logging) {
443 int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_verbose_connection);
446 ++num_connection_attempts;
// pipe: URLs spawn a child process instead of opening a TCP socket.
447 if (protocol == "pipe") {
448 sock = open_child_process(path.c_str());
451 // Construct a minimal HTTP header.
// A pipe has no HTTP handshake, so skip straight to data.
452 http_header = "HTTP/1.0 200 OK\r\n";
453 for (int stream_index : stream_indices) {
454 servers->set_header(stream_index, http_header, stream_header);
456 state = RECEIVING_DATA;
// Elided else-branch: plain http connections resolve and connect.
459 sock = lookup_and_connect(host, port);
461 // Yay, successful connect.
462 state = SENDING_REQUEST;
463 request = "GET " + path + " HTTP/1.0\r\nHost: " + host_header(host, port) + "\r\nUser-Agent: cubemap\r\n\r\n";
464 request_bytes_sent = 0;
// Mark the connect time and restart the inactivity clock.
468 lock_guard<mutex> lock(stats_mutex);
469 stats.connect_time = time(nullptr);
470 clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
474 case SENDING_REQUEST: {
// Write as much of the remaining request as the socket accepts.
475 size_t to_send = request.size() - request_bytes_sent;
479 ret = write(sock, request.data() + request_bytes_sent, to_send);
480 } while (ret == -1 && errno == EINTR);
484 state = CLOSING_SOCKET;
489 request_bytes_sent += ret;
491 if (request_bytes_sent == request.size()) {
492 state = RECEIVING_HEADER;
496 case RECEIVING_HEADER: {
501 ret = read(sock, buf, sizeof(buf));
502 } while (ret == -1 && errno == EINTR);
506 state = CLOSING_SOCKET;
511 // This really shouldn't happen...
// ret == 0: upstream closed before completing the header.
512 if (!suppress_logging) {
513 log(ERROR, "[%s] Socket unexpectedly closed while reading header",
516 state = CLOSING_SOCKET;
// Accumulate into `response` until we see the blank line ending the header.
520 RequestParseStatus status = wait_for_double_newline(&response, buf, ret);
522 if (status == RP_OUT_OF_SPACE) {
523 if (!suppress_logging) {
524 log(WARNING, "[%s] Server sent overlong HTTP response!", url.c_str());
526 state = CLOSING_SOCKET;
528 } else if (status == RP_NOT_FINISHED_YET) {
532 // OK, so we're fine, but there might be some of the actual data after the response.
533 // We'll need to deal with that separately.
535 if (status == RP_EXTRA_DATA) {
// Split `response` at the header/body boundary; everything after
// the \r\n\r\n is the first chunk of stream data.
536 char *ptr = static_cast<char *>(
537 memmem(response.data(), response.size(), "\r\n\r\n", 4));
538 assert(ptr != nullptr);
539 extra_data = string(ptr + 4, &response[0] + response.size());
540 response.resize(ptr - response.data());
543 if (!parse_response(response)) {
544 state = CLOSING_SOCKET;
548 if (!extra_data.empty()) {
549 process_data(&extra_data[0], extra_data.size());
552 if (!suppress_logging) {
553 if (encoding == Input::INPUT_ENCODING_RAW) {
554 log(INFO, "[%s] Connected to '%s', receiving raw data.",
555 url.c_str(), url.c_str());
557 assert(encoding == Input::INPUT_ENCODING_METACUBE);
558 log(INFO, "[%s] Connected to '%s', receiving data.",
559 url.c_str(), url.c_str());
562 state = RECEIVING_DATA;
565 case RECEIVING_DATA: {
570 ret = read(sock, buf, sizeof(buf));
571 } while (ret == -1 && errno == EINTR);
575 state = CLOSING_SOCKET;
580 // This really shouldn't happen...
// ret == 0: upstream closed the connection mid-stream.
581 if (!suppress_logging) {
582 log(ERROR, "[%s] Socket unexpectedly closed while reading data",
585 state = CLOSING_SOCKET;
589 num_connection_attempts = 0; // Reset, since we have a successful read.
590 if (suppress_logging) {
591 // This was suppressed earlier, so print it out now.
592 if (encoding == Input::INPUT_ENCODING_RAW) {
593 log(INFO, "[%s] Connected to '%s', receiving raw data.",
594 url.c_str(), url.c_str());
596 assert(encoding == Input::INPUT_ENCODING_METACUBE);
597 log(INFO, "[%s] Connected to '%s', receiving data.",
598 url.c_str(), url.c_str());
600 suppress_logging = false;
603 process_data(buf, ret);
606 case CLOSING_SOCKET: {
608 state = NOT_CONNECTED;
615 // If we are still in NOT_CONNECTED, either something went wrong,
616 // or the connection just got closed.
617 // The earlier steps have already given the error message, if any.
618 if (state == NOT_CONNECTED && !should_stop()) {
619 if (!suppress_logging) {
620 log(INFO, "[%s] Waiting 0.2 seconds and restarting...", url.c_str());
// After three failed attempts in a row, go quiet for a minute to
// avoid log spam (re-enabled by the elapsed check above).
623 if (num_connection_attempts >= 3 && !suppress_logging) {
624 log(INFO, "[%s] %d failed connection attempts, suppressing logging for one minute.",
625 url.c_str(), num_connection_attempts);
626 suppress_logging = true;
// Back off 200 ms before reconnecting, but stay wakeable.
629 timeout_ts.tv_sec = 0;
630 timeout_ts.tv_nsec = 200000000;
631 wait_for_wakeup(&timeout_ts);
// Process a chunk of bytes received from the input. Raw streams are
// forwarded directly; Metacube streams are buffered in pending_data and
// parsed block-by-block (sync marker → header CRC → full payload) before
// being forwarded or, for metadata blocks, consumed internally.
636 void HTTPInput::process_data(char *ptr, size_t bytes)
639 lock_guard<mutex> lock(stats_mutex);
640 stats.bytes_received += bytes;
643 if (encoding == Input::INPUT_ENCODING_RAW) {
644 for (int stream_index : stream_indices) {
645 servers->add_data(stream_index, ptr, bytes, /*metacube_flags=*/0, /*pts=*/RationalPTS());
650 assert(encoding == Input::INPUT_ENCODING_METACUBE);
651 pending_data.insert(pending_data.end(), ptr, ptr + bytes);
// Block-parsing loop (the loop construct itself is elided from this view).
654 // If we don't have enough data (yet) for even the Metacube header, just return.
655 if (pending_data.size() < sizeof(metacube2_block_header)) {
659 // Make sure we have the Metacube sync header at the start.
660 // We may need to skip over junk data (it _should_ not happen, though).
661 if (!has_metacube_header) {
662 char *ptr = static_cast<char *>(
663 memmem(pending_data.data(), pending_data.size(),
664 METACUBE2_SYNC, strlen(METACUBE2_SYNC)));
665 if (ptr == nullptr) {
666 // OK, so we didn't find the sync marker. We know then that
667 // we do not have the _full_ marker in the buffer, but we
668 // could have N-1 bytes. Drop everything before that,
// ...keeping the last N-1 bytes in case they are a marker prefix.
670 drop_pending_data(pending_data.size() - (strlen(METACUBE2_SYNC) - 1));
673 // Yay, we found the header. Drop everything (if anything) before it.
674 drop_pending_data(ptr - pending_data.data());
675 has_metacube_header = true;
677 // Re-check that we have the entire header; we could have dropped data.
678 if (pending_data.size() < sizeof(metacube2_block_header)) {
684 // Now it's safe to read the header.
685 metacube2_block_header hdr;
686 memcpy(&hdr, pending_data.data(), sizeof(hdr));
687 assert(memcmp(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync)) == 0);
// Header fields are big-endian on the wire.
688 uint32_t size = ntohl(hdr.size);
689 uint16_t flags = ntohs(hdr.flags);
690 uint16_t expected_csum = metacube2_compute_crc(&hdr);
692 if (expected_csum != ntohs(hdr.csum)) {
693 log(WARNING, "[%s] Metacube checksum failed (expected 0x%x, got 0x%x), "
694 "not reading block claiming to be %d bytes (flags=%x).",
695 url.c_str(), expected_csum, ntohs(hdr.csum),
698 // Drop only the first byte, and let the rest of the code handle resync.
699 pending_data.erase(pending_data.begin(), pending_data.begin() + 1);
700 has_metacube_header = false;
// Sanity limit: a block over 10 MB almost certainly means corruption.
703 if (size > 10485760) {
704 log(WARNING, "[%s] Metacube block of %d bytes (flags=%x); corrupted header??",
705 url.c_str(), size, flags);
708 // See if we have the entire block. If not, wait for more data.
709 if (pending_data.size() < sizeof(metacube2_block_header) + size) {
713 // See if this is a metadata block. If so, we don't want to send it on,
714 // but rather process it ourselves.
715 // TODO: Keep metadata when sending on to other Metacube users.
716 if (flags & METACUBE_FLAGS_METADATA) {
718 lock_guard<mutex> lock(stats_mutex);
719 stats.metadata_bytes_received += size;
721 process_metacube_metadata_block(hdr, pending_data.data() + sizeof(hdr), size);
723 // Send this block on to the servers.
725 lock_guard<mutex> lock(stats_mutex);
726 stats.data_bytes_received += size;
728 char *inner_data = pending_data.data() + sizeof(metacube2_block_header);
// Header blocks replace the stored stream header and are re-announced
// to all streams rather than forwarded as ordinary data.
729 if (flags & METACUBE_FLAGS_HEADER) {
730 stream_header = string(inner_data, inner_data + size);
731 for (int stream_index : stream_indices) {
732 servers->set_header(stream_index, http_header, stream_header);
735 for (int stream_index : stream_indices) {
736 servers->add_data(stream_index, inner_data, size, flags, next_block_pts);
// The pts metadata applies to exactly one following block; clear it.
738 next_block_pts = RationalPTS();
741 // Consume the block. This isn't the most efficient way of dealing with things
742 // should we have many blocks, but these routines don't need to be too efficient
744 pending_data.erase(pending_data.begin(), pending_data.begin() + sizeof(metacube2_block_header) + size);
745 has_metacube_header = false;
// Drop the first num_bytes bytes of pending_data, logging the loss
// (dropped bytes are junk from a resync, so this is worth a warning).
// A zero-byte drop is a silent no-op.
749 void HTTPInput::drop_pending_data(size_t num_bytes)
751 if (num_bytes == 0) {
754 log(WARNING, "[%s] Dropping %lld junk bytes; not a Metacube2 stream, or data was dropped from the middle of the stream.",
755 url.c_str(), (long long)num_bytes);
756 assert(pending_data.size() >= num_bytes);
757 pending_data.erase(pending_data.begin(), pending_data.begin() + num_bytes);
// Attach a server-pool stream to this input. The stream immediately gets
// whatever HTTP/stream header we currently hold, so late-added streams
// do not miss the header.
760 void HTTPInput::add_destination(int stream_index)
762 stream_indices.push_back(stream_index);
763 servers->set_header(stream_index, http_header, stream_header);
// Return a snapshot of the input statistics, taken under stats_mutex so
// the worker thread cannot update them mid-copy. (The return statement
// itself is elided from this view.)
766 InputStats HTTPInput::get_stats() const
768 lock_guard<mutex> lock(stats_mutex);
// Handle a Metacube metadata block. The payload starts with a big-endian
// 64-bit type tag; known types are encoder timestamps (used to compute
// end-to-end latency) and next-block pts packets (stored and attached to
// the next data block). Unknown types are logged and ignored.
772 void HTTPInput::process_metacube_metadata_block(const metacube2_block_header &hdr, const char *payload, uint32_t payload_size)
774 if (payload_size < sizeof(uint64_t)) {
775 log(WARNING, "[%s] Undersized Metacube metadata block (%d bytes); corrupted header?",
776 url.c_str(), payload_size);
780 uint64_t type = be64toh(*(const uint64_t *)payload);
781 if (type == METACUBE_METADATA_TYPE_ENCODER_TIMESTAMP) {
// Compare the encoder's wall clock against ours to estimate latency.
783 clock_gettime(CLOCK_REALTIME, &now);
785 const metacube2_timestamp_packet *pkt = (const metacube2_timestamp_packet *)payload;
786 if (payload_size != sizeof(*pkt)) {
787 log(WARNING, "[%s] Metacube timestamp block of wrong size (%d bytes); ignoring.",
788 url.c_str(), payload_size);
// NOTE: assumes both clocks are reasonably in sync (e.g. via NTP);
// skew shows up directly in the reported latency.
792 double elapsed = now.tv_sec - be64toh(pkt->tv_sec) +
793 1e-9 * (now.tv_nsec - long(be64toh(pkt->tv_nsec)));
795 lock_guard<mutex> lock(stats_mutex);
796 stats.latency_sec = elapsed;
798 } else if (type == METACUBE_METADATA_TYPE_NEXT_BLOCK_PTS) {
799 const metacube2_pts_packet *pkt = (const metacube2_pts_packet *)payload;
800 if (payload_size != sizeof(*pkt)) {
801 log(WARNING, "[%s] Metacube pts block of wrong size (%d bytes); ignoring.",
802 url.c_str(), payload_size);
// Remember the pts; process_data() attaches it to the next data block.
805 next_block_pts.pts = be64toh(pkt->pts);
806 next_block_pts.timebase_num = be64toh(pkt->timebase_num);
807 next_block_pts.timebase_den = be64toh(pkt->timebase_den);
809 // Unknown metadata block, ignore
810 log(INFO, "[%s] Metadata block %llu\n", url.c_str(), type);