5 #include <netinet/in.h>
10 #include <sys/ioctl.h>
11 #include <sys/socket.h>
21 #include "httpinput.h"
23 #include "metacube2.h"
25 #include "serverpool.h"
// Returns the value to use for the HTTP "Host:" request header.
// The port is appended only when it is not the default HTTP port
// (i.e., neither the service name "http" nor the number 80).
36 string host_header(const string &host, const string &port)
38 if (port == "http" || atoi(port.c_str()) == 80) {
// Non-default port: spell it out explicitly.
41 return host + ":" + port;
47 extern ServerPool *servers;
// Constructor for a fresh input: starts out disconnected with all
// traffic counters zeroed. connect_time == -1 encodes "not connected";
// latency_sec == HUGE_VAL encodes "no latency sample received yet".
49 HTTPInput::HTTPInput(const string &url, Input::Encoding encoding)
50 : state(NOT_CONNECTED),
55 stats.bytes_received = 0;
56 stats.data_bytes_received = 0;
57 stats.metadata_bytes_received = 0;
58 stats.connect_time = -1;
59 stats.latency_sec = HUGE_VAL;
// Constructor restoring an input from serialized protobuf state, the
// inverse of serialize(). Note that the open socket fd and any buffered
// (pending) stream bytes are carried over, so an in-flight connection
// can be resumed seamlessly — presumably across a process restart
// (TODO confirm against the caller of this constructor).
62 HTTPInput::HTTPInput(const InputProto &serialized)
63 : state(State(serialized.state())),
64 url(serialized.url()),
65 encoding(serialized.is_metacube_encoded() ?
66 Input::INPUT_ENCODING_METACUBE :
67 Input::INPUT_ENCODING_RAW),
68 request(serialized.request()),
69 request_bytes_sent(serialized.request_bytes_sent()),
70 response(serialized.response()),
71 http_header(serialized.http_header()),
72 stream_header(serialized.stream_header()),
73 has_metacube_header(serialized.has_metacube_header()),
74 sock(serialized.sock())
// Copy back any partially-received data that had not yet been
// consumed when the state was serialized.
76 pending_data.resize(serialized.pending_data().size());
77 memcpy(&pending_data[0], serialized.pending_data().data(), serialized.pending_data().size());
79 string protocol, user;
80 parse_url(url, &protocol, &user, &host, &port, &path); // Don't care if it fails.
// Restore the statistics counters.
83 stats.bytes_received = serialized.bytes_received();
84 stats.data_bytes_received = serialized.data_bytes_received();
85 stats.metadata_bytes_received = serialized.metadata_bytes_received();
86 if (serialized.has_connect_time()) {
87 stats.connect_time = serialized.connect_time();
// Serialized state lacks a connect time; approximate with "now".
89 stats.connect_time = time(nullptr);
91 if (serialized.has_latency_sec()) {
92 stats.latency_sec = serialized.latency_sec();
// No stored sample; HUGE_VAL means "no latency measurement yet".
94 stats.latency_sec = HUGE_VAL;
// Pretend the last verbose connection happened long ago, so the first
// connection attempt after restoring is logged at full verbosity.
97 last_verbose_connection.tv_sec = -3600;
98 last_verbose_connection.tv_nsec = 0;
// Closes the input's socket and, under the stats lock, resets
// connect_time to -1 ("not connected") so get_stats() reflects the
// disconnected state.
101 void HTTPInput::close_socket()
108 lock_guard<mutex> lock(stats_mutex);
109 stats.connect_time = -1;
// Serializes the complete connection state — including the socket fd
// and any pending buffered data — into an InputProto. This is the
// inverse of the InputProto-taking constructor above.
112 InputProto HTTPInput::serialize() const
114 InputProto serialized;
115 serialized.set_state(state);
116 serialized.set_url(url);
117 serialized.set_request(request);
118 serialized.set_request_bytes_sent(request_bytes_sent);
119 serialized.set_response(response);
120 serialized.set_http_header(http_header);
121 serialized.set_stream_header(stream_header);
// pending_data is a byte vector; store it as a protobuf string.
122 serialized.set_pending_data(string(pending_data.begin(), pending_data.end()));
123 serialized.set_has_metacube_header(has_metacube_header);
124 serialized.set_sock(sock);
125 serialized.set_bytes_received(stats.bytes_received);
126 serialized.set_data_bytes_received(stats.data_bytes_received);
// Only store latency when we actually have a sample; HUGE_VAL (the
// "no sample" sentinel) is not finite and is deliberately skipped.
127 if (isfinite(stats.latency_sec)) {
128 serialized.set_latency_sec(stats.latency_sec);
130 serialized.set_connect_time(stats.connect_time);
131 if (encoding == Input::INPUT_ENCODING_METACUBE) {
132 serialized.set_is_metacube_encoded(true);
134 assert(encoding == Input::INPUT_ENCODING_RAW);
135 serialized.set_is_metacube_encoded(false);
// Resolves 'host' via getaddrinfo() and tries to connect a TCP socket
// to each returned address in turn. The connect is done non-blocking
// (FIONBIO + wait_for_activity) so the thread can be woken up — e.g.
// by should_stop() — while waiting for the connection to complete.
// Presumably returns the connected socket fd, or a negative value on
// failure (the return statements are outside this view — TODO confirm).
140 int HTTPInput::lookup_and_connect(const string &host, const string &port)
143 int err = getaddrinfo(host.c_str(), port.c_str(), nullptr, &ai);
145 if (!suppress_logging) {
146 log(WARNING, "[%s] Lookup of '%s' failed (%s).",
147 url.c_str(), host.c_str(), gai_strerror(err));
// Keep the head of the list so we can freeaddrinfo() it later,
// since 'ai' itself is advanced by the loop below.
152 addrinfo *base_ai = ai;
154 // Connect to everything in turn until we have a socket.
155 for ( ; ai && !should_stop(); ai = ai->ai_next) {
156 int sock = socket(ai->ai_family, SOCK_STREAM, IPPROTO_TCP);
158 // Could be e.g. EPROTONOSUPPORT. The show must go on.
162 // Now do a non-blocking connect. This is important because we want to be able to be
163 // woken up, even though it's rather cumbersome.
165 // Set the socket as nonblocking.
167 if (ioctl(sock, FIONBIO, &one) == -1) {
168 log_perror("ioctl(FIONBIO)");
173 // Do a non-blocking connect.
// Retry if interrupted by a signal; EINPROGRESS below is the
// normal outcome for a non-blocking connect.
175 err = connect(sock, ai->ai_addr, ai->ai_addrlen);
176 } while (err == -1 && errno == EINTR);
178 if (err == -1 && errno != EINPROGRESS) {
179 log_perror("connect");
184 // Wait for the connect to complete, or an error to happen.
186 bool complete = wait_for_activity(sock, POLLIN | POLLOUT, nullptr);
196 // Check whether it ended in an error or not.
// SO_ERROR retrieves (and clears) the deferred connect() result.
197 socklen_t err_size = sizeof(err);
198 if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &err_size) == -1) {
199 log_perror("getsockopt");
207 // Successful connect.
208 freeaddrinfo(base_ai);
215 // Give the last one as error.
216 if (!suppress_logging) {
217 log(WARNING, "[%s] Connect to '%s' failed (%s)",
218 url.c_str(), host.c_str(), strerror(errno));
220 freeaddrinfo(base_ai);
// Parses the HTTP response stored in the 'response' member, verifies it
// is a 200, rewrites selected headers (strips the Metacube content
// encoding, rebrands "Server:", drops "Connection:"), stores the result
// in http_header, and pushes it to all destination streams.
// Returns false (via paths outside this view) on malformed or non-200
// responses.
224 bool HTTPInput::parse_response(const string &request)
226 vector<string> lines = split_lines(response);
228 if (!suppress_logging) {
229 log(WARNING, "[%s] Empty HTTP response from input.", url.c_str());
// The status line should be e.g. "HTTP/1.0 200 OK" — at least
// version and status code.
234 vector<string> first_line_tokens = split_tokens(lines[0]);
235 if (first_line_tokens.size() < 2) {
236 if (!suppress_logging) {
237 log(WARNING, "[%s] Malformed response line '%s' from input.",
238 url.c_str(), lines[0].c_str());
// NOTE(review): this local 'response' (the numeric status code)
// shadows the 'response' member (the raw response string) used above.
243 int response = atoi(first_line_tokens[1].c_str());
244 if (response != 200) {
245 if (!suppress_logging) {
246 log(WARNING, "[%s] Non-200 response '%s' from input.",
247 url.c_str(), lines[0].c_str());
252 multimap<string, string> parameters = extract_headers(lines, url);
254 // Remove “Content-encoding: metacube”.
255 // TODO: Make case-insensitive.
256 const auto encoding_it = parameters.find("Content-encoding");
257 if (encoding_it != parameters.end() && encoding_it->second == "metacube") {
258 parameters.erase(encoding_it);
261 // Change “Server: foo” to “Server: metacube/0.1 (reflecting: foo)”
262 // TODO: Make case-insensitive.
263 // XXX: Use a Via: instead?
264 if (parameters.count("Server") == 0) {
265 parameters.insert(make_pair("Server", SERVER_IDENTIFICATION));
267 for (auto &key_and_value : parameters) {
268 if (key_and_value.first != "Server") {
271 key_and_value.second = SERVER_IDENTIFICATION " (reflecting: " + key_and_value.second + ")";
275 // Erase “Connection: close”; we'll set it on the sending side if needed.
276 // TODO: Make case-insensitive.
277 parameters.erase("Connection");
279 // Construct the new HTTP header.
280 http_header = "HTTP/1.0 200 OK\r\n";
281 for (const auto &key_and_value : parameters) {
282 http_header.append(key_and_value.first + ": " + key_and_value.second + "\r\n");
// Propagate the rewritten header to every stream fed by this input.
285 for (int stream_index : stream_indices) {
286 servers->set_header(stream_index, http_header, stream_header);
// Main worker loop for the input thread. Runs a state machine:
// NOT_CONNECTED -> (resolve + connect) -> SENDING_REQUEST ->
// RECEIVING_HEADER -> RECEIVING_DATA -> CLOSING_SOCKET -> NOT_CONNECTED,
// until should_stop() becomes true. Any connected state enforces a
// 30-second inactivity timeout; failed connections are retried after a
// 0.2 second backoff, with logging suppressed after repeated failures.
292 void HTTPInput::do_work()
294 timespec last_activity;
296 // TODO: Make the timeout persist across restarts.
297 if (state == SENDING_REQUEST || state == RECEIVING_HEADER || state == RECEIVING_DATA) {
298 int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
302 while (!should_stop()) {
303 if (state == SENDING_REQUEST || state == RECEIVING_HEADER || state == RECEIVING_DATA) {
304 // Give the socket 30 seconds since last activity before we time out.
305 static const int timeout_secs = 30;
308 int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
311 timespec elapsed = clock_diff(last_activity, now);
312 if (elapsed.tv_sec >= timeout_secs) {
314 if (!suppress_logging) {
// NOTE(review): %d with elapsed.tv_sec (time_t, typically long)
// is a format mismatch on LP64 — consider %ld or a cast.
315 log(ERROR, "[%s] Timeout after %d seconds, closing.", url.c_str(), elapsed.tv_sec);
317 state = CLOSING_SOCKET;
321 // Basically calculate (30 - (now - last_activity)) = (30 + (last_activity - now)).
322 // Add a second of slack to account for differences between clocks.
323 timespec timeout = clock_diff(now, last_activity);
324 timeout.tv_sec += timeout_secs + 1;
325 assert(timeout.tv_sec > 0 || (timeout.tv_sec >= 0 && timeout.tv_nsec > 0));
// Wait for writability while sending the request, readability
// otherwise; wakes early on should_stop or timeout.
327 bool activity = wait_for_activity(sock, (state == SENDING_REQUEST) ? POLLOUT : POLLIN, &timeout);
329 err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
332 // OK. Most likely, should_stop was set, or we have timed out.
// NOT_CONNECTED: reset all per-connection state before reconnecting.
340 request_bytes_sent = 0;
342 pending_data.clear();
343 has_metacube_header = false;
344 for (int stream_index : stream_indices) {
345 servers->set_header(stream_index, "", "");
349 string protocol, user; // Thrown away.
350 if (!parse_url(url, &protocol, &user, &host, &port, &path)) {
351 if (!suppress_logging) {
352 log(WARNING, "[%s] Failed to parse URL '%s'", url.c_str(), url.c_str())&#x3B;
357 // Remove the brackets around IPv6 address literals.
358 // TODO: See if we can join this with the code in parse_ip_address(),
359 // or maybe even move it into parse_url().
360 if (!host.empty() && host[0] == '[' && host[host.size() - 1] == ']') {
361 host = host.substr(1, host.size() - 2);
365 if (suppress_logging) {
366 // See if there's more than one minute since last time we made a connection
367 // with logging enabled. If so, turn it on again.
369 int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
372 double elapsed = now.tv_sec - last_verbose_connection.tv_sec +
373 1e-9 * (now.tv_nsec - last_verbose_connection.tv_nsec);
374 if (elapsed > 60.0) {
375 suppress_logging = false;
378 if (!suppress_logging) {
379 int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_verbose_connection);
382 ++num_connection_attempts;
383 sock = lookup_and_connect(host, port);
385 // Yay, successful connect. Try to set it as nonblocking.
387 if (ioctl(sock, FIONBIO, &one) == -1) {
388 log_perror("ioctl(FIONBIO)");
389 state = CLOSING_SOCKET;
391 state = SENDING_REQUEST;
// Build a minimal HTTP/1.0 GET request for the stream.
392 request = "GET " + path + " HTTP/1.0\r\nHost: " + host_header(host, port) + "\r\nUser-Agent: cubemap\r\n\r\n";
393 request_bytes_sent = 0;
396 lock_guard<mutex> lock(stats_mutex);
397 stats.connect_time = time(nullptr);
398 clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
401 case SENDING_REQUEST: {
// Write the remainder of the request; partial writes advance
// request_bytes_sent and we come back for the rest.
402 size_t to_send = request.size() - request_bytes_sent;
406 ret = write(sock, request.data() + request_bytes_sent, to_send);
407 } while (ret == -1 && errno == EINTR);
411 state = CLOSING_SOCKET;
416 request_bytes_sent += ret;
418 if (request_bytes_sent == request.size()) {
419 state = RECEIVING_HEADER;
423 case RECEIVING_HEADER: {
428 ret = read(sock, buf, sizeof(buf));
429 } while (ret == -1 && errno == EINTR);
433 state = CLOSING_SOCKET;
438 // This really shouldn't happen...
439 if (!suppress_logging) {
440 log(ERROR, "[%s] Socket unexpectedly closed while reading header",
443 state = CLOSING_SOCKET;
// Accumulate into 'response' until the blank line ("\r\n\r\n")
// terminating the HTTP header has been seen.
447 RequestParseStatus status = wait_for_double_newline(&response, buf, ret);
449 if (status == RP_OUT_OF_SPACE) {
450 if (!suppress_logging) {
451 log(WARNING, "[%s] Server sent overlong HTTP response!", url.c_str());
453 state = CLOSING_SOCKET;
455 } else if (status == RP_NOT_FINISHED_YET) {
459 // OK, so we're fine, but there might be some of the actual data after the response.
460 // We'll need to deal with that separately.
462 if (status == RP_EXTRA_DATA) {
463 char *ptr = static_cast<char *>(
464 memmem(response.data(), response.size(), "\r\n\r\n", 4));
465 assert(ptr != nullptr);
// Split off everything after the header into extra_data and trim
// 'response' down to just the header text.
466 extra_data = string(ptr + 4, &response[0] + response.size());
467 response.resize(ptr - response.data());
470 if (!parse_response(response)) {
471 state = CLOSING_SOCKET;
// Stream bytes that arrived together with the header are fed to
// the normal data path right away.
475 if (!extra_data.empty()) {
476 process_data(&extra_data[0], extra_data.size());
479 if (!suppress_logging) {
480 if (encoding == Input::INPUT_ENCODING_RAW) {
481 log(INFO, "[%s] Connected to '%s', receiving raw data.",
482 url.c_str(), url.c_str());
484 assert(encoding == Input::INPUT_ENCODING_METACUBE);
485 log(INFO, "[%s] Connected to '%s', receiving data.",
486 url.c_str(), url.c_str());
489 state = RECEIVING_DATA;
492 case RECEIVING_DATA: {
497 ret = read(sock, buf, sizeof(buf));
498 } while (ret == -1 && errno == EINTR);
502 state = CLOSING_SOCKET;
507 // This really shouldn't happen...
508 if (!suppress_logging) {
509 log(ERROR, "[%s] Socket unexpectedly closed while reading data",
512 state = CLOSING_SOCKET;
516 num_connection_attempts = 0; // Reset, since we have a successful read.
517 if (suppress_logging) {
518 // This was suppressed earlier, so print it out now.
519 if (encoding == Input::INPUT_ENCODING_RAW) {
520 log(INFO, "[%s] Connected to '%s', receiving raw data.",
521 url.c_str(), url.c_str());
523 assert(encoding == Input::INPUT_ENCODING_METACUBE);
524 log(INFO, "[%s] Connected to '%s', receiving data.",
525 url.c_str(), url.c_str());
527 suppress_logging = false;
530 process_data(buf, ret);
533 case CLOSING_SOCKET: {
535 state = NOT_CONNECTED;
542 // If we are still in NOT_CONNECTED, either something went wrong,
543 // or the connection just got closed.
544 // The earlier steps have already given the error message, if any.
545 if (state == NOT_CONNECTED && !should_stop()) {
546 if (!suppress_logging) {
547 log(INFO, "[%s] Waiting 0.2 seconds and restarting...", url.c_str());
550 if (num_connection_attempts >= 3 && !suppress_logging) {
551 log(INFO, "[%s] %d failed connection attempts, suppressing logging for one minute.",
552 url.c_str(), num_connection_attempts);
553 suppress_logging = true;
// Back off 200 ms before the next connection attempt; the wait is
// interruptible so shutdown is not delayed.
556 timeout_ts.tv_sec = 0;
557 timeout_ts.tv_nsec = 200000000;
558 wait_for_wakeup(&timeout_ts);
// Handles 'bytes' freshly-received stream bytes. Raw-encoded inputs are
// forwarded to the servers verbatim; Metacube-encoded inputs are
// buffered in pending_data and parsed block by block: resync on the
// Metacube2 sync marker, validate the header CRC, split metadata blocks
// (handled locally) from data blocks (forwarded to the servers).
563 void HTTPInput::process_data(char *ptr, size_t bytes)
566 lock_guard<mutex> lock(stats_mutex);
567 stats.bytes_received += bytes;
// Raw mode: pass the bytes straight through to every stream.
570 if (encoding == Input::INPUT_ENCODING_RAW) {
571 for (int stream_index : stream_indices) {
572 servers->add_data(stream_index, ptr, bytes, /*metacube_flags=*/0, /*pts=*/RationalPTS());
577 assert(encoding == Input::INPUT_ENCODING_METACUBE);
578 pending_data.insert(pending_data.end(), ptr, ptr + bytes);
581 // If we don't have enough data (yet) for even the Metacube header, just return.
582 if (pending_data.size() < sizeof(metacube2_block_header)) {
586 // Make sure we have the Metacube sync header at the start.
587 // We may need to skip over junk data (it _should_ not happen, though).
588 if (!has_metacube_header) {
589 char *ptr = static_cast<char *>(
590 memmem(pending_data.data(), pending_data.size(),
591 METACUBE2_SYNC, strlen(METACUBE2_SYNC)));
592 if (ptr == nullptr) {
593 // OK, so we didn't find the sync marker. We know then that
594 // we do not have the _full_ marker in the buffer, but we
595 // could have N-1 bytes. Drop everything before that,
597 drop_pending_data(pending_data.size() - (strlen(METACUBE2_SYNC) - 1));
600 // Yay, we found the header. Drop everything (if anything) before it.
601 drop_pending_data(ptr - pending_data.data());
602 has_metacube_header = true;
604 // Re-check that we have the entire header; we could have dropped data.
605 if (pending_data.size() < sizeof(metacube2_block_header)) {
611 // Now it's safe to read the header.
612 metacube2_block_header hdr;
613 memcpy(&hdr, pending_data.data(), sizeof(hdr));
614 assert(memcmp(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync)) == 0);
// All header fields are in network byte order.
615 uint32_t size = ntohl(hdr.size);
616 uint16_t flags = ntohs(hdr.flags);
617 uint16_t expected_csum = metacube2_compute_crc(&hdr);
619 if (expected_csum != ntohs(hdr.csum)) {
620 log(WARNING, "[%s] Metacube checksum failed (expected 0x%x, got 0x%x), "
621 "not reading block claiming to be %d bytes (flags=%x).",
622 url.c_str(), expected_csum, ntohs(hdr.csum),
625 // Drop only the first byte, and let the rest of the code handle resync.
626 pending_data.erase(pending_data.begin(), pending_data.begin() + 1);
627 has_metacube_header = false;
// Sanity check: blocks over 10 MB indicate a corrupted header.
630 if (size > 10485760) {
631 log(WARNING, "[%s] Metacube block of %d bytes (flags=%x); corrupted header??",
632 url.c_str(), size, flags);
635 // See if we have the entire block. If not, wait for more data.
636 if (pending_data.size() < sizeof(metacube2_block_header) + size) {
640 // See if this is a metadata block. If so, we don't want to send it on,
641 // but rather process it ourselves.
642 // TODO: Keep metadata when sending on to other Metacube users.
643 if (flags & METACUBE_FLAGS_METADATA) {
645 lock_guard<mutex> lock(stats_mutex);
646 stats.metadata_bytes_received += size;
648 process_metacube_metadata_block(hdr, pending_data.data() + sizeof(hdr), size);
650 // Send this block on to the servers.
652 lock_guard<mutex> lock(stats_mutex);
653 stats.data_bytes_received += size;
655 char *inner_data = pending_data.data() + sizeof(metacube2_block_header);
656 if (flags & METACUBE_FLAGS_HEADER) {
// A header block updates the per-stream stream header (together
// with the HTTP header) for all destinations.
657 stream_header = string(inner_data, inner_data + size);
658 for (int stream_index : stream_indices) {
659 servers->set_header(stream_index, http_header, stream_header);
662 for (int stream_index : stream_indices) {
663 servers->add_data(stream_index, inner_data, size, flags, next_block_pts);
// The PTS applies to exactly one block; reset it afterwards.
665 next_block_pts = RationalPTS();
668 // Consume the block. This isn't the most efficient way of dealing with things
669 // should we have many blocks, but these routines don't need to be too efficient
671 pending_data.erase(pending_data.begin(), pending_data.begin() + sizeof(metacube2_block_header) + size);
672 has_metacube_header = false;
// Discards the first 'num_bytes' bytes of pending_data, logging a
// warning about the dropped junk (no log when num_bytes == 0).
676 void HTTPInput::drop_pending_data(size_t num_bytes)
678 if (num_bytes == 0) {
681 log(WARNING, "[%s] Dropping %lld junk bytes; not a Metacube2 stream, or data was dropped from the middle of the stream.",
682 url.c_str(), (long long)num_bytes);
683 assert(pending_data.size() >= num_bytes);
684 pending_data.erase(pending_data.begin(), pending_data.begin() + num_bytes);
// Registers a stream as a destination for this input and immediately
// pushes the current HTTP/stream headers to it, so late-added streams
// start with the right header state.
687 void HTTPInput::add_destination(int stream_index)
689 stream_indices.push_back(stream_index);
690 servers->set_header(stream_index, http_header, stream_header);
// Returns a snapshot of the traffic statistics, taken under the stats
// lock so readers never see a half-updated InputStats.
693 InputStats HTTPInput::get_stats() const
695 lock_guard<mutex> lock(stats_mutex);
// Interprets a Metacube2 metadata block. The first 8 bytes of the
// payload are a big-endian type tag; currently understood types are
// encoder timestamps (used to compute end-to-end latency into
// stats.latency_sec) and next-block PTS packets (stored in
// next_block_pts for the following data block). Unknown types are
// logged and ignored.
699 void HTTPInput::process_metacube_metadata_block(const metacube2_block_header &hdr, const char *payload, uint32_t payload_size)
701 if (payload_size < sizeof(uint64_t)) {
702 log(WARNING, "[%s] Undersized Metacube metadata block (%d bytes); corrupted header?",
703 url.c_str(), payload_size);
707 uint64_t type = be64toh(*(const uint64_t *)payload);
708 if (type == METACUBE_METADATA_TYPE_ENCODER_TIMESTAMP) {
// Sample wall-clock time first, then compare against the
// encoder's (big-endian) timestamp to estimate latency.
710 clock_gettime(CLOCK_REALTIME, &now);
712 const metacube2_timestamp_packet *pkt = (const metacube2_timestamp_packet *)payload;
713 if (payload_size != sizeof(*pkt)) {
714 log(WARNING, "[%s] Metacube timestamp block of wrong size (%d bytes); ignoring.",
715 url.c_str(), payload_size);
719 double elapsed = now.tv_sec - be64toh(pkt->tv_sec) +
720 1e-9 * (now.tv_nsec - long(be64toh(pkt->tv_nsec)));
722 lock_guard<mutex> lock(stats_mutex);
723 stats.latency_sec = elapsed;
725 } else if (type == METACUBE_METADATA_TYPE_NEXT_BLOCK_PTS) {
726 const metacube2_pts_packet *pkt = (const metacube2_pts_packet *)payload;
727 if (payload_size != sizeof(*pkt)) {
728 log(WARNING, "[%s] Metacube pts block of wrong size (%d bytes); ignoring.",
729 url.c_str(), payload_size);
// Remember the PTS; it is attached to (and reset after) the next
// data block in process_data().
732 next_block_pts.pts = be64toh(pkt->pts);
733 next_block_pts.timebase_num = be64toh(pkt->timebase_num);
734 next_block_pts.timebase_den = be64toh(pkt->timebase_den);
736 // Unknown metadata block, ignore
// NOTE(review): %llu expects unsigned long long; uint64_t may be
// unsigned long on LP64 — consider a (long long unsigned) cast.
737 log(INFO, "[%s] Metadata block %llu\n", url.c_str(), type);