X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=httpinput.cpp;h=9cacc1f0041e8ebccbaccf6c101c35d82a7c899a;hp=9dcaeaa0191dd1951fc8fdb15f34148468c563c0;hb=d5f3f941faaf113936113fc2105bf59913e9125e;hpb=bff5371d96506c8571fdeeafc5404c362022685b

diff --git a/httpinput.cpp b/httpinput.cpp
index 9dcaeaa..9cacc1f 100644
--- a/httpinput.cpp
+++ b/httpinput.cpp
@@ -1,5 +1,6 @@
 #include
 #include
+#include <math.h>
 #include
 #include
 #include
@@ -11,6 +12,7 @@
 #include
 #include
 #include
+#include <mutex>
 #include
 #include
 #include
@@ -19,7 +21,6 @@
 #include "httpinput.h"
 #include "log.h"
 #include "metacube2.h"
-#include "mutexlock.h"
 #include "parse.h"
 #include "serverpool.h"
 #include "state.pb.h"
@@ -48,15 +49,14 @@ extern ServerPool *servers;
 
 HTTPInput::HTTPInput(const string &url, Input::Encoding encoding)
 	: state(NOT_CONNECTED),
 	  url(url),
-	  encoding(encoding),
-	  has_metacube_header(false),
-	  sock(-1)
+	  encoding(encoding)
 {
-	pthread_mutex_init(&stats_mutex, NULL);
 	stats.url = url;
 	stats.bytes_received = 0;
 	stats.data_bytes_received = 0;
+	stats.metadata_bytes_received = 0;
 	stats.connect_time = -1;
+	stats.latency_sec = HUGE_VAL;
 }
 
 HTTPInput::HTTPInput(const InputProto &serialized)
@@ -79,15 +79,23 @@ HTTPInput::HTTPInput(const InputProto &serialized)
 	string protocol, user;
 	parse_url(url, &protocol, &user, &host, &port, &path);  // Don't care if it fails.
 
-	pthread_mutex_init(&stats_mutex, NULL);
 	stats.url = url;
 	stats.bytes_received = serialized.bytes_received();
 	stats.data_bytes_received = serialized.data_bytes_received();
+	stats.metadata_bytes_received = serialized.metadata_bytes_received();
 	if (serialized.has_connect_time()) {
 		stats.connect_time = serialized.connect_time();
 	} else {
-		stats.connect_time = time(NULL);
+		stats.connect_time = time(nullptr);
 	}
+	if (serialized.has_latency_sec()) {
+		stats.latency_sec = serialized.latency_sec();
+	} else {
+		stats.latency_sec = HUGE_VAL;
+	}
+
+	last_verbose_connection.tv_sec = -3600;
+	last_verbose_connection.tv_nsec = 0;
 }
 
 void HTTPInput::close_socket()
@@ -97,7 +105,7 @@ void HTTPInput::close_socket()
 		sock = -1;
 	}
 
-	MutexLock lock(&stats_mutex);
+	lock_guard<mutex> lock(stats_mutex);
 	stats.connect_time = -1;
 }
 
@@ -116,6 +124,9 @@ InputProto HTTPInput::serialize() const
 	serialized.set_sock(sock);
 	serialized.set_bytes_received(stats.bytes_received);
 	serialized.set_data_bytes_received(stats.data_bytes_received);
+	if (isfinite(stats.latency_sec)) {
+		serialized.set_latency_sec(stats.latency_sec);
+	}
 	serialized.set_connect_time(stats.connect_time);
 	if (encoding == Input::INPUT_ENCODING_METACUBE) {
 		serialized.set_is_metacube_encoded(true);
@@ -129,10 +140,12 @@ InputProto HTTPInput::serialize() const
 
 int HTTPInput::lookup_and_connect(const string &host, const string &port)
 {
 	addrinfo *ai;
-	int err = getaddrinfo(host.c_str(), port.c_str(), NULL, &ai);
+	int err = getaddrinfo(host.c_str(), port.c_str(), nullptr, &ai);
 	if (err != 0) {
-		log(WARNING, "[%s] Lookup of '%s' failed (%s).",
-		    url.c_str(), host.c_str(), gai_strerror(err));
+		if (!suppress_logging) {
+			log(WARNING, "[%s] Lookup of '%s' failed (%s).",
+			    url.c_str(), host.c_str(), gai_strerror(err));
+		}
 		return -1;
 	}
 
@@ -140,23 +153,14 @@ int HTTPInput::lookup_and_connect(const string &host, const string &port)
 
 	// Connect to everything in turn until we have a socket.
 	for ( ; ai && !should_stop(); ai = ai->ai_next) {
-		int sock = socket(ai->ai_family, SOCK_STREAM, IPPROTO_TCP);
+		// Now do a non-blocking connect. This is important because we want to be able to be
+		// woken up, even though it's rather cumbersome.
+		int sock = socket(ai->ai_family, SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP);
 		if (sock == -1) {
 			// Could be e.g. EPROTONOSUPPORT. The show must go on.
 			continue;
 		}
 
-		// Now do a non-blocking connect. This is important because we want to be able to be
-		// woken up, even though it's rather cumbersome.
-
-		// Set the socket as nonblocking.
-		int one = 1;
-		if (ioctl(sock, FIONBIO, &one) == -1) {
-			log_perror("ioctl(FIONBIO)");
-			safe_close(sock);
-			return -1;
-		}
-
 		// Do a non-blocking connect.
 		do {
 			err = connect(sock, ai->ai_addr, ai->ai_addrlen);
@@ -170,7 +174,7 @@ int HTTPInput::lookup_and_connect(const string &host, const string &port)
 
 		// Wait for the connect to complete, or an error to happen.
 		for ( ;; ) {
-			bool complete = wait_for_activity(sock, POLLIN | POLLOUT, NULL);
+			bool complete = wait_for_activity(sock, POLLIN | POLLOUT, nullptr);
 			if (should_stop()) {
 				safe_close(sock);
 				return -1;
@@ -200,8 +204,10 @@ int HTTPInput::lookup_and_connect(const string &host, const string &port)
 	}
 
 	// Give the last one as error.
-	log(WARNING, "[%s] Connect to '%s' failed (%s)",
-	    url.c_str(), host.c_str(), strerror(errno));
+	if (!suppress_logging) {
+		log(WARNING, "[%s] Connect to '%s' failed (%s)",
+		    url.c_str(), host.c_str(), strerror(errno));
+	}
 	freeaddrinfo(base_ai);
 	return -1;
 }
@@ -210,65 +216,62 @@ bool HTTPInput::parse_response(const string &request)
 {
 	vector<string> lines = split_lines(response);
 	if (lines.empty()) {
-		log(WARNING, "[%s] Empty HTTP response from input.", url.c_str());
+		if (!suppress_logging) {
+			log(WARNING, "[%s] Empty HTTP response from input.", url.c_str());
+		}
 		return false;
 	}
 
 	vector<string> first_line_tokens = split_tokens(lines[0]);
 	if (first_line_tokens.size() < 2) {
-		log(WARNING, "[%s] Malformed response line '%s' from input.",
-		    url.c_str(), lines[0].c_str());
+		if (!suppress_logging) {
+			log(WARNING, "[%s] Malformed response line '%s' from input.",
+			    url.c_str(), lines[0].c_str());
+		}
 		return false;
 	}
 
 	int response = atoi(first_line_tokens[1].c_str());
 	if (response != 200) {
-		log(WARNING, "[%s] Non-200 response '%s' from input.",
-		    url.c_str(), lines[0].c_str());
+		if (!suppress_logging) {
+			log(WARNING, "[%s] Non-200 response '%s' from input.",
+			    url.c_str(), lines[0].c_str());
+		}
 		return false;
 	}
 
-	multimap<string, string> parameters = extract_headers(lines, url);
+	HTTPHeaderMultimap parameters = extract_headers(lines, url);
 
 	// Remove “Content-encoding: metacube”.
-	// TODO: Make case-insensitive.
-	multimap<string, string>::iterator encoding_it =
-		parameters.find("Content-encoding");
+	const auto encoding_it = parameters.find("Content-Encoding");
 	if (encoding_it != parameters.end() && encoding_it->second == "metacube") {
 		parameters.erase(encoding_it);
 	}
 
 	// Change “Server: foo” to “Server: metacube/0.1 (reflecting: foo)”
-	// TODO: Make case-insensitive.
 	// XXX: Use a Via: instead?
 	if (parameters.count("Server") == 0) {
 		parameters.insert(make_pair("Server", SERVER_IDENTIFICATION));
 	} else {
-		for (multimap<string, string>::iterator it = parameters.begin();
-		     it != parameters.end();
-		     ++it) {
-			if (it->first != "Server") {
+		for (auto &key_and_value : parameters) {
+			if (key_and_value.first != "Server") {
 				continue;
 			}
-			it->second = SERVER_IDENTIFICATION " (reflecting: " + it->second + ")";
+			key_and_value.second = SERVER_IDENTIFICATION " (reflecting: " + key_and_value.second + ")";
 		}
 	}
 
-	// Set “Connection: close”.
-	// TODO: Make case-insensitive.
+	// Erase “Connection: close”; we'll set it on the sending side if needed.
 	parameters.erase("Connection");
-	parameters.insert(make_pair("Connection", "close"));
 
 	// Construct the new HTTP header.
 	http_header = "HTTP/1.0 200 OK\r\n";
-	for (multimap<string, string>::iterator it = parameters.begin();
-	     it != parameters.end();
-	     ++it) {
-		http_header.append(it->first + ": " + it->second + "\r\n");
+	for (const auto &key_and_value : parameters) {
+		http_header.append(key_and_value.first + ": " + key_and_value.second + "\r\n");
 	}
-	for (size_t i = 0; i < stream_indices.size(); ++i) {
-		servers->set_header(stream_indices[i], http_header, stream_header);
+	for (int stream_index : stream_indices) {
+		servers->set_header(stream_index, http_header, stream_header);
 	}
 
 	return true;
@@ -296,7 +299,9 @@ void HTTPInput::do_work()
 			timespec elapsed = clock_diff(last_activity, now);
 			if (elapsed.tv_sec >= timeout_secs) {
 				// Timeout!
-				log(ERROR, "[%s] Timeout after %d seconds, closing.", url.c_str(), elapsed.tv_sec);
+				if (!suppress_logging) {
+					log(ERROR, "[%s] Timeout after %d seconds, closing.", url.c_str(), elapsed.tv_sec);
+				}
 				state = CLOSING_SOCKET;
 				continue;
 			}
@@ -324,14 +329,17 @@ void HTTPInput::do_work()
 			response.clear();
 			pending_data.clear();
 			has_metacube_header = false;
-			for (size_t i = 0; i < stream_indices.size(); ++i) {
-				servers->set_header(stream_indices[i], "", "");
+			for (int stream_index : stream_indices) {
+				// Don't zero out the header; it might still be of use to HLS clients.
+				servers->set_unavailable(stream_index);
 			}
 
 			{
 				string protocol, user;  // Thrown away.
 				if (!parse_url(url, &protocol, &user, &host, &port, &path)) {
-					log(WARNING, "[%s] Failed to parse URL '%s'", url.c_str(), url.c_str());
+					if (!suppress_logging) {
+						log(WARNING, "[%s] Failed to parse URL '%s'", url.c_str(), url.c_str());
+					}
 					break;
 				}
 
@@ -343,21 +351,33 @@ void HTTPInput::do_work()
 			}
 		}
 
+		if (suppress_logging) {
+			// See if there's more than one minute since last time we made a connection
+			// with logging enabled. If so, turn it on again.
+			timespec now;
+			int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
+			assert(err != -1);
+
+			double elapsed = now.tv_sec - last_verbose_connection.tv_sec +
+				1e-9 * (now.tv_nsec - last_verbose_connection.tv_nsec);
+			if (elapsed > 60.0) {
+				suppress_logging = false;
+			}
+		}
+		if (!suppress_logging) {
+			int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_verbose_connection);
+			assert(err != -1);
+		}
+
+		++num_connection_attempts;
 		sock = lookup_and_connect(host, port);
 		if (sock != -1) {
-			// Yay, successful connect. Try to set it as nonblocking.
-			int one = 1;
-			if (ioctl(sock, FIONBIO, &one) == -1) {
-				log_perror("ioctl(FIONBIO)");
-				state = CLOSING_SOCKET;
-			} else {
-				state = SENDING_REQUEST;
-				request = "GET " + path + " HTTP/1.0\r\nHost: " + host_header(host, port) + "\r\nUser-Agent: cubemap\r\n\r\n";
-				request_bytes_sent = 0;
-			}
+			// Yay, successful connect.
+			state = SENDING_REQUEST;
+			request = "GET " + path + " HTTP/1.0\r\nHost: " + host_header(host, port) + "\r\nUser-Agent: cubemap\r\n\r\n";
+			request_bytes_sent = 0;
 
-			MutexLock lock(&stats_mutex);
-			stats.connect_time = time(NULL);
+			lock_guard<mutex> lock(stats_mutex);
+			stats.connect_time = time(nullptr);
 			clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
 		}
 		break;
@@ -399,8 +419,10 @@ void HTTPInput::do_work()
 
 		if (ret == 0) {
 			// This really shouldn't happen...
- log(ERROR, "[%s] Socket unexpectedly closed while reading header", - url.c_str()); + if (!suppress_logging) { + log(ERROR, "[%s] Socket unexpectedly closed while reading header", + url.c_str()); + } state = CLOSING_SOCKET; continue; } @@ -408,7 +430,9 @@ void HTTPInput::do_work() RequestParseStatus status = wait_for_double_newline(&response, buf, ret); if (status == RP_OUT_OF_SPACE) { - log(WARNING, "[%s] Server sent overlong HTTP response!", url.c_str()); + if (!suppress_logging) { + log(WARNING, "[%s] Server sent overlong HTTP response!", url.c_str()); + } state = CLOSING_SOCKET; continue; } else if (status == RP_NOT_FINISHED_YET) { @@ -421,7 +445,7 @@ void HTTPInput::do_work() if (status == RP_EXTRA_DATA) { char *ptr = static_cast( memmem(response.data(), response.size(), "\r\n\r\n", 4)); - assert(ptr != NULL); + assert(ptr != nullptr); extra_data = string(ptr + 4, &response[0] + response.size()); response.resize(ptr - response.data()); } @@ -435,13 +459,15 @@ void HTTPInput::do_work() process_data(&extra_data[0], extra_data.size()); } - if (encoding == Input::INPUT_ENCODING_RAW) { - log(INFO, "[%s] Connected to '%s', receiving raw data.", - url.c_str(), url.c_str()); - } else { - assert(encoding == Input::INPUT_ENCODING_METACUBE); - log(INFO, "[%s] Connected to '%s', receiving data.", - url.c_str(), url.c_str()); + if (!suppress_logging) { + if (encoding == Input::INPUT_ENCODING_RAW) { + log(INFO, "[%s] Connected to '%s', receiving raw data.", + url.c_str(), url.c_str()); + } else { + assert(encoding == Input::INPUT_ENCODING_METACUBE); + log(INFO, "[%s] Connected to '%s', receiving data.", + url.c_str(), url.c_str()); + } } state = RECEIVING_DATA; break; @@ -462,12 +488,28 @@ void HTTPInput::do_work() if (ret == 0) { // This really shouldn't happen... - log(ERROR, "[%s] Socket unexpectedly closed while reading data", - url.c_str()); + if (!suppress_logging) { + log(ERROR, "[%s] Socket unexpectedly closed while reading data", + url.c_str()); + } state = CLOSING_SOCKET; continue; } + num_connection_attempts = 0; // Reset, since we have a successful read. + if (suppress_logging) { + // This was suppressed earlier, so print it out now. + if (encoding == Input::INPUT_ENCODING_RAW) { + log(INFO, "[%s] Connected to '%s', receiving raw data.", + url.c_str(), url.c_str()); + } else { + assert(encoding == Input::INPUT_ENCODING_METACUBE); + log(INFO, "[%s] Connected to '%s', receiving data.", + url.c_str(), url.c_str()); + } + suppress_logging = false; + } + process_data(buf, ret); break; } @@ -484,7 +526,15 @@ void HTTPInput::do_work() // or the connection just got closed. // The earlier steps have already given the error message, if any. 
 	if (state == NOT_CONNECTED && !should_stop()) {
-		log(INFO, "[%s] Waiting 0.2 second and restarting...", url.c_str());
+		if (!suppress_logging) {
+			log(INFO, "[%s] Waiting 0.2 seconds and restarting...", url.c_str());
+		}
+
+		if (num_connection_attempts >= 3 && !suppress_logging) {
+			log(INFO, "[%s] %d failed connection attempts, suppressing logging for one minute.",
+			    url.c_str(), num_connection_attempts);
+			suppress_logging = true;
+		}
 		timespec timeout_ts;
 		timeout_ts.tv_sec = 0;
 		timeout_ts.tv_nsec = 200000000;
@@ -495,20 +545,20 @@ void HTTPInput::do_work()
 
 void HTTPInput::process_data(char *ptr, size_t bytes)
 {
-	pending_data.insert(pending_data.end(), ptr, ptr + bytes);
 	{
-		MutexLock mutex(&stats_mutex);
+		lock_guard<mutex> lock(stats_mutex);
 		stats.bytes_received += bytes;
 	}
 
 	if (encoding == Input::INPUT_ENCODING_RAW) {
-		for (size_t i = 0; i < stream_indices.size(); ++i) {
-			servers->add_data(stream_indices[i], ptr, bytes, /*metacube_flags=*/0);
+		for (int stream_index : stream_indices) {
+			servers->add_data(stream_index, ptr, bytes, /*metacube_flags=*/0, /*pts=*/RationalPTS());
 		}
 		return;
 	}
 
 	assert(encoding == Input::INPUT_ENCODING_METACUBE);
+	pending_data.insert(pending_data.end(), ptr, ptr + bytes);
 
 	for ( ;; ) {
 		// If we don't have enough data (yet) for even the Metacube header, just return.
@@ -522,7 +572,7 @@ void HTTPInput::process_data(char *ptr, size_t bytes)
 		char *ptr = static_cast<char *>(
 			memmem(pending_data.data(), pending_data.size(),
 			       METACUBE2_SYNC, strlen(METACUBE2_SYNC)));
-		if (ptr == NULL) {
+		if (ptr == nullptr) {
 			// OK, so we didn't find the sync marker. We know then that
 			// we do not have the _full_ marker in the buffer, but we
 			// could have N-1 bytes. Drop everything before that,
@@ -570,20 +620,32 @@ void HTTPInput::process_data(char *ptr, size_t bytes)
 			return;
 		}
 
-		// Send this block on to the servers.
-		{
-			MutexLock lock(&stats_mutex);
-			stats.data_bytes_received += size;
-		}
-		char *inner_data = pending_data.data() + sizeof(metacube2_block_header);
-		if (flags & METACUBE_FLAGS_HEADER) {
-			stream_header = string(inner_data, inner_data + size);
-			for (size_t i = 0; i < stream_indices.size(); ++i) {
-				servers->set_header(stream_indices[i], http_header, stream_header);
+		// See if this is a metadata block. If so, we don't want to send it on,
+		// but rather process it ourselves.
+		// TODO: Keep metadata when sending on to other Metacube users.
+		if (flags & METACUBE_FLAGS_METADATA) {
+			{
+				lock_guard<mutex> lock(stats_mutex);
+				stats.metadata_bytes_received += size;
 			}
-		}
-		for (size_t i = 0; i < stream_indices.size(); ++i) {
-			servers->add_data(stream_indices[i], inner_data, size, flags);
+			process_metacube_metadata_block(hdr, pending_data.data() + sizeof(hdr), size);
+		} else {
+			// Send this block on to the servers.
+			{
+				lock_guard<mutex> lock(stats_mutex);
+				stats.data_bytes_received += size;
+			}
+			char *inner_data = pending_data.data() + sizeof(metacube2_block_header);
+			if (flags & METACUBE_FLAGS_HEADER) {
+				stream_header = string(inner_data, inner_data + size);
+				for (int stream_index : stream_indices) {
+					servers->set_header(stream_index, http_header, stream_header);
+				}
+			}
+			for (int stream_index : stream_indices) {
+				servers->add_data(stream_index, inner_data, size, flags, next_block_pts);
+			}
+			next_block_pts = RationalPTS();
 		}
 
 		// Consume the block. This isn't the most efficient way of dealing with things
@@ -613,6 +675,49 @@ void HTTPInput::add_destination(int stream_index)
 
 InputStats HTTPInput::get_stats() const
 {
-	MutexLock lock(&stats_mutex);
+	lock_guard<mutex> lock(stats_mutex);
 	return stats;
 }
+
+void HTTPInput::process_metacube_metadata_block(const metacube2_block_header &hdr, const char *payload, uint32_t payload_size)
+{
+	if (payload_size < sizeof(uint64_t)) {
+		log(WARNING, "[%s] Undersized Metacube metadata block (%d bytes); corrupted header?",
+		    url.c_str(), payload_size);
+		return;
+	}
+
+	uint64_t type = be64toh(*(const uint64_t *)payload);
+	if (type == METACUBE_METADATA_TYPE_ENCODER_TIMESTAMP) {
+		timespec now;
+		clock_gettime(CLOCK_REALTIME, &now);
+
+		const metacube2_timestamp_packet *pkt = (const metacube2_timestamp_packet *)payload;
+		if (payload_size != sizeof(*pkt)) {
+			log(WARNING, "[%s] Metacube timestamp block of wrong size (%d bytes); ignoring.",
+			    url.c_str(), payload_size);
+			return;
+		}
+
+		double elapsed = now.tv_sec - be64toh(pkt->tv_sec) +
+			1e-9 * (now.tv_nsec - long(be64toh(pkt->tv_nsec)));
+		{
+			lock_guard<mutex> lock(stats_mutex);
+			stats.latency_sec = elapsed;
+		}
+	} else if (type == METACUBE_METADATA_TYPE_NEXT_BLOCK_PTS) {
+		const metacube2_pts_packet *pkt = (const metacube2_pts_packet *)payload;
+		if (payload_size != sizeof(*pkt)) {
+			log(WARNING, "[%s] Metacube pts block of wrong size (%d bytes); ignoring.",
+			    url.c_str(), payload_size);
+			return;
+		}
+		next_block_pts.pts = be64toh(pkt->pts);
+		next_block_pts.timebase_num = be64toh(pkt->timebase_num);
+		next_block_pts.timebase_den = be64toh(pkt->timebase_den);
+	} else {
+		// Unknown metadata block, ignore
+		log(INFO, "[%s] Metadata block %llu\n", url.c_str(), type);
+		return;
+	}
+}
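
Editor's note on the new latency measurement: stats.latency_sec only becomes meaningful if the encoder periodically emits an ENCODER_TIMESTAMP metadata block stamped with its own wall clock. Below is a minimal sketch of what the sending side might look like. It is not taken from the cubemap/metacube2 sources in this diff: the payload layout (a big-endian 64-bit type tag followed by big-endian tv_sec/tv_nsec) is inferred from how process_metacube_metadata_block() above reads it, and the type tag value 0x1 as well as the struct and function names in the sketch are assumptions; a real sender should use metacube2_timestamp_packet and METACUBE_METADATA_TYPE_ENCODER_TIMESTAMP from metacube2.h and wrap the payload in a metacube2_block_header with METACUBE_FLAGS_METADATA set.

// Sketch only: builds the payload of a Metacube2 encoder-timestamp metadata
// block, mirroring what process_metacube_metadata_block() above expects.
// Assumed: the field layout and the type tag value (0x1); use the real
// definitions from metacube2.h in production code.
#include <endian.h>
#include <stdint.h>
#include <time.h>

#include <string>

namespace {

struct timestamp_payload {         // assumed layout (matches the parser above)
	uint64_t type;             // big-endian type tag
	uint64_t tv_sec, tv_nsec;  // big-endian CLOCK_REALTIME timestamp
};

// Assumed value of METACUBE_METADATA_TYPE_ENCODER_TIMESTAMP.
constexpr uint64_t kEncoderTimestampType = 0x1;

}  // namespace

// Returns the raw payload bytes; the caller still has to prepend a
// metacube2_block_header with METACUBE_FLAGS_METADATA before sending.
std::string make_encoder_timestamp_payload()
{
	timespec now;
	clock_gettime(CLOCK_REALTIME, &now);

	timestamp_payload pkt;
	pkt.type = htobe64(kEncoderTimestampType);
	pkt.tv_sec = htobe64(uint64_t(now.tv_sec));
	pkt.tv_nsec = htobe64(uint64_t(now.tv_nsec));
	return std::string(reinterpret_cast<const char *>(&pkt), sizeof(pkt));
}

On the receiving side, the code in this diff converts the fields back with be64toh() and subtracts them from its own CLOCK_REALTIME sample, which is what ends up in stats.latency_sec; the number is therefore only as good as the clock synchronization (e.g. NTP) between the encoder and the cubemap host.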