X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=httpinput.cpp;h=7f96970b678fb75c68a31746a39087a1a41e269d;hp=7da8daa1d85e732be755236a780d010a82e9cd4e;hb=70c0baf4bcec3a77f0626d5a7bfde87fc7339698;hpb=6cb6815facd386521b21c66c35b48ea295dff98e diff --git a/httpinput.cpp b/httpinput.cpp index 7da8daa..7f96970 100644 --- a/httpinput.cpp +++ b/httpinput.cpp @@ -18,6 +18,7 @@ #include "httpinput.h" #include "log.h" #include "metacube.h" +#include "mutexlock.h" #include "parse.h" #include "serverpool.h" #include "state.pb.h" @@ -27,13 +28,36 @@ using namespace std; extern ServerPool *servers; - + +namespace { + +// Compute b-a. +timespec clock_diff(const timespec &a, const timespec &b) +{ + timespec ret; + ret.tv_sec = b.tv_sec - a.tv_sec; + ret.tv_nsec = b.tv_nsec - a.tv_nsec; + if (ret.tv_nsec < 0) { + ret.tv_sec--; + ret.tv_nsec += 1000000000; + } + assert(ret.tv_nsec >= 0); + return ret; +} + +} // namespace + HTTPInput::HTTPInput(const string &url) : state(NOT_CONNECTED), url(url), has_metacube_header(false), sock(-1) { + pthread_mutex_init(&stats_mutex, NULL); + stats.url = url; + stats.bytes_received = 0; + stats.data_bytes_received = 0; + stats.connect_time = -1; } HTTPInput::HTTPInput(const InputProto &serialized) @@ -43,6 +67,7 @@ HTTPInput::HTTPInput(const InputProto &serialized) request_bytes_sent(serialized.request_bytes_sent()), response(serialized.response()), http_header(serialized.http_header()), + stream_header(serialized.stream_header()), has_metacube_header(serialized.has_metacube_header()), sock(serialized.sock()) { @@ -58,6 +83,16 @@ HTTPInput::HTTPInput(const InputProto &serialized) memcmp(http_header.data() + http_header.size() - 4, "\r\n\r\n", 4) == 0) { http_header.resize(http_header.size() - 2); } + + pthread_mutex_init(&stats_mutex, NULL); + stats.url = url; + stats.bytes_received = serialized.bytes_received(); + stats.data_bytes_received = serialized.data_bytes_received(); + if (serialized.has_connect_time()) { + stats.connect_time = serialized.connect_time(); + } else { + stats.connect_time = time(NULL); + } } void HTTPInput::close_socket() @@ -65,6 +100,9 @@ void HTTPInput::close_socket() if (sock != -1) { safe_close(sock); } + + MutexLock lock(&stats_mutex); + stats.connect_time = -1; } InputProto HTTPInput::serialize() const @@ -76,9 +114,13 @@ InputProto HTTPInput::serialize() const serialized.set_request_bytes_sent(request_bytes_sent); serialized.set_response(response); serialized.set_http_header(http_header); + serialized.set_stream_header(stream_header); serialized.set_pending_data(string(pending_data.begin(), pending_data.end())); serialized.set_has_metacube_header(has_metacube_header); serialized.set_sock(sock); + serialized.set_bytes_received(stats.bytes_received); + serialized.set_data_bytes_received(stats.data_bytes_received); + serialized.set_connect_time(stats.connect_time); return serialized; } @@ -86,17 +128,16 @@ int HTTPInput::lookup_and_connect(const string &host, const string &port) { addrinfo *ai; int err = getaddrinfo(host.c_str(), port.c_str(), NULL, &ai); - if (err == -1) { + if (err != 0) { log(WARNING, "[%s] Lookup of '%s' failed (%s).", url.c_str(), host.c_str(), gai_strerror(err)); - freeaddrinfo(ai); return -1; } addrinfo *base_ai = ai; // Connect to everything in turn until we have a socket. - while (ai && !should_stop()) { + for ( ; ai && !should_stop(); ai = ai->ai_next) { int sock = socket(ai->ai_family, SOCK_STREAM, IPPROTO_TCP); if (sock == -1) { // Could be e.g. EPROTONOSUPPORT. The show must go on. 
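
The connection loop above walks the getaddrinfo() result list and gives up only once every entry has been tried. A minimal standalone sketch (not part of the patch; the host name is made up) of the getaddrinfo() error convention that the corrected "err != 0" check relies on: failures come back as a nonzero EAI_* code rather than -1, gai_strerror() turns that code into text, and the result list only exists (and only needs freeaddrinfo()) on success.

    #include <stdio.h>
    #include <netdb.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    int main()
    {
        addrinfo *ai = NULL;
        // "nonexistent.invalid" is a made-up host that will not resolve.
        int err = getaddrinfo("nonexistent.invalid", "80", NULL, &ai);
        if (err != 0) {
            // e.g. EAI_NONAME; 'ai' was never filled in, so there is nothing to free.
            fprintf(stderr, "lookup failed: %s\n", gai_strerror(err));
            return 1;
        }
        // ... connect to the entries in turn, as the loop above does ...
        freeaddrinfo(ai);
        return 0;
    }
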
@@ -154,7 +195,6 @@ int HTTPInput::lookup_and_connect(const string &host, const string &port)
 		}
 
 		safe_close(sock);
-		ai = ai->ai_next;
 	}
 
 	// Give the last one as error.
@@ -243,7 +283,7 @@ bool HTTPInput::parse_response(const std::string &request)
 	}
 
 	for (size_t i = 0; i < stream_indices.size(); ++i) {
-		servers->set_header(stream_indices[i], http_header, "");
+		servers->set_header(stream_indices[i], http_header, stream_header);
 	}
 
 	return true;
@@ -251,11 +291,43 @@ bool HTTPInput::parse_response(const std::string &request)
 
 void HTTPInput::do_work()
 {
+	timespec last_activity;
+
+	// TODO: Make the timeout persist across restarts.
+	if (state == SENDING_REQUEST || state == RECEIVING_HEADER || state == RECEIVING_DATA) {
+		int err = clock_gettime(CLOCK_MONOTONIC, &last_activity);
+		assert(err != -1);
+	}
+
 	while (!should_stop()) {
 		if (state == SENDING_REQUEST || state == RECEIVING_HEADER || state == RECEIVING_DATA) {
-			bool activity = wait_for_activity(sock, (state == SENDING_REQUEST) ? POLLOUT : POLLIN, NULL);
-			if (!activity) {
-				// Most likely, should_stop was set.
+			// Give the socket 30 seconds since last activity before we time out.
+			static const int timeout_secs = 30;
+
+			timespec now;
+			int err = clock_gettime(CLOCK_MONOTONIC, &now);
+			assert(err != -1);
+
+			timespec elapsed = clock_diff(last_activity, now);
+			if (elapsed.tv_sec >= timeout_secs) {
+				// Timeout!
+				log(ERROR, "[%s] Timeout after %d seconds, closing.", url.c_str(), elapsed.tv_sec);
+				state = CLOSING_SOCKET;
+				continue;
+			}
+
+			// Basically calculate (30 - (now - last_activity)) = (30 + (last_activity - now)).
+			// Add a second of slack to account for differences between clocks.
+			timespec timeout = clock_diff(now, last_activity);
+			timeout.tv_sec += timeout_secs + 1;
+			assert(timeout.tv_sec > 0 || (timeout.tv_sec >= 0 && timeout.tv_nsec > 0));
+
+			bool activity = wait_for_activity(sock, (state == SENDING_REQUEST) ? POLLOUT : POLLIN, &timeout);
+			if (activity) {
+				err = clock_gettime(CLOCK_MONOTONIC, &last_activity);
+				assert(err != -1);
+			} else {
+				// OK. Most likely, should_stop was set, or we have timed out.
 				continue;
 			}
 		}
@@ -266,6 +338,7 @@ void HTTPInput::do_work()
 			request_bytes_sent = 0;
 			response.clear();
 			pending_data.clear();
+			has_metacube_header = false;
 			for (size_t i = 0; i < stream_indices.size(); ++i) {
 				servers->set_header(stream_indices[i], "", "");
 			}
@@ -290,6 +363,10 @@ void HTTPInput::do_work()
 				request = "GET " + path + " HTTP/1.0\r\nUser-Agent: cubemap\r\n\r\n";
 				request_bytes_sent = 0;
 			}
+
+			MutexLock lock(&stats_mutex);
+			stats.connect_time = time(NULL);
+			clock_gettime(CLOCK_MONOTONIC, &last_activity);
 		}
 		break;
 		case SENDING_REQUEST: {
@@ -353,7 +430,7 @@ void HTTPInput::do_work()
 			char *ptr = static_cast<char *>(
 				memmem(response.data(), response.size(), "\r\n\r\n", 4));
 			assert(ptr != NULL);
-			extra_data = string(ptr, &response[0] + response.size());
+			extra_data = string(ptr + 4, &response[0] + response.size());
 			response.resize(ptr - response.data());
 		}
 
@@ -421,6 +498,10 @@ void HTTPInput::do_work()
 void HTTPInput::process_data(char *ptr, size_t bytes)
 {
 	pending_data.insert(pending_data.end(), ptr, ptr + bytes);
+	{
+		MutexLock mutex(&stats_mutex);
+		stats.bytes_received += bytes;
+	}
 
 	for ( ;; ) {
 		// If we don't have enough data (yet) for even the Metacube header, just return.
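
The timeout handling above keeps a CLOCK_MONOTONIC timestamp of the last socket activity, declares a timeout once 30 seconds have passed, and otherwise converts the remaining budget into a timespec for wait_for_activity(). A minimal standalone sketch of that arithmetic (not part of the patch; the clock readings are made up), reusing the borrow logic of the clock_diff() helper introduced earlier in the patch:

    #include <assert.h>
    #include <time.h>

    // Same borrow logic as the patch's clock_diff() helper: returns b - a.
    static timespec clock_diff(const timespec &a, const timespec &b)
    {
        timespec ret;
        ret.tv_sec = b.tv_sec - a.tv_sec;
        ret.tv_nsec = b.tv_nsec - a.tv_nsec;
        if (ret.tv_nsec < 0) {
            ret.tv_sec--;
            ret.tv_nsec += 1000000000;
        }
        return ret;
    }

    int main()
    {
        const int timeout_secs = 30;
        timespec last_activity = { 100, 500000000 };  // hypothetical monotonic reading: t = 100.5 s
        timespec now = { 110, 200000000 };            // 9.7 seconds later

        // Elapsed time decides whether we have already timed out.
        timespec elapsed = clock_diff(last_activity, now);
        assert(elapsed.tv_sec == 9 && elapsed.tv_nsec == 700000000);

        // Remaining budget: 30 - (now - last_activity) == 30 + (last_activity - now),
        // plus one second of slack, exactly as computed above.
        timespec timeout = clock_diff(now, last_activity);
        timeout.tv_sec += timeout_secs + 1;
        assert(timeout.tv_sec == 21 && timeout.tv_nsec == 300000000);  // 21.3 s left
        return 0;
    }
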
@@ -459,21 +540,36 @@ void HTTPInput::process_data(char *ptr, size_t bytes) uint32_t size = ntohl(hdr->size); uint32_t flags = ntohl(hdr->flags); + if (size > 262144) { + log(WARNING, "[%s] Metacube block of %d bytes (flags=%x); corrupted header?", + url.c_str(), size, flags); + } + // See if we have the entire block. If not, wait for more data. if (pending_data.size() < sizeof(metacube_block_header) + size) { return; } - // Send this block on to the data. + // Send this block on to the servers. + { + MutexLock lock(&stats_mutex); + stats.data_bytes_received += size; + } char *inner_data = pending_data.data() + sizeof(metacube_block_header); if (flags & METACUBE_FLAGS_HEADER) { - string header(inner_data, inner_data + size); + stream_header = string(inner_data, inner_data + size); for (size_t i = 0; i < stream_indices.size(); ++i) { - servers->set_header(stream_indices[i], http_header, header); + servers->set_header(stream_indices[i], http_header, stream_header); + } + } else { + StreamStartSuitability suitable_for_stream_start; + if (flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) { + suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START; + } else { + suitable_for_stream_start = SUITABLE_FOR_STREAM_START; } - } else { for (size_t i = 0; i < stream_indices.size(); ++i) { - servers->add_data(stream_indices[i], inner_data, size); + servers->add_data(stream_indices[i], inner_data, size, suitable_for_stream_start); } } @@ -492,6 +588,18 @@ void HTTPInput::drop_pending_data(size_t num_bytes) } log(WARNING, "[%s] Dropping %lld junk bytes from stream, maybe it is not a Metacube stream?", url.c_str(), (long long)num_bytes); + assert(pending_data.size() >= num_bytes); pending_data.erase(pending_data.begin(), pending_data.begin() + num_bytes); } +void HTTPInput::add_destination(int stream_index) +{ + stream_indices.push_back(stream_index); + servers->set_header(stream_index, http_header, stream_header); +} + +InputStats HTTPInput::get_stats() const +{ + MutexLock lock(&stats_mutex); + return stats; +}
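
get_stats() above copies the whole struct while stats_mutex is held, so a reader gets a consistent snapshot while the input thread keeps bumping bytes_received and data_bytes_received. A minimal standalone sketch of that pattern (not part of the patch; ScopedLock and StatsCounter are hypothetical stand-ins for cubemap's MutexLock and the HTTPInput members):

    #include <pthread.h>
    #include <stddef.h>
    #include <time.h>
    #include <string>

    // Simplified stand-in for cubemap's MutexLock: lock on construction, unlock on scope exit.
    class ScopedLock {
    public:
        explicit ScopedLock(pthread_mutex_t *mu) : mu(mu) { pthread_mutex_lock(mu); }
        ~ScopedLock() { pthread_mutex_unlock(mu); }
    private:
        pthread_mutex_t *mu;
    };

    // Simplified version of the per-input statistics.
    struct Stats {
        std::string url;
        size_t bytes_received;
        size_t data_bytes_received;
        time_t connect_time;
    };

    class StatsCounter {
    public:
        StatsCounter() : stats() { pthread_mutex_init(&mu, NULL); }

        // Called from the input thread for every chunk read off the socket.
        void add_bytes(size_t n)
        {
            ScopedLock lock(&mu);
            stats.bytes_received += n;
        }

        // Called from another thread; the copy is taken under the lock,
        // so the reader never sees a half-updated struct.
        Stats get_stats() const
        {
            ScopedLock lock(&mu);
            return stats;
        }

    private:
        mutable pthread_mutex_t mu;
        Stats stats;
    };
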