Make the Metacube block size warning a bit less aggressive, as WebM seems to generate...
diff --git a/httpinput.cpp b/httpinput.cpp
index f92ac13ca9ca010f1b2a8efb2d45cba68c40b12f..3bee5d675fde7385fc61307ac1ad14d8648f9edc 100644
--- a/httpinput.cpp
+++ b/httpinput.cpp
@@ -1,4 +1,3 @@
-#include <stdio.h>
 #include <assert.h>
 #include <errno.h>
 #include <netdb.h>
@@ -9,6 +8,7 @@
 #include <string.h>
 #include <sys/ioctl.h>
 #include <sys/socket.h>
+#include <time.h>
 #include <unistd.h>
 #include <map>
 #include <string>
@@ -62,7 +62,9 @@ HTTPInput::HTTPInput(const InputProto &serialized)
 
 void HTTPInput::close_socket()
 {
-       safe_close(sock);
+       if (sock != -1) {
+               safe_close(sock);
+       }
 }
 
 InputProto HTTPInput::serialize() const
@@ -84,17 +86,16 @@ int HTTPInput::lookup_and_connect(const string &host, const string &port)
 {
        addrinfo *ai;
        int err = getaddrinfo(host.c_str(), port.c_str(), NULL, &ai);
-       if (err == -1) {
+       if (err != 0) {
                log(WARNING, "[%s] Lookup of '%s' failed (%s).",
                        url.c_str(), host.c_str(), gai_strerror(err));
-               freeaddrinfo(ai);
                return -1;
        }
 
        addrinfo *base_ai = ai;
 
        // Connect to everything in turn until we have a socket.
-       while (ai && !should_stop()) {
+       for ( ; ai && !should_stop(); ai = ai->ai_next) {
                int sock = socket(ai->ai_family, SOCK_STREAM, IPPROTO_TCP);
                if (sock == -1) {
                        // Could be e.g. EPROTONOSUPPORT. The show must go on.
@@ -152,7 +153,6 @@ int HTTPInput::lookup_and_connect(const string &host, const string &port)
                }
 
                safe_close(sock);
-               ai = ai->ai_next;
        }
 
        // Give the last one as error.
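The two hunks above fix a pair of getaddrinfo() pitfalls (it reports failure with a nonzero error code, not -1, and the result list must not be freed when the lookup fails) and rewrite the retry loop as a for loop so no code path can skip the ai = ai->ai_next step. A standalone sketch of the same pattern follows; connect_to_host() is an illustrative helper, not part of cubemap.

#include <netdb.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

// Resolve host:port and try each returned address until one connects.
// Returns a connected socket, or -1 on failure.
int connect_to_host(const char *host, const char *port)
{
        addrinfo *ai;
        // getaddrinfo() signals failure with a nonzero error code (decoded by
        // gai_strerror()); in that case the result list is not valid to free.
        int err = getaddrinfo(host, port, NULL, &ai);
        if (err != 0) {
                fprintf(stderr, "Lookup of '%s' failed (%s).\n", host, gai_strerror(err));
                return -1;
        }

        addrinfo *base_ai = ai;
        int sock = -1;

        // Try every address in turn; the loop header owns the ai->ai_next step.
        for ( ; ai != NULL; ai = ai->ai_next) {
                sock = socket(ai->ai_family, SOCK_STREAM, IPPROTO_TCP);
                if (sock == -1) {
                        continue;  // E.g. EPROTONOSUPPORT; try the next address.
                }
                if (connect(sock, ai->ai_addr, ai->ai_addrlen) == 0) {
                        break;  // Connected.
                }
                close(sock);
                sock = -1;
        }

        freeaddrinfo(base_ai);
        return sock;
}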
@@ -240,8 +240,8 @@ bool HTTPInput::parse_response(const std::string &request)
                http_header.append(it->first + ": " + it->second + "\r\n");
        }
 
-       for (size_t i = 0; i < stream_ids.size(); ++i) {
-               servers->set_header(stream_ids[i], http_header, "");
+       for (size_t i = 0; i < stream_indices.size(); ++i) {
+               servers->set_header(stream_indices[i], http_header, "");
        }
 
        return true;
@@ -264,8 +264,9 @@ void HTTPInput::do_work()
                        request_bytes_sent = 0;
                        response.clear();
                        pending_data.clear();
-                       for (size_t i = 0; i < stream_ids.size(); ++i) {
-                               servers->set_header(stream_ids[i], "", "");
+                       has_metacube_header = false;
+                       for (size_t i = 0; i < stream_indices.size(); ++i) {
+                               servers->set_header(stream_indices[i], "", "");
                        }
 
                        {
@@ -351,7 +352,7 @@ void HTTPInput::do_work()
                                char *ptr = static_cast<char *>(
                                        memmem(response.data(), response.size(), "\r\n\r\n", 4));
                                assert(ptr != NULL);
-                               extra_data = string(ptr, &response[0] + response.size());
+                               extra_data = string(ptr + 4, &response[0] + response.size());
                                response.resize(ptr - response.data());
                        }
 
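The one-character change above is the interesting part of that hunk: bytes read past the header/body separator are carried over as extra_data, and the old code let the "\r\n\r\n" itself leak into them. A minimal sketch of the split, using the same memmem() (a GNU extension the file already relies on); split_response() is an illustrative name, not cubemap's API.

#include <assert.h>
#include <string.h>
#include <string>

// Split a buffered HTTP response into the status line + headers, and any
// body bytes that were read past the blank line.
void split_response(const std::string &response, std::string *header, std::string *extra_data)
{
        const char *ptr = static_cast<const char *>(
                memmem(response.data(), response.size(), "\r\n\r\n", 4));
        assert(ptr != NULL);
        *header = std::string(response.data(), ptr);  // Excludes the separator.
        // ptr + 4 skips the "\r\n\r\n" so the separator never ends up in the payload.
        *extra_data = std::string(ptr + 4, response.data() + response.size());
}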
@@ -385,7 +386,7 @@ void HTTPInput::do_work()
 
                        if (ret == 0) {
                                // This really shouldn't happen...
-                               log(ERROR, "[%s] Socket unexpectedly closed while reading header",
+                               log(ERROR, "[%s] Socket unexpectedly closed while reading data",
                                           url.c_str());
                                state = CLOSING_SOCKET;
                                continue;
@@ -457,6 +458,11 @@ void HTTPInput::process_data(char *ptr, size_t bytes)
                uint32_t size = ntohl(hdr->size);
                uint32_t flags = ntohl(hdr->flags);
 
+               if (size > 262144) {
+                       log(WARNING, "[%s] Metacube block of %d bytes (flags=%x); corrupted header?",
+                               url.c_str(), size, flags);
+               }
+
                // See if we have the entire block. If not, wait for more data.
                if (pending_data.size() < sizeof(metacube_block_header) + size) {
                        return;
@@ -466,12 +472,18 @@ void HTTPInput::process_data(char *ptr, size_t bytes)
                char *inner_data = pending_data.data() + sizeof(metacube_block_header);
                if (flags & METACUBE_FLAGS_HEADER) {
                        string header(inner_data, inner_data + size);
-                       for (size_t i = 0; i < stream_ids.size(); ++i) {
-                               servers->set_header(stream_ids[i], http_header, header);
+                       for (size_t i = 0; i < stream_indices.size(); ++i) {
+                               servers->set_header(stream_indices[i], http_header, header);
+                       }
+               } else {
+                       StreamStartSuitability suitable_for_stream_start;
+                       if (flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) {
+                               suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
+                       } else {
+                               suitable_for_stream_start = SUITABLE_FOR_STREAM_START;
                        }
-               } else { 
-                       for (size_t i = 0; i < stream_ids.size(); ++i) {
-                               servers->add_data(stream_ids[i], inner_data, size);
+                       for (size_t i = 0; i < stream_indices.size(); ++i) {
+                               servers->add_data(stream_indices[i], inner_data, size, suitable_for_stream_start);
                        }
                }
 
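For reference, the block handling above follows the Metacube framing: each block carries a size and a flags word in network byte order, and the flags say whether the payload is a stream header, ordinary data, or (presumably) data that is unsuitable as a client's first bytes. The sketch below only illustrates the byte-order handling; the struct layout and flag values are assumptions for illustration, not cubemap's actual definitions.

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

// Assumed layout for illustration; cubemap's real header also carries a
// sync marker, and the flag constants live in its own Metacube header file.
struct metacube_block_header_sketch {
        uint32_t size;   // Payload size in bytes, network byte order.
        uint32_t flags;  // Flag bits, network byte order.
};

const uint32_t FLAG_HEADER = 0x1;                         // Assumed value of METACUBE_FLAGS_HEADER.
const uint32_t FLAG_NOT_SUITABLE_FOR_STREAM_START = 0x2;  // Assumed value.

// Decode one block header from a raw buffer.
void parse_block_header(const char *buf, uint32_t *size, bool *is_header, bool *stream_startable)
{
        metacube_block_header_sketch hdr;
        memcpy(&hdr, buf, sizeof(hdr));  // memcpy avoids unaligned reads.
        *size = ntohl(hdr.size);
        uint32_t flags = ntohl(hdr.flags);
        *is_header = (flags & FLAG_HEADER) != 0;
        *stream_startable = (flags & FLAG_NOT_SUITABLE_FOR_STREAM_START) == 0;
}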
@@ -490,6 +502,7 @@ void HTTPInput::drop_pending_data(size_t num_bytes)
        }
        log(WARNING, "[%s] Dropping %lld junk bytes from stream, maybe it is not a Metacube stream?",
                url.c_str(), (long long)num_bytes);
+       assert(pending_data.size() >= num_bytes);
        pending_data.erase(pending_data.begin(), pending_data.begin() + num_bytes);
 }
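Finally, the new assert documents the contract of drop_pending_data(): callers may only discard bytes that are actually buffered, since erasing past the end of the buffer would be undefined behavior. A minimal standalone version, assuming pending_data is a std::vector<char> (any container with the same erase interface would do):

#include <assert.h>
#include <stddef.h>
#include <vector>

// Discard num_bytes of junk from the front of the buffer.
void drop_front(std::vector<char> *pending_data, size_t num_bytes)
{
        // Erasing more than is buffered would hand erase() an iterator past
        // end(), which is undefined behavior, so fail loudly instead.
        assert(pending_data->size() >= num_bytes);
        pending_data->erase(pending_data->begin(), pending_data->begin() + num_bytes);
}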