git.sesse.net Git - cubemap/blobdiff - stream.cpp
Partially revert last patch.
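This blobdiff shows stream.cpp gaining a prebuffering_bytes parameter (threaded through the constructor and StreamProto serialization) and switching the queued-data path from plain iovecs to Stream::DataElement, where each queued chunk carries its own stream-start flag instead of the single queued_data_last_starting_point index. The DataElement and StreamStartSuitability declarations live in stream.h, which is not touched by this diff; the sketch below is only inferred from how the names are used here, not the actual header.

// Sketch only: inferred from usage in the diff below; the real declarations
// are in stream.h and may differ in detail.
#include <sys/uio.h>   // iovec
#include <stddef.h>    // size_t
#include <vector>

enum StreamStartSuitability {
	NOT_SUITABLE_FOR_STREAM_START,
	SUITABLE_FOR_STREAM_START,
};

struct Stream {
	// Each queued chunk now remembers whether a new client may start
	// receiving the stream at its first byte.
	struct DataElement {
		iovec data;
		StreamStartSuitability suitable_for_stream_start;
	};

	std::vector<DataElement> queued_data;
	size_t prebuffering_bytes;  // new; also persisted via StreamProto
	unsigned pacing_rate;       // new; constructors initialize it to ~0U
	// ... remaining members unchanged by this diff ...
};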
index b7a92c2293832e7d77c3b0c8b4c20e604b195a44..50679ca5f5738c2bf0870d6816f24490284d3679 100644 (file)
@@ -1,5 +1,6 @@
 #include <assert.h>
 #include <errno.h>
+#include <limits.h>
 #include <netinet/in.h>
 #include <stdio.h>
 #include <stdlib.h>
 
 using namespace std;
 
-Stream::Stream(const string &url, size_t backlog_size, Encoding encoding)
+Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding)
        : url(url),
          encoding(encoding),
          data_fd(make_tempfile("")),
          backlog_size(backlog_size),
+         prebuffering_bytes(prebuffering_bytes),
          bytes_received(0),
          last_suitable_starting_point(-1),
-         mark_pool(NULL),
-         queued_data_last_starting_point(-1)
+         pacing_rate(~0U)
 {
        if (data_fd == -1) {
                exit(1);
@@ -49,33 +50,16 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
          encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
          data_fd(data_fd),
          backlog_size(serialized.backlog_size()),
+         prebuffering_bytes(serialized.prebuffering_bytes()),
          bytes_received(serialized.bytes_received()),
-         mark_pool(NULL),
-         queued_data_last_starting_point(-1)
+         pacing_rate(~0U)
 {
        if (data_fd == -1) {
                exit(1);
        }
 
-       // Split old-style headers into HTTP and video headers.
-       if (!serialized.header().empty()) {
-               string header = serialized.header();
-               size_t split = header.find("\r\n\r\n");
-               if (split == string::npos) {
-                       http_header = header;
-                       stream_header = "";
-               } else {
-                       http_header = header.substr(0, split + 2);  // Split off the second \r\n.
-                       stream_header = header.substr(split, string::npos);
-               }
-       }
-
-       // Older versions did not set last_suitable_starting_point.
-       if (serialized.has_last_suitable_starting_point()) {
-               last_suitable_starting_point = serialized.last_suitable_starting_point();
-       } else {
-               last_suitable_starting_point = bytes_received;
-       }
+       assert(serialized.has_last_suitable_starting_point());
+       last_suitable_starting_point = serialized.last_suitable_starting_point();
 
        pthread_mutex_init(&queued_data_mutex, NULL);
 }
@@ -87,6 +71,7 @@ StreamProto Stream::serialize()
        serialized.set_stream_header(stream_header);
        serialized.add_data_fds(data_fd);
        serialized.set_backlog_size(backlog_size);
+       serialized.set_prebuffering_bytes(prebuffering_bytes);
        serialized.set_bytes_received(bytes_received);
        serialized.set_last_suitable_starting_point(last_suitable_starting_point);
        serialized.set_url(url);
@@ -129,13 +114,14 @@ void Stream::set_backlog_size(size_t new_size)
 
        // Now cheat a bit by rewinding, and adding all the old data back.
        bytes_received -= existing_data.size();
-       iovec iov;
-       iov.iov_base = const_cast<char *>(existing_data.data());
-       iov.iov_len = existing_data.size();
-
-       vector<iovec> iovs;
-       iovs.push_back(iov);
-       add_data_raw(iovs);
+       DataElement data_element;
+       data_element.data.iov_base = const_cast<char *>(existing_data.data());
+       data_element.data.iov_len = existing_data.size();
+       data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;  // Ignored by add_data_raw().
+
+       vector<DataElement> data_elements;
+       data_elements.push_back(data_element);
+       add_data_raw(data_elements);
 }
 
 void Stream::put_client_to_sleep(Client *client)
@@ -144,20 +130,20 @@ void Stream::put_client_to_sleep(Client *client)
 }
 
 // Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
-vector<iovec> collect_iovecs(const vector<iovec> &data, size_t bytes_wanted)
+vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
 {
        vector<iovec> ret;
        size_t max_iovecs = std::min<size_t>(data.size(), IOV_MAX);
        for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
-               if (data[i].iov_len <= bytes_wanted) {
+               if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
-                       ret.push_back(data[i]);
-                       bytes_wanted -= data[i].iov_len;
+                       ret.push_back(data[i].data);
+                       bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only parts of this iovec.
                        iovec iov;
-                       iov.iov_base = data[i].iov_base;
-                       iov.iov_len = bytes_wanted;     
+                       iov.iov_base = data[i].data.iov_base;
+                       iov.iov_len = bytes_wanted;
                        ret.push_back(iov);
                        bytes_wanted = 0;
                }
@@ -166,20 +152,21 @@ vector<iovec> collect_iovecs(const vector<iovec> &data, size_t bytes_wanted)
 }
 
 // Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
-vector<iovec> remove_iovecs(const vector<iovec> &data, size_t bytes_wanted)
+vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
 {
-       vector<iovec> ret;
+       vector<Stream::DataElement> ret;
        size_t i;
        for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
-               if (data[i].iov_len <= bytes_wanted) {
+               if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
-                       bytes_wanted -= data[i].iov_len;
+                       bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only parts of this iovec.
-                       iovec iov;
-                       iov.iov_base = reinterpret_cast<char *>(data[i].iov_base) + bytes_wanted;
-                       iov.iov_len = data[i].iov_len - bytes_wanted;
-                       ret.push_back(iov);
+                       Stream::DataElement data_element;
+                       data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
+                       data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
+                       data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
+                       ret.push_back(data_element);
                        bytes_wanted = 0;
                }
        }
@@ -189,9 +176,9 @@ vector<iovec> remove_iovecs(const vector<iovec> &data, size_t bytes_wanted)
        return ret;
 }
 
-void Stream::add_data_raw(const vector<iovec> &orig_data)
+void Stream::add_data_raw(const vector<DataElement> &orig_data)
 {
-       vector<iovec> data = orig_data;
+       vector<DataElement> data = orig_data;
        while (!data.empty()) {
                size_t pos = bytes_received % backlog_size;
 
@@ -220,9 +207,9 @@ void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitab
        MutexLock lock(&queued_data_mutex);
        assert(suitable_for_stream_start == SUITABLE_FOR_STREAM_START ||
               suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START);
-       if (suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
-               queued_data_last_starting_point = queued_data.size();
-       }
+
+       DataElement data_element;
+       data_element.suitable_for_stream_start = suitable_for_stream_start;
 
        if (encoding == Stream::STREAM_ENCODING_METACUBE) {
                // Add a Metacube block header before the data.
@@ -235,22 +222,20 @@ void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitab
                }
                hdr.csum = htons(metacube2_compute_crc(&hdr));
 
-               iovec iov;
-               iov.iov_base = new char[bytes + sizeof(hdr)];
-               iov.iov_len = bytes + sizeof(hdr);
+               data_element.data.iov_base = new char[bytes + sizeof(hdr)];
+               data_element.data.iov_len = bytes + sizeof(hdr);
 
-               memcpy(iov.iov_base, &hdr, sizeof(hdr));
-               memcpy(reinterpret_cast<char *>(iov.iov_base) + sizeof(hdr), data, bytes);
+               memcpy(data_element.data.iov_base, &hdr, sizeof(hdr));
+               memcpy(reinterpret_cast<char *>(data_element.data.iov_base) + sizeof(hdr), data, bytes);
 
-               queued_data.push_back(iov);
+               queued_data.push_back(data_element);
        } else if (encoding == Stream::STREAM_ENCODING_RAW) {
                // Just add the data itself.
-               iovec iov;
-               iov.iov_base = new char[bytes];
-               memcpy(iov.iov_base, data, bytes);
-               iov.iov_len = bytes;
+               data_element.data.iov_base = new char[bytes];
+               memcpy(data_element.data.iov_base, data, bytes);
+               data_element.data.iov_len = bytes;
 
-               queued_data.push_back(iov);
+               queued_data.push_back(data_element);
        } else {
                assert(false);
        }
@@ -258,8 +243,7 @@ void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitab
 
 void Stream::process_queued_data()
 {
-       std::vector<iovec> queued_data_copy;
-       int queued_data_last_starting_point_copy = -1;
+       std::vector<DataElement> queued_data_copy;
 
        // Hold the lock for as short as possible, since add_data_raw() can possibly
        // write to disk, which might disturb the input thread.
@@ -270,22 +254,21 @@ void Stream::process_queued_data()
                }
 
                swap(queued_data, queued_data_copy);
-               swap(queued_data_last_starting_point, queued_data_last_starting_point_copy);
        }
 
        // Update the last suitable starting point for the stream,
        // if the queued data contains such a starting point.
-       assert(queued_data_last_starting_point_copy < ssize_t(queued_data_copy.size()));
-       if (queued_data_last_starting_point_copy >= 0) {
-               last_suitable_starting_point = bytes_received;
-               for (int i = 0; i < queued_data_last_starting_point_copy; ++i) {
-                       last_suitable_starting_point += queued_data_copy[i].iov_len;
+       size_t byte_position = bytes_received;
+       for (size_t i = 0; i < queued_data_copy.size(); ++i) {
+               if (queued_data_copy[i].suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
+                       last_suitable_starting_point = byte_position;
                }
+               byte_position += queued_data_copy[i].data.iov_len;
        }
 
        add_data_raw(queued_data_copy);
        for (size_t i = 0; i < queued_data_copy.size(); ++i) {
-               char *data = reinterpret_cast<char *>(queued_data_copy[i].iov_base);
+               char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
                delete[] data;
        }
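For reference, here is a small self-contained sketch (not part of the commit) of the bookkeeping that process_queued_data() now performs: since every queued element carries its own flag, the byte offset of the last element marked SUITABLE_FOR_STREAM_START becomes last_suitable_starting_point, with no separate index to keep in sync.

// Standalone illustration of the new starting-point scan; names mirror the
// diff above, but this file is not part of cubemap.
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <vector>

enum StreamStartSuitability { NOT_SUITABLE_FOR_STREAM_START, SUITABLE_FOR_STREAM_START };

struct DataElement {
	iovec data;
	StreamStartSuitability suitable_for_stream_start;
};

int main()
{
	char keyframe[] = "keyframe data";   // e.g. start of a GOP
	char delta[] = "delta frame data";   // cannot start playback here

	std::vector<DataElement> queued;
	DataElement elem;

	elem.data.iov_base = keyframe;
	elem.data.iov_len = strlen(keyframe);
	elem.suitable_for_stream_start = SUITABLE_FOR_STREAM_START;
	queued.push_back(elem);

	elem.data.iov_base = delta;
	elem.data.iov_len = strlen(delta);
	elem.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
	queued.push_back(elem);

	size_t bytes_received = 1000;               // bytes already in the backlog
	ssize_t last_suitable_starting_point = -1;  // -1 = none seen yet

	// Same loop shape as in process_queued_data(): walk the queue and remember
	// the byte position of the last element flagged as a valid start.
	size_t byte_position = bytes_received;
	for (size_t i = 0; i < queued.size(); ++i) {
		if (queued[i].suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
			last_suitable_starting_point = byte_position;
		}
		byte_position += queued[i].data.iov_len;
	}

	assert(last_suitable_starting_point == 1000);
	printf("last suitable starting point: %zd\n", last_suitable_starting_point);
	return 0;
}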