git.sesse.net Git - cubemap/blobdiff - stream.cpp
Remove std:: from all code in .cpp files, for consistency.
diff --git a/stream.cpp b/stream.cpp
index 557502bee006783f7b386540dd299586fc8ec67b..64bf2e8053e7c0edd79b17f43ba12160a3dd8795 100644
--- a/stream.cpp
+++ b/stream.cpp
@@ -8,6 +8,7 @@
 #include <sys/types.h>
 #include <algorithm>
 #include <string>
+#include <queue>
 #include <vector>
 
 #include "log.h"
@@ -26,7 +27,6 @@ Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes)
         backlog_size(backlog_size),
          prebuffering_bytes(prebuffering_bytes),
          bytes_received(0),
-         last_suitable_starting_point(-1),
          pacing_rate(~0U)
 {
        if (data_fd == -1) {
@@ -58,8 +58,16 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
                exit(1);
        }
 
-       assert(serialized.has_last_suitable_starting_point());
-       last_suitable_starting_point = serialized.last_suitable_starting_point();
+       for (int i = 0; i < serialized.suitable_starting_point_size(); ++i) {
+               ssize_t point = serialized.suitable_starting_point(i);
+               if (point == -1) {
+                       // Can happen when upgrading from before 1.1.3,
+                       // where this was an optional field with -1 signifying
+                       // "no such point".
+                       continue;
+               }
+               suitable_starting_points.push_back(point);
+       }
 
        pthread_mutex_init(&queued_data_mutex, NULL);
 }
@@ -73,7 +81,9 @@ StreamProto Stream::serialize()
        serialized.set_backlog_size(backlog_size);
        serialized.set_prebuffering_bytes(prebuffering_bytes);
        serialized.set_bytes_received(bytes_received);
-       serialized.set_last_suitable_starting_point(last_suitable_starting_point);
+       for (size_t i = 0; i < suitable_starting_points.size(); ++i) {
+               serialized.add_suitable_starting_point(suitable_starting_points[i]);
+       }
        serialized.set_url(url);
        data_fd = -1;
        return serialized;
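
The serialization change above implies a matching schema change for StreamProto, which a blobdiff of stream.cpp alone cannot show. A sketch of what it presumably looks like (proto2 syntax; the field number is invented for illustration):

	// StreamProto schema sketch -- not part of this diff, field number hypothetical:
	message StreamProto {
		...
		repeated int64 suitable_starting_point = 6;  // was: optional int64 last_suitable_starting_point = 6;
	}

Reusing the old field number would keep the change wire-compatible: a non-packed repeated int64 parses an old optional value as a single element, which is how the pre-1.1.3 sentinel value -1 can surface in the deserialization loop above.
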
@@ -114,36 +124,15 @@ void Stream::set_backlog_size(size_t new_size)
 
        // Now cheat a bit by rewinding, and adding all the old data back.
        bytes_received -= existing_data.size();
-
-       size_t bytes_before_suitable_starting_point;
-       if (last_suitable_starting_point == -1) {
-               bytes_before_suitable_starting_point = existing_data.size();
-       } else if (size_t(last_suitable_starting_point) < backlog_size) {
-               bytes_before_suitable_starting_point = 0;
-       } else {
-               bytes_before_suitable_starting_point = last_suitable_starting_point - backlog_size;
-       }
+       DataElement data_element;
+       data_element.data.iov_base = const_cast<char *>(existing_data.data());
+       data_element.data.iov_len = existing_data.size();
+       data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;  // Ignored by add_data_raw().
 
        vector<DataElement> data_elements;
-       if (bytes_before_suitable_starting_point > 0) {
-               // There's really no usable data here (except for ?backlog=1 users),
-               // but we need to get the accounting right anyway.
-               DataElement data_element;
-               data_element.data.iov_base = const_cast<char *>(existing_data.data());
-               data_element.data.iov_len = bytes_before_suitable_starting_point;
-               data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
-               data_elements.push_back(data_element);
-       }
-       if (bytes_before_suitable_starting_point < existing_data.size()) {
-               DataElement data_element;
-               data_element.data.iov_base = const_cast<char *>(existing_data.data() + bytes_before_suitable_starting_point);
-               data_element.data.iov_len = existing_data.size() - bytes_before_suitable_starting_point;
-               data_element.suitable_for_stream_start = SUITABLE_FOR_STREAM_START;
-               data_elements.push_back(data_element);
-       }
-
-       last_suitable_starting_point = -1;
+       data_elements.push_back(data_element);
        add_data_raw(data_elements);
+       remove_obsolete_starting_points();
 }
 
 void Stream::put_client_to_sleep(Client *client)
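
With the starting points now stored separately, the rewind above no longer has to split existing_data at a starting point: the whole backlog is re-added as one element whose suitability flag add_data_raw() ignores, and the deque is simply pruned afterwards. A minimal sketch of the invariant that remove_obsolete_starting_points() restores (assuming a deque<size_t> member; not part of the patch):

	// After pruning, every remaining point is at most backlog_size bytes
	// behind the write head, i.e., still inside the backlog window.
	for (size_t i = 0; i < suitable_starting_points.size(); ++i) {
		assert(bytes_received - suitable_starting_points[i] <= backlog_size);
	}
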
@@ -155,7 +144,7 @@ void Stream::put_client_to_sleep(Client *client)
 vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
 {
        vector<iovec> ret;
-       size_t max_iovecs = std::min<size_t>(data.size(), IOV_MAX);
+       size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
        for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
                if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
@@ -224,6 +213,16 @@ void Stream::add_data_raw(const vector<DataElement> &orig_data)
        }
 }
 
+void Stream::remove_obsolete_starting_points()
+{
+       // We could do a binary search here (std::lower_bound), but it seems
+       // overkill for removing what's probably only a few points.
+       while (!suitable_starting_points.empty() &&
+              bytes_received - suitable_starting_points[0] > backlog_size) {
+               suitable_starting_points.pop_front();
+       }
+}
+
 void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
 {
        MutexLock lock(&queued_data_mutex);
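
The comment in remove_obsolete_starting_points() mentions std::lower_bound as the alternative. Since the deque is kept sorted by construction, that variant would find the first surviving point in O(log n); a hypothetical sketch (again assuming a deque<size_t> member, and guarding the unsigned subtraction):

	if (bytes_received > backlog_size) {
		// Erase every point that has already fallen out of the backlog.
		deque<size_t>::iterator first_valid =
			lower_bound(suitable_starting_points.begin(),
			            suitable_starting_points.end(),
			            bytes_received - backlog_size);
		suitable_starting_points.erase(suitable_starting_points.begin(), first_valid);
	}

For the handful of points that typically expire per call, the linear pop_front() loop in the patch is at least as fast and simpler to read.
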
@@ -265,7 +264,7 @@ void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
 
 void Stream::process_queued_data()
 {
-       std::vector<DataElement> queued_data_copy;
+       vector<DataElement> queued_data_copy;
 
        // Hold the lock for as short as possible, since add_data_raw() can possibly
        // write to disk, which might disturb the input thread.
@@ -278,17 +277,30 @@ void Stream::process_queued_data()
                swap(queued_data, queued_data_copy);
        }
 
-       // Update the last suitable starting point for the stream,
-       // if the queued data contains such a starting point.
+       // Add suitable starting points for the stream, if the queued data
+       // contains such starting points. Note that we drop starting points
+       // if they're less than 10 kB apart, so that we don't get a huge
+       // amount of them for e.g. each and every MPEG-TS 188-byte cell.
+       // The 10 kB value is somewhat arbitrary, but at least it should make
+       // the RAM cost of saving the position ~0.1% (or less) of the actual
+       // data, and 10 kB is a very fine granularity in most streams.
+       static const int minimum_start_point_distance = 10240;
        size_t byte_position = bytes_received;
        for (size_t i = 0; i < queued_data_copy.size(); ++i) {
                if (queued_data_copy[i].suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
-                       last_suitable_starting_point = byte_position;
+                       size_t num_points = suitable_starting_points.size();
+                       if (num_points >= 2 &&
+                           suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
+                               // p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
+                               suitable_starting_points.pop_back();
+                       }
+                       suitable_starting_points.push_back(byte_position);
                }
                byte_position += queued_data_copy[i].data.iov_len;
        }
 
        add_data_raw(queued_data_copy);
+       remove_obsolete_starting_points();
        for (size_t i = 0; i < queued_data_copy.size(); ++i) {
                char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
                delete[] data;
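
To make the thinning rule concrete, a hypothetical trace of the deque as suitable points arrive at the given byte offsets (minimum distance 10240):

	// push 0     -> {0}                  (fewer than two points, no check)
	// push 4096  -> {0, 4096}
	// push 8192  -> 4096 - 0 < 10240, drop 4096  -> {0, 8192}
	// push 20480 -> 8192 - 0 < 10240, drop 8192  -> {0, 20480}
	// push 24576 -> 20480 - 0 >= 10240, keep     -> {0, 20480, 24576}

Note that the check looks at the gap between the two newest stored points, not at the incoming one, so the most recent point is always kept provisionally; it can only be thinned out once a later point arrives.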