Use C++11 std::mutex and std::lock_guard instead of our RAII wrapper.
diff --git a/stream.cpp b/stream.cpp
index 322391ef5c2158dfc04c440301a11b9cc4e792fb..3a4ccc3ed88065a87738796e00e0b0cda627fb55 100644
--- a/stream.cpp
+++ b/stream.cpp
@@ -1,22 +1,31 @@
-#include <stdio.h>
-#include <unistd.h>
+#include <assert.h>
 #include <errno.h>
+#include <limits.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
 #include <algorithm>
 #include <string>
+#include <queue>
 #include <vector>
 
+#include "log.h"
+#include "metacube2.h"
+#include "state.pb.h"
 #include "stream.h"
 #include "util.h"
-#include "state.pb.h"
 
 using namespace std;
 
-Stream::Stream(const string &stream_id, size_t backlog_size)
-       : stream_id(stream_id),
+Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding, Encoding src_encoding)
+       : url(url),
+         encoding(encoding),
+         src_encoding(src_encoding),
          data_fd(make_tempfile("")),
           backlog_size(backlog_size),
-         bytes_received(0),
-         mark_pool(NULL)
+         prebuffering_bytes(prebuffering_bytes)
 {
        if (data_fd == -1) {
                exit(1);
@@ -26,50 +35,275 @@ Stream::Stream(const string &stream_id, size_t backlog_size)
 Stream::~Stream()
 {
        if (data_fd != -1) {
-               int ret;
-               do {
-                       ret = close(data_fd);
-               } while (ret == -1 && errno == EINTR);
-               if (ret == -1) {
-                       perror("close");
-               }
+               safe_close(data_fd);
        }
 }
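
The retry-on-EINTR close loop that used to live here has been folded into safe_close() from util.h. A sketch of what such a wrapper typically looks like (see util.h for cubemap's actual definition):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

// Close <fd>, retrying if the call was interrupted by a signal,
// and log (but otherwise ignore) any error.
void safe_close(int fd)
{
        int ret;
        do {
                ret = close(fd);
        } while (ret == -1 && errno == EINTR);
        if (ret == -1) {
                perror("close");
        }
}
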
 
-Stream::Stream(const StreamProto &serialized)
-       : stream_id(serialized.stream_id()),
-         header(serialized.header()),
-         data_fd(make_tempfile(serialized.data())),
+Stream::Stream(const StreamProto &serialized, int data_fd)
+       : url(serialized.url()),
+         http_header(serialized.http_header()),
+         stream_header(serialized.stream_header()),
+         encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
+         data_fd(data_fd),
          backlog_size(serialized.backlog_size()),
-         bytes_received(serialized.bytes_received()),
-         mark_pool(NULL)
+         prebuffering_bytes(serialized.prebuffering_bytes()),
+         bytes_received(serialized.bytes_received())
 {
        if (data_fd == -1) {
                exit(1);
        }
+
+       for (ssize_t point : serialized.suitable_starting_point()) {
+               if (point == -1) {
+                       // Can happen when upgrading from before 1.1.3,
+                       // where this was an optional field with -1 signifying
+                       // "no such point".
+                       continue;
+               }
+               suitable_starting_points.push_back(point);
+       }
 }
 
 StreamProto Stream::serialize()
 {
        StreamProto serialized;
-       serialized.set_header(header);
-       if (!read_tempfile(data_fd, serialized.mutable_data())) {  // Closes data_fd.
-               exit(1);
-       }
+       serialized.set_http_header(http_header);
+       serialized.set_stream_header(stream_header);
+       serialized.add_data_fds(data_fd);
        serialized.set_backlog_size(backlog_size);
+       serialized.set_prebuffering_bytes(prebuffering_bytes);
        serialized.set_bytes_received(bytes_received);
-       serialized.set_stream_id(stream_id);
+       for (size_t point : suitable_starting_points) {
+               serialized.add_suitable_starting_point(point);
+       }
+       serialized.set_url(url);
        data_fd = -1;
        return serialized;
 }
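
serialize() now records the descriptor number itself (add_data_fds) instead of the backlog contents, so the backlog never has to be read back into memory; this only works if the descriptor survives into the process that deserializes the state, e.g. by staying open across an exec() or by being passed over a UNIX domain socket. For reference, a minimal sketch of the latter (SCM_RIGHTS) mechanism; the helper name send_fd is made up, and this is not cubemap code:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

// Send the file descriptor <fd> over the connected UNIX domain socket
// <sock>, along with one dummy payload byte (at least one byte of real
// data must accompany the ancillary message). Returns 0 on success.
int send_fd(int sock, int fd)
{
        char dummy = 0;
        iovec iov;
        iov.iov_base = &dummy;
        iov.iov_len = 1;

        char control[CMSG_SPACE(sizeof(fd))] = {0};

        msghdr msg = {};
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = control;
        msg.msg_controllen = sizeof(control);

        cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(fd));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));

        return (sendmsg(sock, &msg, 0) == -1) ? -1 : 0;
}
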
+
+void Stream::set_backlog_size(size_t new_size)
+{
+       if (backlog_size == new_size) {
+               return;
+       }
+
+       string existing_data;
+       if (!read_tempfile_and_close(data_fd, &existing_data)) {
+               exit(1);
+       }
+
+       // Unwrap the data so it's no longer circular.
+       if (bytes_received <= backlog_size) {
+               existing_data.resize(bytes_received);
+       } else {
+               size_t pos = bytes_received % backlog_size;
+               existing_data = existing_data.substr(pos, string::npos) +
+                       existing_data.substr(0, pos);
+       }
+
+       // See if we need to discard data.
+       if (new_size < existing_data.size()) {
+               size_t to_discard = existing_data.size() - new_size;
+               existing_data = existing_data.substr(to_discard, string::npos);
+       }
+
+       // Create a new, empty data file.
+       data_fd = make_tempfile("");
+       if (data_fd == -1) {
+               exit(1);
+       }
+       backlog_size = new_size;
+
+       // Now cheat a bit by rewinding, and adding all the old data back.
+       bytes_received -= existing_data.size();
+       DataElement data_element;
+       data_element.data.iov_base = const_cast<char *>(existing_data.data());
+       data_element.data.iov_len = existing_data.size();
+       data_element.metacube_flags = 0;  // Ignored by add_data_raw().
+
+       vector<DataElement> data_elements;
+       data_elements.push_back(data_element);
+       add_data_raw(data_elements);
+       remove_obsolete_starting_points();
+}
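
The unwrapping step above is the crux of the resize: once the buffer has wrapped, the oldest byte lives at offset bytes_received % backlog_size, so splitting at that offset and swapping the halves yields the data in age order. A standalone sketch of the same arithmetic (illustrative values, not cubemap code):

#include <assert.h>
#include <string>

using namespace std;

// Linearize the circular buffer <buf> into oldest-to-newest order, given
// the total number of bytes ever written into it.
string unwrap(const string &buf, size_t bytes_received)
{
        if (bytes_received <= buf.size()) {
                // The buffer never wrapped; only the first bytes are valid.
                return buf.substr(0, bytes_received);
        }
        size_t pos = bytes_received % buf.size();  // The oldest byte lives here.
        return buf.substr(pos) + buf.substr(0, pos);
}

int main()
{
        // A 4-byte backlog that has seen a b c d e f: the writes land at
        // offsets 0 1 2 3 0 1, leaving "efcd" on disk, of which c d e f
        // are the surviving bytes in age order.
        assert(unwrap("efcd", 6) == "cdef");
}
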
 
 void Stream::put_client_to_sleep(Client *client)
 {
        sleeping_clients.push_back(client);
 }
 
-void Stream::wake_up_all_clients()
+// Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
+vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
+{
+       vector<iovec> ret;
+       size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
+       for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
+               if (data[i].data.iov_len <= bytes_wanted) {
+                       // Consume the entire iovec.
+                       ret.push_back(data[i].data);
+                       bytes_wanted -= data[i].data.iov_len;
+               } else {
+                       // Take only part of this iovec.
+                       iovec iov;
+                       iov.iov_base = data[i].data.iov_base;
+                       iov.iov_len = bytes_wanted;
+                       ret.push_back(iov);
+                       bytes_wanted = 0;
+               }
+       }
+       return ret;
+}
+
+// Return a new set of data elements that contains all of <data> except the first <bytes_wanted> bytes.
+vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
+{
+       vector<Stream::DataElement> ret;
+       size_t i;
+       for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
+               if (data[i].data.iov_len <= bytes_wanted) {
+                       // Consume the entire iovec.
+                       bytes_wanted -= data[i].data.iov_len;
+               } else {
+                       // Take only part of this iovec.
+                       Stream::DataElement data_element;
+                       data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
+                       data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
+                       data_element.metacube_flags = METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START;
+                       ret.push_back(data_element);
+                       bytes_wanted = 0;
+               }
+       }
+
+       // Add the rest of the iovecs unchanged.
+       ret.insert(ret.end(), data.begin() + i, data.end());
+       return ret;
+}
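
Taken together, these two helpers split a buffer chain at an arbitrary byte boundary: collect_iovecs() returns the first <bytes_wanted> bytes as plain iovecs ready for pwritev(), and remove_iovecs() returns everything after them, marking a split element as not suitable for stream start since it now begins mid-block. A hypothetical usage sketch, assuming the two helpers are visible to the caller:

#include <sys/uio.h>
#include <vector>

#include "stream.h"

void example()
{
        // Two 4-byte elements; split the chain five bytes in.
        char buf1[] = "abcd", buf2[] = "efgh";
        Stream::DataElement a, b;
        a.data.iov_base = buf1; a.data.iov_len = 4; a.metacube_flags = 0;
        b.data.iov_base = buf2; b.data.iov_len = 4; b.metacube_flags = 0;
        std::vector<Stream::DataElement> data { a, b };

        // The first five bytes: all of "abcd" plus the "e" of "efgh".
        std::vector<iovec> first = collect_iovecs(data, 5);

        // The rest: "fgh", now flagged as unsuitable for starting a stream.
        std::vector<Stream::DataElement> rest = remove_iovecs(data, 5);
}
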
+
+void Stream::add_data_raw(const vector<DataElement> &orig_data)
+{
+       vector<DataElement> data = orig_data;
+       while (!data.empty()) {
+               size_t pos = bytes_received % backlog_size;
+
+               // Collect as many iovecs as we can before we hit the point
+               // where the circular buffer wraps around.
+               vector<iovec> to_write = collect_iovecs(data, backlog_size - pos);
+               ssize_t ret;
+               do {
+                       ret = pwritev(data_fd, to_write.data(), to_write.size(), pos);
+               } while (ret == -1 && errno == EINTR);
+
+               if (ret == -1) {
+                       log_perror("pwritev");
+                       // Dazed and confused, but trying to continue...
+                       return;
+               }
+               bytes_received += ret;
+
+               // Remove the data that was actually written from the set of iovecs.
+               data = remove_iovecs(data, ret);
+       }
+}
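
Note that the loop never writes across the end of the backlog file: collect_iovecs() caps each batch at backlog_size - pos, so a batch that would cross the wrap point is split, and the next iteration starts over at offset 0. A short pwritev() is handled by the same mechanism, since only the bytes actually written are removed from the queue. A runnable trace of the offset arithmetic, with made-up numbers:

#include <stdio.h>
#include <algorithm>

int main()
{
        // Backlog of 100 bytes, 90 bytes received so far, 30 new bytes queued.
        size_t backlog_size = 100, bytes_received = 90, remaining = 30;
        while (remaining > 0) {
                size_t pos = bytes_received % backlog_size;
                size_t chunk = std::min(remaining, backlog_size - pos);
                printf("write %zu bytes at offset %zu\n", chunk, pos);
                bytes_received += chunk;
                remaining -= chunk;
        }
        // Prints: write 10 bytes at offset 90
        //         write 20 bytes at offset 0
}
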
+
+void Stream::remove_obsolete_starting_points()
+{
+       // We could do a binary search here (std::lower_bound), but it seems
+       // overkill for removing what's probably only a few points.
+       while (!suitable_starting_points.empty() &&
+              bytes_received - suitable_starting_points[0] > backlog_size) {
+               suitable_starting_points.pop_front();
+       }
+}
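
Since positions are appended in increasing byte order, suitable_starting_points stays sorted, so only a prefix can ever fall out of the backlog; popping from the front is therefore enough. A runnable illustration with made-up numbers:

#include <assert.h>
#include <deque>

int main()
{
        // Backlog of 100 bytes, 250 bytes received in total, so only
        // positions >= 150 still point into the backlog.
        std::deque<size_t> points { 100, 140, 160, 200 };
        size_t bytes_received = 250, backlog_size = 100;
        while (!points.empty() && bytes_received - points[0] > backlog_size) {
                points.pop_front();
        }
        assert(points.front() == 160);  // 100 and 140 were dropped.
}
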
+
+void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags)
+{
+       // For regular output, we don't want to send the stream header to the
+       // client twice (it is already sent together with the HTTP header).
+       // However, for Metacube output, we need to send it so that the
+       // Cubemap instance on the other end has a chance to update it.
+       // The header may then occur twice in that stream, but Cubemap
+       // doesn't care.
+       if (encoding == Stream::STREAM_ENCODING_RAW &&
+           (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
+               return;
+       }
+
+       lock_guard<mutex> lock(queued_data_mutex);
+
+       DataElement data_element;
+       data_element.metacube_flags = metacube_flags;
+
+       if (encoding == Stream::STREAM_ENCODING_METACUBE) {
+               // Add a Metacube block header before the data.
+               metacube2_block_header hdr;
+               memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
+               hdr.size = htonl(bytes);
+               hdr.flags = htons(metacube_flags);
+               hdr.csum = htons(metacube2_compute_crc(&hdr));
+
+               data_element.data.iov_base = new char[bytes + sizeof(hdr)];
+               data_element.data.iov_len = bytes + sizeof(hdr);
+
+               memcpy(data_element.data.iov_base, &hdr, sizeof(hdr));
+               memcpy(reinterpret_cast<char *>(data_element.data.iov_base) + sizeof(hdr), data, bytes);
+
+               queued_data.push_back(data_element);
+       } else if (encoding == Stream::STREAM_ENCODING_RAW) {
+               // Just add the data itself.
+               data_element.data.iov_base = new char[bytes];
+               memcpy(data_element.data.iov_base, data, bytes);
+               data_element.data.iov_len = bytes;
+
+               queued_data.push_back(data_element);
+       } else {
+               assert(false);
+       }
+}
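
For context, the Metacube2 framing added above is just a fixed-size header in front of every block: an 8-byte sync marker, the payload size, the flags, and a checksum that (as the writer implies) covers only the header's size and flags fields. A hedged sketch of the matching reader side, assuming the struct and helper from metacube2.h; a real reader would also have to handle short reads and resynchronization:

#include <netinet/in.h>
#include <string.h>
#include <unistd.h>

#include "metacube2.h"

// Read one Metacube2 block from <fd> into <buf>. Returns the payload
// size, or -1 on error. Sketch only: assumes read() returns full chunks.
ssize_t read_metacube2_block(int fd, char *buf, size_t buf_size)
{
        metacube2_block_header hdr;
        if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
                return -1;
        }
        if (memcmp(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync)) != 0 ||
            hdr.csum != htons(metacube2_compute_crc(&hdr))) {
                return -1;  // Out of sync, or the header is corrupted.
        }
        size_t payload_size = ntohl(hdr.size);
        if (payload_size > buf_size) {
                return -1;
        }
        if (read(fd, buf, payload_size) != (ssize_t)payload_size) {
                return -1;
        }
        return payload_size;
}
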
+
+void Stream::process_queued_data()
 {
+       vector<DataElement> queued_data_copy;
+
+       // Hold the lock for as short a time as possible, since add_data_raw()
+       // may write to disk, which might disturb the input thread.
+       {
+               lock_guard<mutex> lock(queued_data_mutex);
+               if (queued_data.empty()) {
+                       return;
+               }
+
+               swap(queued_data, queued_data_copy);
+       }
+
+       // Add suitable starting points for the stream, if the queued data
+       // contains any. Note that we drop starting points if they're less
+       // than 10 kB apart, so that we don't get a huge amount of them for
+       // e.g. each and every 188-byte MPEG-TS packet. The 10 kB value is
+       // somewhat arbitrary, but it should keep the RAM cost of storing
+       // the positions at ~0.1% (or less) of the actual data, and 10 kB
+       // is still a very fine granularity in most streams.
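+       // Example with made-up numbers: if the last two points are at 0 and
+       // 5000 and a new one arrives at 12000, then 5000 - 0 < 10240, so the
+       // point at 5000 is dropped and the set becomes {0, 12000}.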
+       static const int minimum_start_point_distance = 10240;
+       size_t byte_position = bytes_received;
+       for (const DataElement &elem : queued_data_copy) {
+               if ((elem.metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
+                       size_t num_points = suitable_starting_points.size();
+                       if (num_points >= 2 &&
+                           suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
+                               // p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
+                               suitable_starting_points.pop_back();
+                       }
+                       suitable_starting_points.push_back(byte_position);
+               }
+               byte_position += elem.data.iov_len;
+       }
+
+       add_data_raw(queued_data_copy);
+       remove_obsolete_starting_points();
+       for (const DataElement &elem : queued_data_copy) {
+               char *data = reinterpret_cast<char *>(elem.data.iov_base);
+               delete[] data;
+       }
+
+       // We have more data, so wake up all clients.
        if (to_process.empty()) {
                swap(sleeping_clients, to_process);
        } else {