X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=stream.cpp;h=df131932435fd9a095edc50ec50c3c099a66a5a7;hp=e88ad779648e7639fd35a2926476eb446075cb1c;hb=6889a665614e926437484a556124a5ff60363568;hpb=979a284b4039b0ea74525b700b9f1089b8c4248d

diff --git a/stream.cpp b/stream.cpp
index e88ad77..df13193 100644
--- a/stream.cpp
+++ b/stream.cpp
@@ -2,33 +2,39 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
 #include
 #include
 
 #include "log.h"
 #include "metacube2.h"
+#include "mutexlock.h"
 #include "state.pb.h"
 #include "stream.h"
 #include "util.h"
 
 using namespace std;
 
-Stream::Stream(const string &url, size_t backlog_size, Encoding encoding)
+Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding)
 	: url(url),
 	  encoding(encoding),
 	  data_fd(make_tempfile("")),
 	  backlog_size(backlog_size),
+	  prebuffering_bytes(prebuffering_bytes),
 	  bytes_received(0),
 	  last_suitable_starting_point(-1),
-	  mark_pool(NULL),
+	  pacing_rate(~0U),
 	  queued_data_last_starting_point(-1)
 {
 	if (data_fd == -1) {
 		exit(1);
 	}
+
+	pthread_mutex_init(&queued_data_mutex, NULL);
 }
 
 Stream::~Stream()
 {
@@ -45,33 +51,19 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
 	  encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
 	  data_fd(data_fd),
 	  backlog_size(serialized.backlog_size()),
+	  prebuffering_bytes(serialized.prebuffering_bytes()),
 	  bytes_received(serialized.bytes_received()),
-	  mark_pool(NULL),
+	  pacing_rate(~0U),
 	  queued_data_last_starting_point(-1)
 {
 	if (data_fd == -1) {
 		exit(1);
 	}
 
-	// Split old-style headers into HTTP and video headers.
-	if (!serialized.header().empty()) {
-		string header = serialized.header();
-		size_t split = header.find("\r\n\r\n");
-		if (split == string::npos) {
-			http_header = header;
-			stream_header = "";
-		} else {
-			http_header = header.substr(0, split + 2);  // Split off the second \r\n.
-			stream_header = header.substr(split, string::npos);
-		}
-	}
+	assert(serialized.has_last_suitable_starting_point());
+	last_suitable_starting_point = serialized.last_suitable_starting_point();
 
-	// Older versions did not set last_suitable_starting_point.
-	if (serialized.has_last_suitable_starting_point()) {
-		last_suitable_starting_point = serialized.last_suitable_starting_point();
-	} else {
-		last_suitable_starting_point = bytes_received;
-	}
+	pthread_mutex_init(&queued_data_mutex, NULL);
 }
 
 StreamProto Stream::serialize()
@@ -81,6 +73,7 @@ StreamProto Stream::serialize()
 	serialized.set_stream_header(stream_header);
 	serialized.add_data_fds(data_fd);
 	serialized.set_backlog_size(backlog_size);
+	serialized.set_prebuffering_bytes(prebuffering_bytes);
 	serialized.set_bytes_received(bytes_received);
 	serialized.set_last_suitable_starting_point(last_suitable_starting_point);
 	serialized.set_url(url);
@@ -211,6 +204,7 @@ void Stream::add_data_raw(const vector<iovec> &orig_data)
 
 void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
 {
+	MutexLock lock(&queued_data_mutex);
 	assert(suitable_for_stream_start == SUITABLE_FOR_STREAM_START ||
 	       suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START);
 	if (suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
@@ -226,6 +220,7 @@ void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitab
 		if (suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START) {
 			hdr.flags |= htons(METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START);
 		}
+		hdr.csum = htons(metacube2_compute_crc(&hdr));
 
 		iovec iov;
 		iov.iov_base = new char[bytes + sizeof(hdr)];
@@ -250,27 +245,36 @@ void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitab
 
 void Stream::process_queued_data()
 {
-	if (queued_data.empty()) {
-		return;
+	std::vector<iovec> queued_data_copy;
+	int queued_data_last_starting_point_copy = -1;
+
+	// Hold the lock for as short as possible, since add_data_raw() can possibly
+	// write to disk, which might disturb the input thread.
+	{
+		MutexLock lock(&queued_data_mutex);
+		if (queued_data.empty()) {
+			return;
+		}
+
+		swap(queued_data, queued_data_copy);
+		swap(queued_data_last_starting_point, queued_data_last_starting_point_copy);
 	}
 
 	// Update the last suitable starting point for the stream,
 	// if the queued data contains such a starting point.
-	assert(queued_data_last_starting_point < ssize_t(queued_data.size()));
-	if (queued_data_last_starting_point >= 0) {
+	assert(queued_data_last_starting_point_copy < ssize_t(queued_data_copy.size()));
+	if (queued_data_last_starting_point_copy >= 0) {
 		last_suitable_starting_point = bytes_received;
-		for (int i = 0; i < queued_data_last_starting_point; ++i) {
-			last_suitable_starting_point += queued_data[i].iov_len;
+		for (int i = 0; i < queued_data_last_starting_point_copy; ++i) {
+			last_suitable_starting_point += queued_data_copy[i].iov_len;
 		}
 	}
 
-	add_data_raw(queued_data);
-	for (size_t i = 0; i < queued_data.size(); ++i) {
-		char *data = reinterpret_cast<char *>(queued_data[i].iov_base);
+	add_data_raw(queued_data_copy);
+	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
+		char *data = reinterpret_cast<char *>(queued_data_copy[i].iov_base);
 		delete[] data;
 	}
-	queued_data.clear();
-	queued_data_last_starting_point = -1;
 
 	// We have more data, so wake up all clients.
 	if (to_process.empty()) {
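
Side note (not part of the patch): the process_queued_data() change above is an instance of the usual "swap under a short-lived lock" pattern. Producers append to a queue under a mutex; the consumer swaps the queue into a local copy while the lock is held, then does the slow work (here add_data_raw(), which may write to disk) after releasing it, so the input thread is never blocked behind I/O. A minimal, self-contained sketch of that pattern, with invented names and std::mutex instead of cubemap's MutexLock wrapper, could look like this:

#include <mutex>
#include <string>
#include <utility>
#include <vector>

class DeferredQueue {
public:
	// Called from the producer (input) thread; cheap, holds the lock only briefly.
	void add(std::string item)
	{
		std::lock_guard<std::mutex> lock(mu_);
		queued_.push_back(std::move(item));
	}

	// Called from the consumer thread; the lock covers only the swap,
	// never the potentially slow processing of the items.
	template <class Func>
	void process(Func &&do_slow_work)
	{
		std::vector<std::string> copy;
		{
			std::lock_guard<std::mutex> lock(mu_);
			if (queued_.empty()) {
				return;
			}
			swap(queued_, copy);
		}
		for (const std::string &item : copy) {
			do_slow_work(item);  // Runs without the lock held.
		}
	}

private:
	std::mutex mu_;
	std::vector<std::string> queued_;
};

// Example use:
//   DeferredQueue q;
//   q.add("block 1");                     // from the input thread
//   q.process([](const std::string &s) {  // from the worker thread
//           /* slow I/O with s */
//   });

The design point mirrors the comment added in the patch: the mutex only ever protects the cheap queue manipulation, so the thread calling add() cannot be stalled by the consumer's disk writes.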