X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=stream.cpp;h=df131932435fd9a095edc50ec50c3c099a66a5a7;hp=0c0fabac29c90f99330cfc8c5d634f3c78321f95;hb=6889a665614e926437484a556124a5ff60363568;hpb=c0ffd43e514e6392835ef85529423744263809af

diff --git a/stream.cpp b/stream.cpp
index 0c0faba..df13193 100644
--- a/stream.cpp
+++ b/stream.cpp
@@ -1,31 +1,40 @@
 #include 
 #include 
+#include 
 #include 
+#include 
 #include 
 #include 
 #include 
 
+#include 
 #include 
 
 #include "log.h"
-#include "metacube.h"
+#include "metacube2.h"
+#include "mutexlock.h"
 #include "state.pb.h"
 #include "stream.h"
 #include "util.h"
 
 using namespace std;
 
-Stream::Stream(const string &url, size_t backlog_size, Encoding encoding)
+Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding)
 	: url(url),
 	  encoding(encoding),
 	  data_fd(make_tempfile("")),
 	  backlog_size(backlog_size),
+	  prebuffering_bytes(prebuffering_bytes),
 	  bytes_received(0),
-	  mark_pool(NULL)
+	  last_suitable_starting_point(-1),
+	  pacing_rate(~0U),
+	  queued_data_last_starting_point(-1)
 {
 	if (data_fd == -1) {
 		exit(1);
 	}
+
+	pthread_mutex_init(&queued_data_mutex, NULL);
 }
 
 Stream::~Stream()
@@ -42,25 +51,19 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
 	  encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
 	  data_fd(data_fd),
 	  backlog_size(serialized.backlog_size()),
+	  prebuffering_bytes(serialized.prebuffering_bytes()),
 	  bytes_received(serialized.bytes_received()),
-	  mark_pool(NULL)
+	  pacing_rate(~0U),
+	  queued_data_last_starting_point(-1)
 {
 	if (data_fd == -1) {
 		exit(1);
 	}
 
-	// Split old-style headers into HTTP and video headers.
-	if (!serialized.header().empty()) {
-		string header = serialized.header();
-		size_t split = header.find("\r\n\r\n");
-		if (split == string::npos) {
-			http_header = header;
-			stream_header = "";
-		} else {
-			http_header = header.substr(0, split + 2);  // Split off the second \r\n.
-			stream_header = header.substr(split, string::npos);
-		}
-	}
+	assert(serialized.has_last_suitable_starting_point());
+	last_suitable_starting_point = serialized.last_suitable_starting_point();
+
+	pthread_mutex_init(&queued_data_mutex, NULL);
 }
 
 StreamProto Stream::serialize()
@@ -70,7 +73,9 @@ StreamProto Stream::serialize()
 	serialized.set_stream_header(stream_header);
 	serialized.add_data_fds(data_fd);
 	serialized.set_backlog_size(backlog_size);
+	serialized.set_prebuffering_bytes(prebuffering_bytes);
 	serialized.set_bytes_received(bytes_received);
+	serialized.set_last_suitable_starting_point(last_suitable_starting_point);
 	serialized.set_url(url);
 	data_fd = -1;
 	return serialized;
@@ -129,7 +134,8 @@ void Stream::put_client_to_sleep(Client *client)
 vector<iovec> collect_iovecs(const vector<iovec> &data, size_t bytes_wanted)
 {
 	vector<iovec> ret;
-	for (size_t i = 0; i < data.size() && bytes_wanted > 0; ++i) {
+	size_t max_iovecs = std::min<size_t>(data.size(), IOV_MAX);
+	for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
 		if (data[i].iov_len <= bytes_wanted) {
 			// Consume the entire iovec.
 			ret.push_back(data[i]);
@@ -196,14 +202,25 @@ void Stream::add_data_raw(const vector<iovec> &orig_data)
 	}
 }
 
-void Stream::add_data_deferred(const char *data, size_t bytes)
+void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
 {
+	MutexLock lock(&queued_data_mutex);
+	assert(suitable_for_stream_start == SUITABLE_FOR_STREAM_START ||
+	       suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START);
+	if (suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
+		queued_data_last_starting_point = queued_data.size();
+	}
+
 	if (encoding == Stream::STREAM_ENCODING_METACUBE) {
 		// Add a Metacube block header before the data.
-		metacube_block_header hdr;
-		memcpy(hdr.sync, METACUBE_SYNC, sizeof(hdr.sync));
+		metacube2_block_header hdr;
+		memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
 		hdr.size = htonl(bytes);
-		hdr.flags = htonl(0);
+		hdr.flags = htons(0);
+		if (suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START) {
+			hdr.flags |= htons(METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START);
+		}
+		hdr.csum = htons(metacube2_compute_crc(&hdr));
 
 		iovec iov;
 		iov.iov_base = new char[bytes + sizeof(hdr)];
@@ -228,16 +245,36 @@ void Stream::add_data_deferred(const char *data, size_t bytes)
 
 void Stream::process_queued_data()
 {
-	if (queued_data.empty()) {
-		return;
+	std::vector<iovec> queued_data_copy;
+	int queued_data_last_starting_point_copy = -1;
+
+	// Hold the lock for as short as possible, since add_data_raw() can possibly
+	// write to disk, which might disturb the input thread.
+	{
+		MutexLock lock(&queued_data_mutex);
+		if (queued_data.empty()) {
+			return;
+		}
+
+		swap(queued_data, queued_data_copy);
+		swap(queued_data_last_starting_point, queued_data_last_starting_point_copy);
+	}
+
+	// Update the last suitable starting point for the stream,
+	// if the queued data contains such a starting point.
+	assert(queued_data_last_starting_point_copy < ssize_t(queued_data_copy.size()));
+	if (queued_data_last_starting_point_copy >= 0) {
+		last_suitable_starting_point = bytes_received;
+		for (int i = 0; i < queued_data_last_starting_point_copy; ++i) {
+			last_suitable_starting_point += queued_data_copy[i].iov_len;
+		}
 	}
 
-	add_data_raw(queued_data);
-	for (size_t i = 0; i < queued_data.size(); ++i) {
-		char *data = reinterpret_cast<char *>(queued_data[i].iov_base);
+	add_data_raw(queued_data_copy);
+	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
+		char *data = reinterpret_cast<char *>(queued_data_copy[i].iov_base);
 		delete[] data;
 	}
-	queued_data.clear();
 
 	// We have more data, so wake up all clients.
 	if (to_process.empty()) {
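
A note on the process_queued_data() change above: the new version holds queued_data_mutex only long enough to swap() the queued iovecs (and the index of the last suitable starting point) into local copies, and then performs the potentially slow add_data_raw() disk write with the lock released, so the input thread calling add_data_deferred() is never blocked behind disk I/O. The sketch below shows that swap-under-lock pattern in isolation; it is illustrative only, uses std::mutex/std::lock_guard rather than the repository's pthread-based MutexLock wrapper, and the PendingQueue name and the write callback are hypothetical.

#include <mutex>
#include <string>
#include <utility>
#include <vector>

// Hypothetical helper illustrating the locking pattern used by
// Stream::process_queued_data(): producers append under a mutex, and the
// consumer swaps the whole queue out under that same mutex, then does the
// slow work (e.g. writing to the backlog on disk) without holding it.
class PendingQueue {
public:
	// Called from the producer (input) thread.
	void add(std::string block)
	{
		std::lock_guard<std::mutex> lock(mu_);
		pending_.push_back(std::move(block));
	}

	// Called from the consumer thread; write_fn may be slow.
	template <class SlowWriteFn>
	void process(SlowWriteFn write_fn)
	{
		std::vector<std::string> copy;
		{
			std::lock_guard<std::mutex> lock(mu_);
			if (pending_.empty()) {
				return;
			}
			copy.swap(pending_);  // Take ownership; leaves pending_ empty.
		}
		// The lock is released here, so producers are not blocked while
		// we do the potentially slow writes.
		for (const std::string &block : copy) {
			write_fn(block);
		}
	}

private:
	std::mutex mu_;
	std::vector<std::string> pending_;
};

The same reasoning explains why the diff drops queued_data.clear(): the swap both empties the queue and moves ownership of the buffers out of the critical section, so the delete[] calls also happen after the lock has been released.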