X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=stream.cpp;h=b498b2e6f7108ced5fab2f0f4d87109521671ca8;hp=df131932435fd9a095edc50ec50c3c099a66a5a7;hb=6544fa0ec3f3a501bcb89ea977756911bd7f3ebd;hpb=6889a665614e926437484a556124a5ff60363568

diff --git a/stream.cpp b/stream.cpp
index df13193..b498b2e 100644
--- a/stream.cpp
+++ b/stream.cpp
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include "log.h"
@@ -19,16 +20,15 @@
 
 using namespace std;
 
-Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding)
+Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding, Encoding src_encoding)
        : url(url),
          encoding(encoding),
+         src_encoding(src_encoding),
          data_fd(make_tempfile("")),
          backlog_size(backlog_size),
          prebuffering_bytes(prebuffering_bytes),
          bytes_received(0),
-         last_suitable_starting_point(-1),
-         pacing_rate(~0U),
-         queued_data_last_starting_point(-1)
+         pacing_rate(~0U)
 {
        if (data_fd == -1) {
                exit(1);
@@ -53,15 +53,22 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
          backlog_size(serialized.backlog_size()),
          prebuffering_bytes(serialized.prebuffering_bytes()),
          bytes_received(serialized.bytes_received()),
-         pacing_rate(~0U),
-         queued_data_last_starting_point(-1)
+         pacing_rate(~0U)
 {
        if (data_fd == -1) {
                exit(1);
        }
 
-       assert(serialized.has_last_suitable_starting_point());
-       last_suitable_starting_point = serialized.last_suitable_starting_point();
+       for (int i = 0; i < serialized.suitable_starting_point_size(); ++i) {
+               ssize_t point = serialized.suitable_starting_point(i);
+               if (point == -1) {
+                       // Can happen when upgrading from before 1.1.3,
+                       // where this was an optional field with -1 signifying
+                       // "no such point".
+                       continue;
+               }
+               suitable_starting_points.push_back(point);
+       }
 
        pthread_mutex_init(&queued_data_mutex, NULL);
 }
@@ -75,7 +82,9 @@ StreamProto Stream::serialize()
        serialized.set_backlog_size(backlog_size);
        serialized.set_prebuffering_bytes(prebuffering_bytes);
        serialized.set_bytes_received(bytes_received);
-       serialized.set_last_suitable_starting_point(last_suitable_starting_point);
+       for (size_t i = 0; i < suitable_starting_points.size(); ++i) {
+               serialized.add_suitable_starting_point(suitable_starting_points[i]);
+       }
        serialized.set_url(url);
        data_fd = -1;
        return serialized;
@@ -116,13 +125,15 @@ void Stream::set_backlog_size(size_t new_size)
        // Now cheat a bit by rewinding, and adding all the old data back.
        bytes_received -= existing_data.size();
-       iovec iov;
-       iov.iov_base = const_cast<char *>(existing_data.data());
-       iov.iov_len = existing_data.size();
-
-       vector<iovec> iovs;
-       iovs.push_back(iov);
-       add_data_raw(iovs);
+       DataElement data_element;
+       data_element.data.iov_base = const_cast<char *>(existing_data.data());
+       data_element.data.iov_len = existing_data.size();
+       data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;  // Ignored by add_data_raw().
+
+       vector<DataElement> data_elements;
+       data_elements.push_back(data_element);
+       add_data_raw(data_elements);
+       remove_obsolete_starting_points();
 }
 
 void Stream::put_client_to_sleep(Client *client)
 {
@@ -131,20 +142,20 @@
 }
 
 // Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
-vector<iovec> collect_iovecs(const vector<iovec> &data, size_t bytes_wanted)
+vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
 {
        vector<iovec> ret;
-       size_t max_iovecs = std::min<size_t>(data.size(), IOV_MAX);
+       size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
        for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
-               if (data[i].iov_len <= bytes_wanted) {
+               if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
-                       ret.push_back(data[i]);
-                       bytes_wanted -= data[i].iov_len;
+                       ret.push_back(data[i].data);
+                       bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only parts of this iovec.
                        iovec iov;
-                       iov.iov_base = data[i].iov_base;
-                       iov.iov_len = bytes_wanted;
+                       iov.iov_base = data[i].data.iov_base;
+                       iov.iov_len = bytes_wanted;
                        ret.push_back(iov);
                        bytes_wanted = 0;
                }
@@ -153,20 +164,21 @@ vector<iovec> collect_iovecs(const vector<iovec> &data, size_t bytes_wanted)
 }
 
 // Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
-vector<iovec> remove_iovecs(const vector<iovec> &data, size_t bytes_wanted)
+vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
 {
-       vector<iovec> ret;
+       vector<Stream::DataElement> ret;
        size_t i;
        for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
-               if (data[i].iov_len <= bytes_wanted) {
+               if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
-                       bytes_wanted -= data[i].iov_len;
+                       bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only parts of this iovec.
-                       iovec iov;
-                       iov.iov_base = reinterpret_cast<char *>(data[i].iov_base) + bytes_wanted;
-                       iov.iov_len = data[i].iov_len - bytes_wanted;
-                       ret.push_back(iov);
+                       Stream::DataElement data_element;
+                       data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
+                       data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
+                       data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
+                       ret.push_back(data_element);
                        bytes_wanted = 0;
                }
        }
@@ -176,9 +188,9 @@ vector<iovec> remove_iovecs(const vector<iovec> &data, size_t bytes_wanted)
        return ret;
 }
 
-void Stream::add_data_raw(const vector<iovec> &orig_data)
+void Stream::add_data_raw(const vector<DataElement> &orig_data)
 {
-       vector<iovec> data = orig_data;
+       vector<DataElement> data = orig_data;
 
        while (!data.empty()) {
                size_t pos = bytes_received % backlog_size;
@@ -202,14 +214,24 @@
        }
 }
 
+void Stream::remove_obsolete_starting_points()
+{
+       // We could do a binary search here (std::lower_bound), but it seems
+       // overkill for removing what's probably only a few points.
+       while (!suitable_starting_points.empty() &&
+              bytes_received - suitable_starting_points[0] > backlog_size) {
+               suitable_starting_points.pop_front();
+       }
+}
+
 void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
 {
        MutexLock lock(&queued_data_mutex);
        assert(suitable_for_stream_start == SUITABLE_FOR_STREAM_START ||
               suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START);
-       if (suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
-               queued_data_last_starting_point = queued_data.size();
-       }
+
+       DataElement data_element;
+       data_element.suitable_for_stream_start = suitable_for_stream_start;
 
        if (encoding == Stream::STREAM_ENCODING_METACUBE) {
                // Add a Metacube block header before the data.
@@ -222,22 +244,20 @@ void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitab
                }
                hdr.csum = htons(metacube2_compute_crc(&hdr));
 
-               iovec iov;
-               iov.iov_base = new char[bytes + sizeof(hdr)];
-               iov.iov_len = bytes + sizeof(hdr);
+               data_element.data.iov_base = new char[bytes + sizeof(hdr)];
+               data_element.data.iov_len = bytes + sizeof(hdr);
 
-               memcpy(iov.iov_base, &hdr, sizeof(hdr));
-               memcpy(reinterpret_cast<char *>(iov.iov_base) + sizeof(hdr), data, bytes);
+               memcpy(data_element.data.iov_base, &hdr, sizeof(hdr));
+               memcpy(reinterpret_cast<char *>(data_element.data.iov_base) + sizeof(hdr), data, bytes);
 
-               queued_data.push_back(iov);
+               queued_data.push_back(data_element);
        } else if (encoding == Stream::STREAM_ENCODING_RAW) {
                // Just add the data itself.
-               iovec iov;
-               iov.iov_base = new char[bytes];
-               memcpy(iov.iov_base, data, bytes);
-               iov.iov_len = bytes;
+               data_element.data.iov_base = new char[bytes];
+               memcpy(data_element.data.iov_base, data, bytes);
+               data_element.data.iov_len = bytes;
 
-               queued_data.push_back(iov);
+               queued_data.push_back(data_element);
        } else {
                assert(false);
        }
@@ -245,8 +265,7 @@ void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitab
 }
 
 void Stream::process_queued_data()
 {
-       std::vector<iovec> queued_data_copy;
-       int queued_data_last_starting_point_copy = -1;
+       vector<DataElement> queued_data_copy;
        // Hold the lock for as short as possible, since add_data_raw() can possibly
        // write to disk, which might disturb the input thread.
@@ -257,22 +276,34 @@ void Stream::process_queued_data()
                }
 
                swap(queued_data, queued_data_copy);
-               swap(queued_data_last_starting_point, queued_data_last_starting_point_copy);
        }
 
-       // Update the last suitable starting point for the stream,
-       // if the queued data contains such a starting point.
-       assert(queued_data_last_starting_point_copy < ssize_t(queued_data_copy.size()));
-       if (queued_data_last_starting_point_copy >= 0) {
-               last_suitable_starting_point = bytes_received;
-               for (int i = 0; i < queued_data_last_starting_point_copy; ++i) {
-                       last_suitable_starting_point += queued_data_copy[i].iov_len;
+       // Add suitable starting points for the stream, if the queued data
+       // contains such starting points. Note that we drop starting points
+       // if they're less than 10 kB apart, so that we don't get a huge
+       // amount of them for e.g. each and every MPEG-TS 188-byte cell.
+       // The 10 kB value is somewhat arbitrary, but at least it should make
+       // the RAM cost of saving the position ~0.1% (or less) of the actual
+       // data, and 10 kB is a very fine granularity in most streams.
+       static const int minimum_start_point_distance = 10240;
+       size_t byte_position = bytes_received;
+       for (size_t i = 0; i < queued_data_copy.size(); ++i) {
+               if (queued_data_copy[i].suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
+                       size_t num_points = suitable_starting_points.size();
+                       if (num_points >= 2 &&
+                           suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
+                               // p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
+                               suitable_starting_points.pop_back();
+                       }
+                       suitable_starting_points.push_back(byte_position);
                }
+               byte_position += queued_data_copy[i].data.iov_len;
        }
 
        add_data_raw(queued_data_copy);
+       remove_obsolete_starting_points();
        for (size_t i = 0; i < queued_data_copy.size(); ++i) {
-               char *data = reinterpret_cast<char *>(queued_data_copy[i].iov_base);
+               char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
                delete[] data;
        }
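The thinning of starting points in process_queued_data() is the heart of the patch, so here is the idea in isolation. This is a minimal standalone sketch, not cubemap code: the deque, the 10 kB constant, and the drop rule mirror the hunk above, while add_starting_point() and the MPEG-TS-style driver in main() are invented for illustration.

#include <cstddef>
#include <cstdio>
#include <deque>

// Same value and rule as in process_queued_data() above.
static const size_t minimum_start_point_distance = 10240;

// Record a new suitable starting point, dropping the previous one if it
// sits less than 10 kB after its predecessor. (Hypothetical helper.)
void add_starting_point(std::deque<size_t> *points, size_t byte_position)
{
        size_t n = points->size();
        if (n >= 2 && (*points)[n - 1] - (*points)[n - 2] < minimum_start_point_distance) {
                // p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
                points->pop_back();
        }
        points->push_back(byte_position);
}

int main()
{
        std::deque<size_t> points;
        // Feed one starting point per 188-byte MPEG-TS cell over ~100 kB.
        for (size_t pos = 0; pos < 100000; pos += 188) {
                add_starting_point(&points, pos);
        }
        // Roughly one point per 10 kB survives, instead of ~530.
        printf("%zu starting points kept\n", points.size());
}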
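remove_obsolete_starting_points() guards the other end of the deque. The backlog is a fixed-size ring buffer (add_data_raw() writes at bytes_received % backlog_size), so once bytes_received is more than backlog_size past a stored offset, the bytes at that offset have been overwritten and the point can no longer serve as a stream start. A sketch of that invariant, with the struct and the numbers in main() made up for the example:

#include <cassert>
#include <cstddef>
#include <deque>

// Stand-in for the relevant members of Stream (hypothetical, for illustration).
struct BacklogSketch {
        size_t backlog_size;
        size_t bytes_received;
        std::deque<size_t> suitable_starting_points;

        // Same linear scan as the patch; std::lower_bound would also work,
        // but is overkill for what is typically only a few points.
        void remove_obsolete_starting_points()
        {
                while (!suitable_starting_points.empty() &&
                       bytes_received - suitable_starting_points[0] > backlog_size) {
                        suitable_starting_points.pop_front();
                }
        }
};

int main()
{
        BacklogSketch s;
        s.backlog_size = 1 << 20;  // 1 MB ring buffer.
        s.suitable_starting_points.push_back(0);
        s.suitable_starting_points.push_back(500000);
        s.suitable_starting_points.push_back(1000000);
        s.bytes_received = 1500000;  // Offset 0 now lies outside the backlog...
        s.remove_obsolete_starting_points();
        assert(s.suitable_starting_points.front() == 500000);  // ...so it was dropped.
}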
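Finally, the collect_iovecs()/remove_iovecs() pair follows the usual pattern for partial writes against a queue of buffers: build an iovec array covering the first N bytes for writev(), then drop exactly the bytes that actually went out. A simplified sketch of that pattern, without the DataElement wrapper and without the IOV_MAX cap the real functions apply (the function names here are not cubemap's):

#include <sys/uio.h>
#include <cstddef>
#include <vector>

// First <bytes_wanted> bytes of <data>, ready to hand to writev().
std::vector<iovec> take_prefix(const std::vector<iovec> &data, size_t bytes_wanted)
{
        std::vector<iovec> ret;
        for (size_t i = 0; i < data.size() && bytes_wanted > 0; ++i) {
                iovec iov = data[i];
                if (iov.iov_len > bytes_wanted) {
                        iov.iov_len = bytes_wanted;  // Split this buffer at the boundary.
                }
                ret.push_back(iov);
                bytes_wanted -= iov.iov_len;
        }
        return ret;
}

// Everything in <data> except the first <bytes_wanted> bytes
// (e.g., after a short write told us how much was consumed).
std::vector<iovec> drop_prefix(const std::vector<iovec> &data, size_t bytes_wanted)
{
        std::vector<iovec> ret;
        for (size_t i = 0; i < data.size(); ++i) {
                if (bytes_wanted >= data[i].iov_len) {
                        bytes_wanted -= data[i].iov_len;  // Fully consumed; skip it.
                } else {
                        iovec iov;
                        iov.iov_base = reinterpret_cast<char *>(data[i].iov_base) + bytes_wanted;
                        iov.iov_len = data[i].iov_len - bytes_wanted;
                        ret.push_back(iov);
                        bytes_wanted = 0;
                }
        }
        return ret;
}

On a short write of n bytes, the caller would hand the result of take_prefix() to writev() and then replace its queue with drop_prefix(queue, n), which is exactly the shape of the add_data_raw() loop above.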