diff --git a/stream.cpp b/stream.cpp
index 64bf2e8..734110c 100644
--- a/stream.cpp
+++ b/stream.cpp
@@ -20,9 +20,10 @@ using namespace std;
 
-Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding)
+Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding, Encoding src_encoding)
 	: url(url),
 	  encoding(encoding),
+	  src_encoding(src_encoding),
 	  data_fd(make_tempfile("")),
 	  backlog_size(backlog_size),
 	  prebuffering_bytes(prebuffering_bytes),
@@ -58,8 +59,7 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
 		exit(1);
 	}
 
-	for (int i = 0; i < serialized.suitable_starting_point_size(); ++i) {
-		ssize_t point = serialized.suitable_starting_point(i);
+	for (ssize_t point : serialized.suitable_starting_point()) {
 		if (point == -1) {
 			// Can happen when upgrading from before 1.1.3,
 			// where this was an optional field with -1 signifying
@@ -81,8 +81,8 @@ StreamProto Stream::serialize()
 	serialized.set_backlog_size(backlog_size);
 	serialized.set_prebuffering_bytes(prebuffering_bytes);
 	serialized.set_bytes_received(bytes_received);
-	for (size_t i = 0; i < suitable_starting_points.size(); ++i) {
-		serialized.add_suitable_starting_point(suitable_starting_points[i]);
+	for (size_t point : suitable_starting_points) {
+		serialized.add_suitable_starting_point(point);
 	}
 	serialized.set_url(url);
 	data_fd = -1;
@@ -127,7 +127,7 @@ void Stream::set_backlog_size(size_t new_size)
 	DataElement data_element;
 	data_element.data.iov_base = const_cast<char *>(existing_data.data());
 	data_element.data.iov_len = existing_data.size();
-	data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;  // Ignored by add_data_raw().
+	data_element.metacube_flags = 0;  // Ignored by add_data_raw().
 
 	vector<DataElement> data_elements;
 	data_elements.push_back(data_element);
@@ -176,7 +176,7 @@ vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &dat
 		Stream::DataElement data_element;
 		data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
 		data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
-		data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
+		data_element.metacube_flags = METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START;
 		ret.push_back(data_element);
 		bytes_wanted = 0;
 	}
@@ -223,24 +223,29 @@ void Stream::remove_obsolete_starting_points()
 	}
 }
 
-void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
+void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags)
 {
+	// For regular output, we don't want to send the header to the client twice
+	// (it's already sent out together with the HTTP header).
+	// However, for Metacube output, we need to send it so that
+	// the Cubemap instance at the other end has a chance to update it.
+	// It may come twice in its stream, but Cubemap doesn't care.
+	if (encoding == Stream::STREAM_ENCODING_RAW &&
+	    (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
+		return;
+	}
+
 	MutexLock lock(&queued_data_mutex);
-	assert(suitable_for_stream_start == SUITABLE_FOR_STREAM_START ||
-	       suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START);
 
 	DataElement data_element;
-	data_element.suitable_for_stream_start = suitable_for_stream_start;
+	data_element.metacube_flags = metacube_flags;
 
 	if (encoding == Stream::STREAM_ENCODING_METACUBE) {
 		// Add a Metacube block header before the data.
 		metacube2_block_header hdr;
 		memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
 		hdr.size = htonl(bytes);
-		hdr.flags = htons(0);
-		if (suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START) {
-			hdr.flags |= htons(METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START);
-		}
+		hdr.flags = htons(metacube_flags);
 		hdr.csum = htons(metacube2_compute_crc(&hdr));
 
 		data_element.data.iov_base = new char[bytes + sizeof(hdr)];
@@ -286,8 +291,8 @@ void Stream::process_queued_data()
 	// data, and 10 kB is a very fine granularity in most streams.
 	static const int minimum_start_point_distance = 10240;
 	size_t byte_position = bytes_received;
-	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
-		if (queued_data_copy[i].suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
+	for (const DataElement &elem : queued_data_copy) {
+		if ((elem.metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
 			size_t num_points = suitable_starting_points.size();
 			if (num_points >= 2 &&
 			    suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
@@ -296,13 +301,13 @@ void Stream::process_queued_data()
 			}
 			suitable_starting_points.push_back(byte_position);
 		}
-		byte_position += queued_data_copy[i].data.iov_len;
+		byte_position += elem.data.iov_len;
 	}
 
 	add_data_raw(queued_data_copy);
 	remove_obsolete_starting_points();
 
-	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
-		char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
+	for (const DataElement &elem : queued_data_copy) {
+		char *data = reinterpret_cast<char *>(elem.data.iov_base);
 		delete[] data;
 	}
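
A note on the new early return in add_data_deferred(): callers can now hand stream headers to every stream unconditionally. Raw-encoded outputs drop the header block (the client already received the header along with the HTTP response), while Metacube-encoded outputs forward it so the Cubemap instance at the other end can refresh its copy. A hypothetical call site, sketched under the assumption that the header bytes are held in a std::string:

#include <string>

#include "stream.h"
#include "metacube2.h"

// Hypothetical helper, not part of this commit: forward a stream's
// header block through the new flag-based interface. Raw-encoded
// streams ignore the call via the early return added above.
void forward_header(Stream *stream, const std::string &header)
{
	// METACUBE_FLAGS_HEADER marks the block as a (repeatable) header.
	stream->add_data_deferred(header.data(), header.size(), METACUBE_FLAGS_HEADER);
}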
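
For context on the Metacube path: every payload chunk is framed with a metacube2_block_header, and after this change the caller's 16-bit flags word goes into hdr.flags verbatim rather than being reconstructed from the old StreamStartSuitability enum. Below is a minimal, self-contained sketch of that framing, mirroring the hunk above; it assumes the struct layout, METACUBE2_SYNC, and metacube2_compute_crc() provided by the project's metacube2.h.

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

#include <string>

#include "metacube2.h"  // Assumed: metacube2_block_header, METACUBE2_SYNC, metacube2_compute_crc().

// Sketch only: build one Metacube2 block (header + payload), the same
// framing add_data_deferred() performs before queuing the data.
std::string frame_metacube2_block(const char *payload, uint32_t bytes, uint16_t flags)
{
	metacube2_block_header hdr;
	memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));  // 8-byte sync marker.
	hdr.size = htonl(bytes);                             // Payload length, network byte order.
	hdr.flags = htons(flags);                            // Passed through unchanged since this commit.
	hdr.csum = htons(metacube2_compute_crc(&hdr));       // Checksum over the header itself.

	std::string block(reinterpret_cast<const char *>(&hdr), sizeof(hdr));
	block.append(payload, bytes);
	return block;
}

Because flags such as METACUBE_FLAGS_HEADER and METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START now survive the round trip bit for bit, a downstream Cubemap can make the same header and start-point decisions as the upstream one.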
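
The start-point bookkeeping in process_queued_data() is unchanged in spirit: a block whose flags lack METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START marks a suitable starting point, but points closer together than minimum_start_point_distance (10 kB) are coalesced. The body of that inner if falls between the two hunks above; the sketch below fills it in as an assumption (drop the previous point before adding the new one), consistent with how the surrounding condition reads.

#include <stddef.h>

#include <vector>

// Sketch, with an assumed replacement step: keep recorded starting
// points at least min_distance bytes apart by replacing the most
// recent point when a new one arrives too soon after it.
void add_starting_point(std::vector<size_t> *points, size_t byte_position,
                        size_t min_distance = 10240)
{
	size_t num_points = points->size();
	if (num_points >= 2 &&
	    (*points)[num_points - 1] - (*points)[num_points - 2] < min_distance) {
		points->pop_back();  // Assumed behavior; elided from the diff context.
	}
	points->push_back(byte_position);
}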