+ // We could do a binary search here (std::lower_bound), but it seems
+ // overkill for removing what's probably only a few points.
+ while (!suitable_starting_points.empty() &&
+ bytes_received - suitable_starting_points[0] > backlog_size) {
+ suitable_starting_points.pop_front();
+ }
+ assert(backlog_size >= hls_backlog_margin);
+ while (!fragments.empty() &&
+ bytes_received - fragments[0].byte_position > (backlog_size - hls_backlog_margin)) {
+ fragments.pop_front();
+ ++first_fragment_index;
+ clear_hls_playlist_cache();
+ }
+}
+
+ // Queue a copy of <data> for later output; process_queued_data() drains the
+ // queue. The data is copied into a freshly allocated buffer, so the caller
+ // retains ownership of <data>. For Metacube-encoded streams, the payload is
+ // wrapped in Metacube2 framing here (optionally preceded by a PTS metadata
+ // block); for raw streams, the bytes are queued as-is.
+ // Thread-safe: the queue is only touched under queued_data_mutex.
+ void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags, const RationalPTS &pts)
+ {
+ // For regular (raw) output, we don't want to send the stream header to the
+ // client twice (it's already sent out together with the HTTP header), so
+ // drop header-flagged data here.
+ // However, for Metacube output, we need to send it so that
+ // the Cubemap instance in the other end has a chance to update it.
+ // It may come twice in its stream, but Cubemap doesn't care.
+ if (encoding == Stream::STREAM_ENCODING_RAW &&
+ (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
+ return;
+ }
+
+ lock_guard<mutex> lock(queued_data_mutex);
+
+ DataElement data_element;
+ data_element.metacube_flags = metacube_flags;
+ data_element.pts = pts;
+
+ if (encoding == Stream::STREAM_ENCODING_METACUBE) {
+ // Construct a PTS metadata block, with all fields in big-endian
+ // (network) byte order. (We'll avoid sending it out if we don't
+ // have a valid PTS; see the timebase_num checks below.)
+ metacube2_pts_packet pts_packet;
+ pts_packet.type = htobe64(METACUBE_METADATA_TYPE_NEXT_BLOCK_PTS);
+ pts_packet.pts = htobe64(pts.pts);
+ pts_packet.timebase_num = htobe64(pts.timebase_num);
+ pts_packet.timebase_den = htobe64(pts.timebase_den);
+
+ // Metacube block header framing the PTS metadata packet above.
+ metacube2_block_header pts_hdr;
+ memcpy(pts_hdr.sync, METACUBE2_SYNC, sizeof(pts_hdr.sync));
+ pts_hdr.size = htonl(sizeof(pts_packet));
+ pts_hdr.flags = htons(METACUBE_FLAGS_METADATA);
+ pts_hdr.csum = htons(metacube2_compute_crc(&pts_hdr));
+
+ // Add a Metacube block header before the data.
+ metacube2_block_header hdr;
+ memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
+ hdr.size = htonl(bytes);
+ hdr.flags = htons(metacube_flags);
+ hdr.csum = htons(metacube2_compute_crc(&hdr));
+
+ // Allocate one contiguous buffer and lay the pieces out as
+ // [pts_hdr][pts_packet][hdr][payload]; the PTS part is present only
+ // when the PTS is valid (timebase_num != 0). The buffer is freed by
+ // process_queued_data() once the element has been flushed.
+ data_element.data.iov_len = bytes + sizeof(hdr);
+ if (pts.timebase_num != 0) {
+ data_element.data.iov_len += sizeof(pts_hdr) + sizeof(pts_packet);
+ }
+ data_element.data.iov_base = new char[data_element.data.iov_len];
+
+ char *ptr = reinterpret_cast<char *>(data_element.data.iov_base);
+ if (pts.timebase_num != 0) {
+ memcpy(ptr, &pts_hdr, sizeof(pts_hdr));
+ ptr += sizeof(pts_hdr);
+ memcpy(ptr, &pts_packet, sizeof(pts_packet));
+ ptr += sizeof(pts_packet);
+ }
+
+ memcpy(ptr, &hdr, sizeof(hdr));
+ ptr += sizeof(hdr);
+ memcpy(ptr, data, bytes);
+
+ queued_data.push_back(data_element);
+ } else if (encoding == Stream::STREAM_ENCODING_RAW) {
+ // Just add the data itself, unframed.
+ data_element.data.iov_base = new char[bytes];
+ memcpy(data_element.data.iov_base, data, bytes);
+ data_element.data.iov_len = bytes;
+
+ queued_data.push_back(data_element);
+ } else {
+ // Unknown encoding; this is a programming error.
+ assert(false);
+ }
+ }
+
+void Stream::process_queued_data()
+{
+ vector<DataElement> queued_data_copy;
+
+ // Hold the lock for as short as possible, since add_data_raw() can possibly
+ // write to disk, which might disturb the input thread.
+ {
+ lock_guard<mutex> lock(queued_data_mutex);
+ if (queued_data.empty()) {
+ return;
+ }
+
+ swap(queued_data, queued_data_copy);
+ }
+
+ // Add suitable starting points for the stream, if the queued data
+ // contains such starting points. Note that we drop starting points
+ // if they're less than 10 kB apart, so that we don't get a huge
+ // amount of them for e.g. each and every MPEG-TS 188-byte cell.
+ // The 10 kB value is somewhat arbitrary, but at least it should make
+ // the RAM cost of saving the position ~0.1% (or less) of the actual
+ // data, and 10 kB is a very fine granularity in most streams.
+ static const int minimum_start_point_distance = 10240;
+ size_t byte_position = bytes_received;
+ bool need_hls_clear = false;
+ for (const DataElement &elem : queued_data_copy) {
+ if ((elem.metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
+ size_t num_points = suitable_starting_points.size();
+ if (num_points >= 2 &&
+ suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
+ // p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
+ suitable_starting_points.pop_back();
+ }
+ suitable_starting_points.push_back(byte_position);
+
+ if (elem.pts.timebase_num != 0) {
+ need_hls_clear |= add_fragment_boundary(byte_position, elem.pts);
+ }
+ }
+ byte_position += elem.data.iov_len;
+ }
+ if (need_hls_clear) {
+ clear_hls_playlist_cache();
+ }
+
+ add_data_raw(queued_data_copy);
+ remove_obsolete_starting_points();
+ for (const DataElement &elem : queued_data_copy) {
+ char *data = reinterpret_cast<char *>(elem.data.iov_base);
+ delete[] data;
+ }
+
+ // We have more data, so wake up all clients.