+ vector<iovec> ret;
+ size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
+ for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
+ if (data[i].data.iov_len <= bytes_wanted) {
+ // Consume the entire iovec.
+ ret.push_back(data[i].data);
+ bytes_wanted -= data[i].data.iov_len;
+ } else {
+ // Take only parts of this iovec.
+ iovec iov;
+ iov.iov_base = data[i].data.iov_base;
+ iov.iov_len = bytes_wanted;
+ ret.push_back(iov);
+ bytes_wanted = 0;
+ }
+ }
+ return ret;
+}
+
+// Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
+vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
+{
+	vector<Stream::DataElement> ret;
+	size_t num_consumed = 0;
+	while (num_consumed < data.size() && bytes_wanted > 0) {
+		const iovec &iov = data[num_consumed].data;
+		if (iov.iov_len <= bytes_wanted) {
+			// This element is consumed in its entirety; skip it.
+			bytes_wanted -= iov.iov_len;
+			++num_consumed;
+		} else {
+			// Only part of this element is consumed; keep its tail.
+			Stream::DataElement tail;
+			tail.data.iov_base = reinterpret_cast<char *>(iov.iov_base) + bytes_wanted;
+			tail.data.iov_len = iov.iov_len - bytes_wanted;
+			// The middle of a block is never a good place for a client to start.
+			tail.metacube_flags = METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START;
+			tail.pts = RationalPTS();
+			ret.push_back(tail);
+			bytes_wanted = 0;
+			++num_consumed;  // The rest of this element is already in <ret>.
+		}
+	}
+
+	// Everything past the consumed prefix is kept unchanged.
+	ret.insert(ret.end(), data.begin() + num_consumed, data.end());
+	return ret;
+}
+
+void Stream::add_data_raw(const vector<DataElement> &orig_data)
+{
+	// Write the data into the circular backlog file, possibly in several
+	// rounds if we wrap around the end of the buffer or pwritev() comes
+	// up short.
+	vector<DataElement> remaining = orig_data;
+	while (!remaining.empty()) {
+		size_t write_pos = bytes_received % backlog_size;
+
+		// Take only as much data as fits before the circular buffer
+		// wraps around, so that one pwritev() call suffices.
+		vector<iovec> iovs = collect_iovecs(remaining, backlog_size - write_pos);
+
+		ssize_t written;
+		do {
+			written = pwritev(data_fd, iovs.data(), iovs.size(), write_pos);
+		} while (written == -1 && errno == EINTR);
+
+		if (written == -1) {
+			log_perror("pwritev");
+			// Dazed and confused, but trying to continue...
+			return;
+		}
+		bytes_received += written;
+
+		// Drop what was actually written; any leftovers go in the next round.
+		remaining = remove_iovecs(remaining, written);
+	}
+}
+
+void Stream::remove_obsolete_starting_points()
+{
+	// Drop starting points that have been overwritten in the circular
+	// backlog. A binary search (std::lower_bound) would also work, but
+	// we typically remove only a few points, so a linear scan is fine.
+	while (!suitable_starting_points.empty()) {
+		if (bytes_received - suitable_starting_points.front() <= backlog_size) {
+			break;
+		}
+		suitable_starting_points.pop_front();
+	}
+	assert(backlog_size >= hls_backlog_margin);
+	// Same for HLS fragments, except that they expire a bit earlier
+	// (by <hls_backlog_margin>), and expiry invalidates the cached playlist.
+	while (!fragments.empty()) {
+		if (bytes_received - fragments.front().byte_position <= backlog_size - hls_backlog_margin) {
+			break;
+		}
+		fragments.pop_front();
+		++first_fragment_index;
+		clear_hls_playlist_cache();
+	}
+}
+
+void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags, const RationalPTS &pts)
+{
+ // For regular output, we don't want to send the client twice
+ // (it's already sent out together with the HTTP header).
+ // However, for Metacube output, we need to send it so that
+ // the Cubemap instance in the other end has a chance to update it.
+ // It may come twice in its stream, but Cubemap doesn't care.
+ if (encoding == Stream::STREAM_ENCODING_RAW &&
+ (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
+ return;
+ }
+
+ lock_guard<mutex> lock(queued_data_mutex);
+
+ DataElement data_element;
+ data_element.metacube_flags = metacube_flags;
+ data_element.pts = pts;
+
+ if (encoding == Stream::STREAM_ENCODING_METACUBE) {
+ // Construct a PTS metadata block. (We'll avoid sending it out
+ // if we don't have a valid PTS.)
+ metacube2_pts_packet pts_packet;
+ pts_packet.type = htobe64(METACUBE_METADATA_TYPE_NEXT_BLOCK_PTS);
+ pts_packet.pts = htobe64(pts.pts);
+ pts_packet.timebase_num = htobe64(pts.timebase_num);
+ pts_packet.timebase_den = htobe64(pts.timebase_den);
+
+ metacube2_block_header pts_hdr;
+ memcpy(pts_hdr.sync, METACUBE2_SYNC, sizeof(pts_hdr.sync));
+ pts_hdr.size = htonl(sizeof(pts_packet));
+ pts_hdr.flags = htons(METACUBE_FLAGS_METADATA);
+ pts_hdr.csum = htons(metacube2_compute_crc(&pts_hdr));
+
+ // Add a Metacube block header before the data.
+ metacube2_block_header hdr;
+ memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));