X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=stream.cpp;h=3a4ccc3ed88065a87738796e00e0b0cda627fb55;hp=4be673c864410e9ddfda17675e9d9137b672da71;hb=50651c954803c1941e6ad1bb494712891c18f7d2;hpb=e0b47eba2f5ec1aca1d02adc9fb4ffc7293d5c0f

diff --git a/stream.cpp b/stream.cpp
index 4be673c..3a4ccc3 100644
--- a/stream.cpp
+++ b/stream.cpp
@@ -1,27 +1,31 @@
 #include <assert.h>
 #include <errno.h>
+#include <limits.h>
 #include <netinet/in.h>
+#include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
-#include <unistd.h>
+#include <sys/types.h>
+#include <sys/uio.h>
 #include <string>
+#include <mutex>
 #include <vector>
 
 #include "log.h"
-#include "metacube.h"
+#include "metacube2.h"
 #include "state.pb.h"
 #include "stream.h"
 #include "util.h"
 
 using namespace std;
 
-Stream::Stream(const string &stream_id, size_t backlog_size, Encoding encoding)
-	: stream_id(stream_id),
+Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding, Encoding src_encoding)
+	: url(url),
 	  encoding(encoding),
+	  src_encoding(src_encoding),
 	  data_fd(make_tempfile("")),
 	  backlog_size(backlog_size),
-	  bytes_received(0),
-	  mark_pool(NULL)
+	  prebuffering_bytes(prebuffering_bytes)
 {
 	if (data_fd == -1) {
 		exit(1);
@@ -36,30 +40,27 @@ Stream::~Stream()
 }
 
 Stream::Stream(const StreamProto &serialized, int data_fd)
-	: stream_id(serialized.stream_id()),
+	: url(serialized.url()),
 	  http_header(serialized.http_header()),
 	  stream_header(serialized.stream_header()),
 	  encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
 	  data_fd(data_fd),
 	  backlog_size(serialized.backlog_size()),
-	  bytes_received(serialized.bytes_received()),
-	  mark_pool(NULL)
+	  prebuffering_bytes(serialized.prebuffering_bytes()),
+	  bytes_received(serialized.bytes_received())
 {
 	if (data_fd == -1) {
 		exit(1);
 	}
 
-	// Split old-style headers into HTTP and video headers.
-	if (!serialized.header().empty()) {
-		string header = serialized.header();
-		size_t split = header.find("\r\n\r\n");
-		if (split == string::npos) {
-			http_header = header;
-			stream_header = "";
-		} else {
-			http_header = header.substr(0, split + 2);  // Split off the second \r\n.
-			stream_header = header.substr(split, string::npos);
+	for (ssize_t point : serialized.suitable_starting_point()) {
+		if (point == -1) {
+			// Can happen when upgrading from before 1.1.3,
+			// where this was an optional field with -1 signifying
+			// "no such point".
+			continue;
 		}
+		suitable_starting_points.push_back(point);
 	}
 }
 
@@ -70,8 +71,12 @@ StreamProto Stream::serialize()
 	serialized.set_stream_header(stream_header);
 	serialized.add_data_fds(data_fd);
 	serialized.set_backlog_size(backlog_size);
+	serialized.set_prebuffering_bytes(prebuffering_bytes);
 	serialized.set_bytes_received(bytes_received);
-	serialized.set_stream_id(stream_id);
+	for (size_t point : suitable_starting_points) {
+		serialized.add_suitable_starting_point(point);
+	}
+	serialized.set_url(url);
 	data_fd = -1;
 	return serialized;
 }
@@ -111,7 +116,15 @@ void Stream::set_backlog_size(size_t new_size)
 	// Now cheat a bit by rewinding, and adding all the old data back.
 	bytes_received -= existing_data.size();
-	add_data_raw(existing_data.data(), existing_data.size());
+	DataElement data_element;
+	data_element.data.iov_base = const_cast<char *>(existing_data.data());
+	data_element.data.iov_len = existing_data.size();
+	data_element.metacube_flags = 0;  // Ignored by add_data_raw().
+
+	vector<DataElement> data_elements;
+	data_elements.push_back(data_element);
+	add_data_raw(data_elements);
+	remove_obsolete_starting_points();
 }
 
 void Stream::put_client_to_sleep(Client *client)
 {
@@ -119,62 +132,128 @@ void Stream::put_client_to_sleep(Client *client)
 	sleeping_clients.push_back(client);
 }
 
-void Stream::add_data_raw(const char *data, ssize_t bytes)
+// Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
+vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
 {
-	size_t pos = bytes_received % backlog_size;
-	bytes_received += bytes;
-
-	if (pos + bytes > backlog_size) {
-		ssize_t to_copy = backlog_size - pos;
-		while (to_copy > 0) {
-			int ret = pwrite(data_fd, data, to_copy, pos);
-			if (ret == -1 && errno == EINTR) {
-				continue;
-			}
-			if (ret == -1) {
-				log_perror("pwrite");
-				// Dazed and confused, but trying to continue...
-				break;
-			}
-			pos += ret;
-			data += ret;
-			to_copy -= ret;
-			bytes -= ret;
+	vector<iovec> ret;
+	size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
+	for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
+		if (data[i].data.iov_len <= bytes_wanted) {
+			// Consume the entire iovec.
+			ret.push_back(data[i].data);
+			bytes_wanted -= data[i].data.iov_len;
+		} else {
+			// Take only parts of this iovec.
+			iovec iov;
+			iov.iov_base = data[i].data.iov_base;
+			iov.iov_len = bytes_wanted;
+			ret.push_back(iov);
+			bytes_wanted = 0;
 		}
-		pos = 0;
 	}
+	return ret;
+}
 
-	while (bytes > 0) {
-		int ret = pwrite(data_fd, data, bytes, pos);
-		if (ret == -1 && errno == EINTR) {
-			continue;
+// Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
+vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
+{
+	vector<Stream::DataElement> ret;
+	size_t i;
+	for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
+		if (data[i].data.iov_len <= bytes_wanted) {
+			// Consume the entire iovec.
+			bytes_wanted -= data[i].data.iov_len;
+		} else {
+			// Take only parts of this iovec.
+			Stream::DataElement data_element;
+			data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
+			data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
+			data_element.metacube_flags = METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START;
+			ret.push_back(data_element);
+			bytes_wanted = 0;
 		}
+	}
+
+	// Add the rest of the iovecs unchanged.
+	ret.insert(ret.end(), data.begin() + i, data.end());
+	return ret;
+}
+
+void Stream::add_data_raw(const vector<DataElement> &orig_data)
+{
+	vector<DataElement> data = orig_data;
+	while (!data.empty()) {
+		size_t pos = bytes_received % backlog_size;
+
+		// Collect as many iovecs as we can before we hit the point
+		// where the circular buffer wraps around.
+		vector<iovec> to_write = collect_iovecs(data, backlog_size - pos);
+		ssize_t ret;
+		do {
+			ret = pwritev(data_fd, to_write.data(), to_write.size(), pos);
+		} while (ret == -1 && errno == EINTR);
+
 		if (ret == -1) {
-			log_perror("pwrite");
+			log_perror("pwritev");
 			// Dazed and confused, but trying to continue...
-			break;
+			return;
 		}
-		pos += ret;
-		data += ret;
-		bytes -= ret;
+		bytes_received += ret;
+
+		// Remove the data that was actually written from the set of iovecs.
+		data = remove_iovecs(data, ret);
 	}
 }
 
-void Stream::add_data_deferred(const char *data, size_t bytes)
+void Stream::remove_obsolete_starting_points()
 {
-	if (encoding == Stream::STREAM_ENCODING_RAW) {
-		queued_data.append(string(data, data + bytes));
-	} else if (encoding == STREAM_ENCODING_METACUBE) {
-		metacube_block_header hdr;
-		memcpy(hdr.sync, METACUBE_SYNC, sizeof(hdr.sync));
+	// We could do a binary search here (std::lower_bound), but it seems
+	// overkill for removing what's probably only a few points.
+	while (!suitable_starting_points.empty() &&
+	       bytes_received - suitable_starting_points[0] > backlog_size) {
+		suitable_starting_points.pop_front();
+	}
+}
+
+void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags)
+{
+	// For regular output, we don't want to send the client the stream header twice
+	// (it's already sent out together with the HTTP header).
+	// However, for Metacube output, we need to send it so that
+	// the Cubemap instance in the other end has a chance to update it.
+	// It may come twice in its stream, but Cubemap doesn't care.
+	if (encoding == Stream::STREAM_ENCODING_RAW &&
+	    (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
+		return;
+	}
+
+	lock_guard<mutex> lock(queued_data_mutex);
+
+	DataElement data_element;
+	data_element.metacube_flags = metacube_flags;
+
+	if (encoding == Stream::STREAM_ENCODING_METACUBE) {
+		// Add a Metacube block header before the data.
+		metacube2_block_header hdr;
+		memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
 		hdr.size = htonl(bytes);
-		hdr.flags = htonl(0);
+		hdr.flags = htons(metacube_flags);
+		hdr.csum = htons(metacube2_compute_crc(&hdr));
+
+		data_element.data.iov_base = new char[bytes + sizeof(hdr)];
+		data_element.data.iov_len = bytes + sizeof(hdr);
+
+		memcpy(data_element.data.iov_base, &hdr, sizeof(hdr));
+		memcpy(reinterpret_cast<char *>(data_element.data.iov_base) + sizeof(hdr), data, bytes);
 
-		char *block = new char[bytes + sizeof(hdr)];
-		memcpy(block, &hdr, sizeof(hdr));
-		memcpy(block + sizeof(hdr), data, bytes);
-		queued_data.append(string(block, block + bytes + sizeof(hdr)));
-		delete[] block;
+		queued_data.push_back(data_element);
+	} else if (encoding == Stream::STREAM_ENCODING_RAW) {
+		// Just add the data itself.
+		data_element.data.iov_base = new char[bytes];
+		memcpy(data_element.data.iov_base, data, bytes);
+		data_element.data.iov_len = bytes;
+
+		queued_data.push_back(data_element);
 	} else {
 		assert(false);
 	}
@@ -182,12 +261,47 @@ void Stream::add_data_deferred(const char *data, size_t bytes)
 
 void Stream::process_queued_data()
 {
-	if (queued_data.empty()) {
-		return;
+	vector<DataElement> queued_data_copy;
+
+	// Hold the lock for as short a time as possible, since add_data_raw()
+	// can possibly write to disk, which might disturb the input thread.
+	{
+		lock_guard<mutex> lock(queued_data_mutex);
+		if (queued_data.empty()) {
+			return;
+		}
+
+		swap(queued_data, queued_data_copy);
 	}
 
-	add_data_raw(queued_data.data(), queued_data.size());
-	queued_data.clear();
+	// Add suitable starting points for the stream, if the queued data
+	// contains such starting points. Note that we drop starting points
+	// if they're less than 10 kB apart, so that we don't get a huge
+	// amount of them for e.g. each and every MPEG-TS 188-byte cell.
+	// The 10 kB value is somewhat arbitrary, but at least it should make
+	// the RAM cost of saving the position ~0.1% (or less) of the actual
+	// data, and 10 kB is a very fine granularity in most streams.
+	static const int minimum_start_point_distance = 10240;
+	size_t byte_position = bytes_received;
+	for (const DataElement &elem : queued_data_copy) {
+		if ((elem.metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
+			size_t num_points = suitable_starting_points.size();
+			if (num_points >= 2 &&
+			    suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
+				// p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
+				suitable_starting_points.pop_back();
+			}
+			suitable_starting_points.push_back(byte_position);
+		}
+		byte_position += elem.data.iov_len;
+	}
+
+	add_data_raw(queued_data_copy);
+	remove_obsolete_starting_points();
+	for (const DataElement &elem : queued_data_copy) {
+		char *data = reinterpret_cast<char *>(elem.data.iov_base);
+		delete[] data;
+	}
 
 	// We have more data, so wake up all clients.
 	if (to_process.empty()) {
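
A note on the new Stream::add_data_raw(): the backlog is a circular buffer backed by a file, so one logical write may have to be split where the buffer wraps. A DataElement (declared in stream.h, as its use here implies) pairs an iovec with the block's metacube_flags; collect_iovecs() clamps each pwritev() call to backlog_size - pos bytes, and remove_iovecs() drops whatever was actually written before the loop retries at position 0, so short writes and EINTR fall out of the same loop. The following standalone sketch (not cubemap code; an in-memory buffer and memcpy() stand in for the pwritev() call, and the helper name is hypothetical) shows the same wrap-around arithmetic:

    // Minimal sketch of the wrap-around logic in Stream::add_data_raw():
    // clamp each write at the end of the circular buffer, then continue
    // from position 0 with whatever remains.
    #include <stdio.h>
    #include <string.h>
    #include <algorithm>
    #include <string>

    using namespace std;

    void add_data(string *backlog, size_t *bytes_received, const char *data, size_t bytes)
    {
        while (bytes > 0) {
            size_t pos = *bytes_received % backlog->size();
            // Mirrors collect_iovecs(data, backlog_size - pos): never
            // write past the end of the buffer in one call.
            size_t to_write = min(bytes, backlog->size() - pos);
            memcpy(&(*backlog)[pos], data, to_write);
            *bytes_received += to_write;  // Mirrors bytes_received += ret.
            data += to_write;             // Mirrors data = remove_iovecs(data, ret).
            bytes -= to_write;
        }
    }

    int main()
    {
        string backlog(8, '.');
        size_t bytes_received = 0;
        add_data(&backlog, &bytes_received, "abcdef", 6);
        add_data(&backlog, &bytes_received, "ghij", 4);  // Wraps around.
        printf("%s\n", backlog.c_str());  // Prints "ijcdefgh".
        return 0;
    }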
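
The Metacube branch of add_data_deferred() frames each block as [metacube2_block_header][payload], with size and flags in network byte order and a checksum over the header. A minimal sketch of that framing, using the same metacube2.h calls the diff itself uses (the helper name frame_metacube2_block is hypothetical, and the exact header layout is whatever metacube2.h defines):

    #include <netinet/in.h>
    #include <stdint.h>
    #include <string.h>
    #include <string>

    #include "metacube2.h"

    // Build one wire-format Metacube2 block: header first, then payload.
    std::string frame_metacube2_block(const char *data, size_t bytes, uint16_t metacube_flags)
    {
        metacube2_block_header hdr;
        memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
        hdr.size = htonl(bytes);                        // Payload length, network order.
        hdr.flags = htons(metacube_flags);              // E.g. METACUBE_FLAGS_HEADER.
        hdr.csum = htons(metacube2_compute_crc(&hdr));  // Checksum of the header.

        std::string block(reinterpret_cast<const char *>(&hdr), sizeof(hdr));
        block.append(data, bytes);
        return block;
    }

This is also why hdr.flags changed from htonl(0) to htons(metacube_flags): as the diff shows, Metacube2 carries 16-bit flags plus a csum field, where the old Metacube format had a 32-bit flags field and no checksum.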
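
Finally, the starting-point bookkeeping: suitable_starting_points holds byte positions where a client may begin reading, and the 10 kB thinning in process_queued_data() only ever compares the last two stored points at insertion time. A standalone sketch of just that rule (the deque-plus-helper packaging is mine, not cubemap's):

    #include <stdio.h>
    #include <deque>

    using namespace std;

    static const size_t minimum_start_point_distance = 10240;

    // Mirrors the thinning in process_queued_data(): if the last two kept
    // points are closer than 10 kB, drop the newer one before appending.
    void add_starting_point(deque<size_t> *points, size_t byte_position)
    {
        size_t n = points->size();
        if (n >= 2 && (*points)[n - 1] - (*points)[n - 2] < minimum_start_point_distance) {
            points->pop_back();
        }
        points->push_back(byte_position);
    }

    int main()
    {
        deque<size_t> points;
        // Keyframes one MPEG-TS cell (188 bytes) apart, then one far away.
        for (size_t pos : {0, 188, 376, 564, 20000}) {
            add_starting_point(&points, pos);
        }
        for (size_t p : points) {
            printf("%zu\n", p);  // Prints 0, then 20000.
        }
        return 0;
    }

Note that the rule is applied per insertion, not globally: in the run above, every intermediate point ends up dropped, since each new point arrives while the previous pair is still under 10 kB apart. remove_obsolete_starting_points() then handles the other end, popping points that have fallen out of the backlog window.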