#include <errno.h>
#include <limits.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/uio.h>

#include <algorithm>
#include <string>
#include <vector>

#include "log.h"
#include "metacube2.h"
#include "mutexlock.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

using namespace std;

Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding, Encoding src_encoding)
	: url(url),
	  encoding(encoding),
	  src_encoding(src_encoding),
	  data_fd(make_tempfile("")),
	  backlog_size(backlog_size),
	  prebuffering_bytes(prebuffering_bytes),
	  bytes_received(0)
{
	pthread_mutex_init(&queued_data_mutex, NULL);
}

Stream::Stream(const StreamProto &serialized, int data_fd)
	: url(serialized.url()),
	  http_header(serialized.http_header()),
	  stream_header(serialized.stream_header()),
	  encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
	  data_fd(data_fd),
	  backlog_size(serialized.backlog_size()),
	  prebuffering_bytes(serialized.prebuffering_bytes()),
	  bytes_received(serialized.bytes_received())
{
	for (int i = 0; i < serialized.suitable_starting_point_size(); ++i) {
		ssize_t point = serialized.suitable_starting_point(i);
		if (point == -1) {
			// Can happen when upgrading from before 1.1.3, where this
			// was an optional field with -1 signifying that no suitable
			// starting point had been seen yet.
			continue;
		}
		suitable_starting_points.push_back(point);
	}

	pthread_mutex_init(&queued_data_mutex, NULL);
}

StreamProto Stream::serialize()
{
	StreamProto serialized;
	serialized.set_http_header(http_header);
	serialized.set_stream_header(stream_header);
	serialized.add_data_fds(data_fd);
	serialized.set_backlog_size(backlog_size);
	serialized.set_prebuffering_bytes(prebuffering_bytes);
	serialized.set_bytes_received(bytes_received);
	for (size_t i = 0; i < suitable_starting_points.size(); ++i) {
		serialized.add_suitable_starting_point(suitable_starting_points[i]);
	}
	serialized.set_url(url);
	return serialized;
}
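
// Rough usage sketch (names here are hypothetical; the real callers live
// elsewhere in Cubemap): serialize() pairs with the Stream(StreamProto, int)
// constructor above, so a stream can be torn down and rebuilt around the
// same backlog file:
//
//   StreamProto proto = stream->serialize();
//   ...
//   Stream *restored = new Stream(proto, data_fd);
//
// Note that the proto only records the fd number via add_data_fds(); the
// open file itself has to be kept alive and handed to the new Stream
// separately.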

void Stream::set_backlog_size(size_t new_size)
{
	if (backlog_size == new_size) {
		return;
	}

	string existing_data;
	if (!read_tempfile_and_close(data_fd, &existing_data)) {
		exit(1);
	}

	// Unwrap the data so it's no longer circular.
	if (bytes_received <= backlog_size) {
		existing_data.resize(bytes_received);
	} else {
		size_t pos = bytes_received % backlog_size;
		existing_data = existing_data.substr(pos, string::npos) +
			existing_data.substr(0, pos);
	}

	// See if we need to discard data.
	if (new_size < existing_data.size()) {
		size_t to_discard = existing_data.size() - new_size;
		existing_data = existing_data.substr(to_discard, string::npos);
	}

	// Create a new, empty data file.
	data_fd = make_tempfile("");
	backlog_size = new_size;

	// Now cheat a bit by rewinding, and adding all the old data back.
	bytes_received -= existing_data.size();

	DataElement data_element;
	data_element.data.iov_base = const_cast<char *>(existing_data.data());
	data_element.data.iov_len = existing_data.size();
	data_element.metacube_flags = 0;  // Ignored by add_data_raw().

	vector<DataElement> data_elements;
	data_elements.push_back(data_element);
	add_data_raw(data_elements);
	remove_obsolete_starting_points();
}
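
// Worked example of the unwrap above (hypothetical numbers): with
// backlog_size = 10 and bytes_received = 23, the backlog file holds stream
// bytes 13..22 stored circularly, and pos = 23 % 10 = 3 points at the oldest
// byte. substr(pos) + substr(0, pos) therefore yields bytes 13..22 in stream
// order. Shrinking to new_size = 5 then discards the first 5 of those bytes,
// and the remaining bytes 18..22 are written back through add_data_raw()
// after bytes_received has been rewound accordingly.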

void Stream::put_client_to_sleep(Client *client)
{
	sleeping_clients.push_back(client);
}

// Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
	vector<iovec> ret;
	size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
	for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
		if (data[i].data.iov_len <= bytes_wanted) {
			// Consume the entire iovec.
			ret.push_back(data[i].data);
			bytes_wanted -= data[i].data.iov_len;
		} else {
			// Take only parts of this iovec.
			iovec iov;
			iov.iov_base = data[i].data.iov_base;
			iov.iov_len = bytes_wanted;
			ret.push_back(iov);
			bytes_wanted = 0;
		}
	}
	return ret;
}

// Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
	vector<Stream::DataElement> ret;
	size_t i;
	for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
		if (data[i].data.iov_len <= bytes_wanted) {
			// Consume the entire iovec.
			bytes_wanted -= data[i].data.iov_len;
		} else {
			// Take only parts of this iovec.
			Stream::DataElement data_element;
			data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
			data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
			data_element.metacube_flags = METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START;
			ret.push_back(data_element);
			bytes_wanted = 0;
		}
	}

	// Add the rest of the iovecs unchanged.
	ret.insert(ret.end(), data.begin() + i, data.end());
	return ret;
}
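
// Illustration of how the two helpers partition a request (hypothetical
// sizes): given iovecs of 100, 200 and 300 bytes and bytes_wanted = 250,
// collect_iovecs() returns the whole 100-byte iovec plus the first 150 bytes
// of the 200-byte one, while remove_iovecs() returns the last 50 bytes of the
// 200-byte iovec (now flagged METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START,
// since it no longer starts on a block boundary) followed by the untouched
// 300-byte iovec.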

void Stream::add_data_raw(const vector<DataElement> &orig_data)
{
	vector<DataElement> data = orig_data;
	while (!data.empty()) {
		size_t pos = bytes_received % backlog_size;

		// Collect as many iovecs as we can before we hit the point
		// where the circular buffer wraps around.
		vector<iovec> to_write = collect_iovecs(data, backlog_size - pos);

		ssize_t ret;
		do {
			ret = pwritev(data_fd, to_write.data(), to_write.size(), pos);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1) {
			log_perror("pwritev");
			// Dazed and confused, but trying to continue...
			break;
		}
		bytes_received += ret;

		// Remove the data that was actually written from the set of iovecs.
		data = remove_iovecs(data, ret);
	}
}
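
// Example of the wrap-around handling above (hypothetical numbers): with
// backlog_size = 1000 and bytes_received = 950, pos = 950, so the first
// pwritev() is capped at backlog_size - pos = 50 bytes; if 120 bytes were
// queued, the remaining 70 bytes go out at pos = 0 on the next loop
// iteration. pwritev() can also write less than requested, which is why the
// loop advances by the actual return value rather than by the amount asked
// for.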

void Stream::remove_obsolete_starting_points()
{
	// We could do a binary search here (std::lower_bound), but it seems
	// overkill for removing what's probably only a few points.
	while (!suitable_starting_points.empty() &&
	       bytes_received - suitable_starting_points[0] > backlog_size) {
		suitable_starting_points.pop_front();
	}
}
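
// Example (hypothetical numbers): with a 10 MB backlog and bytes_received at
// 25 MB, any starting point recorded before the 15 MB mark refers to data
// that the circular backlog has already overwritten, so it is dropped here
// and will never be offered to a newly connecting client.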

void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags)
{
	// For regular output, we don't want to send the stream header to the
	// client twice (it's already sent out together with the HTTP header).
	// However, for Metacube output, we need to send it so that the Cubemap
	// instance at the other end has a chance to update it. It may come
	// twice in its stream, but Cubemap doesn't care.
	if (encoding == Stream::STREAM_ENCODING_RAW &&
	    (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
		return;
	}

	MutexLock lock(&queued_data_mutex);

	DataElement data_element;
	data_element.metacube_flags = metacube_flags;

	if (encoding == Stream::STREAM_ENCODING_METACUBE) {
		// Add a Metacube block header before the data.
		metacube2_block_header hdr;
		memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
		hdr.size = htonl(bytes);
		hdr.flags = htons(metacube_flags);
		hdr.csum = htons(metacube2_compute_crc(&hdr));

		data_element.data.iov_base = new char[bytes + sizeof(hdr)];
		data_element.data.iov_len = bytes + sizeof(hdr);

		memcpy(data_element.data.iov_base, &hdr, sizeof(hdr));
		memcpy(reinterpret_cast<char *>(data_element.data.iov_base) + sizeof(hdr), data, bytes);

		queued_data.push_back(data_element);
	} else if (encoding == Stream::STREAM_ENCODING_RAW) {
		// Just add the data itself.
		data_element.data.iov_base = new char[bytes];
		memcpy(data_element.data.iov_base, data, bytes);
		data_element.data.iov_len = bytes;

		queued_data.push_back(data_element);
	}
}
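
// The Metacube branch above queues one contiguous allocation laid out as
// [block header][payload]. Sketch of the resulting bytes, assuming the
// metacube2_block_header fields used above (sync bytes, 32-bit size, 16-bit
// flags, 16-bit checksum, multi-byte fields in network byte order):
//
//   +------+------+-------+------+-----------------------+
//   | sync | size | flags | csum | <bytes> bytes of data |
//   +------+------+-------+------+-----------------------+
//
// A downstream Cubemap instance can scan for the sync bytes to find block
// boundaries and use the flags (e.g. METACUBE_FLAGS_HEADER) to tell stream
// headers apart from ordinary data blocks.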

void Stream::process_queued_data()
{
	vector<DataElement> queued_data_copy;

	// Hold the lock for as short a time as possible, since add_data_raw()
	// can possibly write to disk, which might disturb the input thread.
	{
		MutexLock lock(&queued_data_mutex);
		if (queued_data.empty()) {
			return;
		}

		swap(queued_data, queued_data_copy);
	}

	// Add suitable starting points for the stream, if the queued data
	// contains such starting points. Note that we drop starting points
	// if they're less than 10 kB apart, so that we don't get a huge
	// amount of them for e.g. each and every 188-byte MPEG-TS packet.
	// The 10 kB value is somewhat arbitrary, but at least it should make
	// the RAM cost of saving the positions ~0.1% (or less) of the actual
	// data, and 10 kB is a very fine granularity in most streams.
	static const int minimum_start_point_distance = 10240;
	size_t byte_position = bytes_received;
	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
		if ((queued_data_copy[i].metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
			size_t num_points = suitable_starting_points.size();
			if (num_points >= 2 &&
			    suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
				// p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
				suitable_starting_points.pop_back();
			}
			suitable_starting_points.push_back(byte_position);
		}
		byte_position += queued_data_copy[i].data.iov_len;
	}

	add_data_raw(queued_data_copy);
	remove_obsolete_starting_points();
	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
		char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
		delete[] data;
	}

	// We have more data, so wake up all clients.
	if (to_process.empty()) {
		swap(sleeping_clients, to_process);
	} else {
		to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
		sleeping_clients.clear();
	}
}
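
// Example of the starting-point thinning above (hypothetical positions): if
// suitable blocks begin at byte positions 0, 4096, 8192 and 20480, the deque
// evolves as [0] -> [0, 4096] -> [0, 8192] (4096 dropped, since 4096 - 0 is
// under 10 kB) -> [0, 20480] (8192 dropped for the same reason). Each new
// suitable point is always pushed; it is only removed later, if yet another
// point arrives while the last two recorded points are still less than 10 kB
// apart.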