#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>  // For pwritev().
#include <algorithm>
#include <string>
#include <queue>
#include <vector>

#include "log.h"
#include "metacube2.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

using namespace std;

Stream::Stream(const string &url,
               size_t backlog_size,
               uint64_t prebuffering_bytes,
               Encoding encoding,
               Encoding src_encoding,
               unsigned hls_frag_duration,
               size_t hls_backlog_margin,
               const string &allow_origin)
        : url(url),
          encoding(encoding),
          src_encoding(src_encoding),
          allow_origin(allow_origin),
          data_fd(make_tempfile("")),
          backlog_size(backlog_size),
          prebuffering_bytes(prebuffering_bytes),
          hls_frag_duration(hls_frag_duration),
          hls_backlog_margin(hls_backlog_margin)
{
        if (data_fd == -1) {
                exit(1);
        }
}

Stream::~Stream()
{
        if (data_fd != -1) {
                safe_close(data_fd);
        }
}

Stream::Stream(const StreamProto &serialized, int data_fd)
        : url(serialized.url()),
          unavailable(serialized.unavailable()),
          http_header(serialized.http_header()),
          stream_header(serialized.stream_header()),
          encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
          data_fd(data_fd),
          backlog_size(serialized.backlog_size()),
          bytes_received(serialized.bytes_received()),
          first_fragment_index(serialized.first_fragment_index()),
          discontinuity_counter(serialized.discontinuity_counter())
{
        if (data_fd == -1) {
                exit(1);
        }

        // Set the close-on-exec parameter back on the backlog fd.
        fcntl(data_fd, F_SETFD, FD_CLOEXEC);

        for (ssize_t point : serialized.suitable_starting_point()) {
                if (point == -1) {
                        // Can happen when upgrading from before 1.1.3,
                        // where this was an optional field with -1 signifying
                        // "no such point".
                        continue;
                }
                suitable_starting_points.push_back(point);
        }

        for (const FragmentStartProto &fragment : serialized.fragment()) {
                fragments.push_back(FragmentStart { size_t(fragment.byte_position()), fragment.pts(), fragment.begins_header() });
        }
}

StreamProto Stream::serialize()
{
        StreamProto serialized;
        serialized.set_unavailable(unavailable);
        serialized.set_http_header(http_header);
        serialized.set_stream_header(stream_header);
        serialized.add_data_fds(data_fd);
        serialized.set_backlog_size(backlog_size);
        serialized.set_bytes_received(bytes_received);
        for (size_t point : suitable_starting_points) {
                serialized.add_suitable_starting_point(point);
        }
        for (const FragmentStart &fragment : fragments) {
                FragmentStartProto *proto = serialized.add_fragment();
                proto->set_byte_position(fragment.byte_position);
                proto->set_pts(fragment.pts);
                proto->set_begins_header(fragment.begins_header);
        }
        serialized.set_first_fragment_index(first_fragment_index);
        serialized.set_discontinuity_counter(discontinuity_counter);

        // Unset the close-on-exec flag for the backlog fd.
        // (This can't leak into a child, since there's only one thread left.)
        fcntl(data_fd, F_SETFD, 0);

        serialized.set_url(url);
        data_fd = -1;
        return serialized;
}

void Stream::set_backlog_size(size_t new_size)
{
        if (backlog_size == new_size) {
                return;
        }

        string existing_data;
        if (!read_tempfile_and_close(data_fd, &existing_data)) {
                exit(1);
        }

        // Unwrap the data so it's no longer circular.
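        // (E.g., with backlog_size = 8 and bytes_received = 11, pos = 3:
        // file offsets [0,3) hold stream bytes 8..10 and offsets [3,8)
        // hold stream bytes 3..7, so substr(pos) + substr(0, pos) puts
        // them back in stream order.)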
        if (bytes_received <= backlog_size) {
                existing_data.resize(bytes_received);
        } else {
                size_t pos = bytes_received % backlog_size;
                existing_data = existing_data.substr(pos, string::npos) +
                        existing_data.substr(0, pos);
        }

        // See if we need to discard data.
        if (new_size < existing_data.size()) {
                size_t to_discard = existing_data.size() - new_size;
                existing_data = existing_data.substr(to_discard, string::npos);
        }

        // Create a new, empty data file.
        data_fd = make_tempfile("");
        if (data_fd == -1) {
                exit(1);
        }
        backlog_size = new_size;

        // Now cheat a bit by rewinding, and adding all the old data back.
        bytes_received -= existing_data.size();
        DataElement data_element;
        data_element.data.iov_base = const_cast<char *>(existing_data.data());
        data_element.data.iov_len = existing_data.size();
        data_element.metacube_flags = 0;  // Ignored by add_data_raw().

        vector<DataElement> data_elements;
        data_elements.push_back(data_element);
        add_data_raw(data_elements);
        remove_obsolete_starting_points();
}

void Stream::set_header(const string &new_http_header, const string &new_stream_header)
{
        unavailable = false;
        http_header = new_http_header;
        if (new_stream_header == stream_header) {
                return;
        }

        // We cannot start at any of the older starting points anymore,
        // since they'd get the wrong header for the stream (not to mention
        // that a changed header probably means the stream restarted,
        // which means any client starting on the old one would probably
        // stop playing properly at the change point). The next block
        // should be a suitable starting point (if not, something is
        // pretty strange), so it will fill up again soon enough.
        suitable_starting_points.clear();

        // HLS, on the other hand, can deal with discontinuities and multiple
        // headers. At least in theory (client support varies wildly).
        if (!fragments.empty()) {
                // Commit the old header to the backlog, so that we can serve it
                // for all the old fragments for as long as they exist.
                if (!stream_header.empty()) {
                        // End the current fragment and make a new one for the header.
                        fragments.push_back(Stream::FragmentStart { bytes_received, 0.0, true });
                        process_queued_data();
                        Stream::DataElement elem;
                        elem.data.iov_base = (char *)stream_header.data();
                        elem.data.iov_len = stream_header.size();
                        add_data_raw({ elem });
                        remove_obsolete_starting_points();

                        // The discontinuity counter will be increased when
                        // this header goes out of the backlog.
                }
                clear_hls_playlist_cache();
        }
        stream_header = new_stream_header;
}

void Stream::put_client_to_sleep(Client *client)
{
        sleeping_clients.push_back(client);
}

// Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
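// Used by add_data_raw() to clamp a write both to the end of the circular
// backlog (where it has to wrap around) and to IOV_MAX iovecs, the most
// a single pwritev() call will accept.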
vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
        vector<iovec> ret;
        size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
        for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
                if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
                        ret.push_back(data[i].data);
                        bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only parts of this iovec.
                        iovec iov;
                        iov.iov_base = data[i].data.iov_base;
                        iov.iov_len = bytes_wanted;
                        ret.push_back(iov);
                        bytes_wanted = 0;
                }
        }
        return ret;
}

// Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
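// The complement of collect_iovecs(): together they let add_data_raw()
// resume after a short write, dropping exactly the bytes that pwritev()
// managed to write from the front of the queue.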
vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
        vector<Stream::DataElement> ret;
        size_t i;
        for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
                if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
                        bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only parts of this iovec.
                        Stream::DataElement data_element;
                        data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
                        data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
                        data_element.metacube_flags = METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START;
                        data_element.pts = RationalPTS();
                        ret.push_back(data_element);
                        bytes_wanted = 0;
                }
        }

        // Add the rest of the iovecs unchanged.
        ret.insert(ret.end(), data.begin() + i, data.end());
        return ret;
}

void Stream::add_data_raw(const vector<DataElement> &orig_data)
{
        vector<DataElement> data = orig_data;
        while (!data.empty()) {
                size_t pos = bytes_received % backlog_size;

                // Collect as many iovecs as we can before we hit the point
                // where the circular buffer wraps around.
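                // (E.g., with a 1 MB backlog and pos 64 kB before the end,
                // this write is capped at 64 kB; the remainder wraps around
                // to pos = 0 on the next iteration of the loop.)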
                vector<iovec> to_write = collect_iovecs(data, backlog_size - pos);
                ssize_t ret;
                do {
                        ret = pwritev(data_fd, to_write.data(), to_write.size(), pos);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1) {
                        log_perror("pwritev");
                        // Dazed and confused, but trying to continue...
                        return;
                }
                bytes_received += ret;

                // Remove the data that was actually written from the set of iovecs.
                data = remove_iovecs(data, ret);
        }
}

void Stream::remove_obsolete_starting_points()
{
        // We could do a binary search here (std::lower_bound), but it seems
        // overkill for removing what's probably only a few points.
        while (!suitable_starting_points.empty() &&
               bytes_received - suitable_starting_points[0] > backlog_size) {
                suitable_starting_points.pop_front();
        }
        assert(backlog_size >= hls_backlog_margin);
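        // Note that fragments are expired earlier than ordinary starting
        // points, by hls_backlog_margin bytes; this gives a client that
        // just fetched the playlist some slack to download a fragment
        // before the bytes it refers to are overwritten.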
        while (!fragments.empty() &&
               bytes_received - fragments[0].byte_position > (backlog_size - hls_backlog_margin)) {
                if (fragments[0].begins_header) {
                        ++discontinuity_counter;
                } else {
                        ++first_fragment_index;
                }
                fragments.pop_front();
                clear_hls_playlist_cache();
        }
}

void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags, const RationalPTS &pts)
{
        // For regular output, we don't want to send the stream header twice
        // (it's already sent out together with the HTTP header).
        // However, for Metacube output, we need to send it so that
        // the Cubemap instance on the other end has a chance to update it.
        // It may come twice in its stream, but Cubemap doesn't care.
        if (encoding == Stream::STREAM_ENCODING_RAW &&
            (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
                return;
        }

        lock_guard<mutex> lock(queued_data_mutex);

        DataElement data_element;
        data_element.metacube_flags = metacube_flags;
        data_element.pts = pts;

        if (encoding == Stream::STREAM_ENCODING_METACUBE) {
                // Construct a PTS metadata block. (We'll avoid sending it out
                // if we don't have a valid PTS.)
                metacube2_pts_packet pts_packet;
                pts_packet.type = htobe64(METACUBE_METADATA_TYPE_NEXT_BLOCK_PTS);
                pts_packet.pts = htobe64(pts.pts);
                pts_packet.timebase_num = htobe64(pts.timebase_num);
                pts_packet.timebase_den = htobe64(pts.timebase_den);

                metacube2_block_header pts_hdr;
                memcpy(pts_hdr.sync, METACUBE2_SYNC, sizeof(pts_hdr.sync));
                pts_hdr.size = htonl(sizeof(pts_packet));
                pts_hdr.flags = htons(METACUBE_FLAGS_METADATA);
                pts_hdr.csum = htons(metacube2_compute_crc(&pts_hdr));

                // Add a Metacube block header before the data.
                metacube2_block_header hdr;
                memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
                hdr.size = htonl(bytes);
                hdr.flags = htons(metacube_flags);
                hdr.csum = htons(metacube2_compute_crc(&hdr));
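                // The single allocation below is laid out as
                //
                //   [pts_hdr][pts_packet][hdr][payload]   (with a valid PTS)
                //   [hdr][payload]                        (without one)
                //
                // so each block can be written out as one contiguous iovec.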
                data_element.data.iov_len = bytes + sizeof(hdr);
                if (pts.timebase_num != 0) {
                        data_element.data.iov_len += sizeof(pts_hdr) + sizeof(pts_packet);
                }
                data_element.data.iov_base = new char[data_element.data.iov_len];

                char *ptr = reinterpret_cast<char *>(data_element.data.iov_base);
                if (pts.timebase_num != 0) {
                        memcpy(ptr, &pts_hdr, sizeof(pts_hdr));
                        ptr += sizeof(pts_hdr);
                        memcpy(ptr, &pts_packet, sizeof(pts_packet));
                        ptr += sizeof(pts_packet);
                }

                memcpy(ptr, &hdr, sizeof(hdr));
                ptr += sizeof(hdr);
                memcpy(ptr, data, bytes);

                queued_data.push_back(data_element);
        } else if (encoding == Stream::STREAM_ENCODING_RAW) {
                // Just add the data itself.
                data_element.data.iov_base = new char[bytes];
                memcpy(data_element.data.iov_base, data, bytes);
                data_element.data.iov_len = bytes;

                queued_data.push_back(data_element);
        } else {
                assert(false);
        }
}

void Stream::process_queued_data()
{
        vector<DataElement> queued_data_copy;

        // Hold the lock for as short a time as possible, since add_data_raw()
        // can possibly write to disk, which might disturb the input thread.
        {
                lock_guard<mutex> lock(queued_data_mutex);
                if (queued_data.empty()) {
                        return;
                }

                swap(queued_data, queued_data_copy);
        }

        // Add suitable starting points for the stream, if the queued data
        // contains such starting points. Note that we drop starting points
        // if they're less than 10 kB apart, so that we don't get a huge
        // amount of them for e.g. each and every MPEG-TS 188-byte cell.
        // The 10 kB value is somewhat arbitrary, but at least it should make
        // the RAM cost of saving the position ~0.1% (or less) of the actual
        // data, and 10 kB is a very fine granularity in most streams.
        static const int minimum_start_point_distance = 10240;
        size_t byte_position = bytes_received;
        bool need_hls_clear = false;
        for (const DataElement &elem : queued_data_copy) {
                if ((elem.metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
                        size_t num_points = suitable_starting_points.size();
                        if (num_points >= 2 &&
                            suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
                                // p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
                                suitable_starting_points.pop_back();
                        }
                        suitable_starting_points.push_back(byte_position);

                        if (elem.pts.timebase_num != 0) {
                                need_hls_clear |= add_fragment_boundary(byte_position, elem.pts);
                        }
                }
                byte_position += elem.data.iov_len;
        }
        if (need_hls_clear) {
                clear_hls_playlist_cache();
        }

        add_data_raw(queued_data_copy);
        remove_obsolete_starting_points();
        for (const DataElement &elem : queued_data_copy) {
                char *data = reinterpret_cast<char *>(elem.data.iov_base);
                delete[] data;
        }

        // We have more data, so wake up all clients.
        if (to_process.empty()) {
                swap(sleeping_clients, to_process);
        } else {
                to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
                sleeping_clients.clear();
        }
}

bool Stream::add_fragment_boundary(size_t byte_position, const RationalPTS &pts)
{
        // Convert the rational PTS to seconds (e.g., pts = 90000 with a
        // 1/90000 timebase gives 1.0).
        double pts_double = double(pts.pts) * pts.timebase_num / pts.timebase_den;

        if (fragments.size() <= 1 ||
            fragments[fragments.size() - 1].begins_header ||
            fragments[fragments.size() - 2].begins_header) {
                // Just starting up, so try to establish the first in-progress fragment.
                fragments.push_back(FragmentStart{ byte_position, pts_double, false });
                return false;
        }

        // Keep extending the in-progress fragment as long as we do not
        // exceed the target duration by more than half a second
        // (RFC 8216 4.3.3.1) and we get closer to the target by doing so.
        // Note that in particular, this means we'll always extend
        // as long as we don't exceed the target duration.
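        // (Worked example, assuming hls_frag_duration = 6 and a keyframe
        // every two seconds: with the in-progress fragment starting at 0 s,
        // the boundaries at 4 s and 6 s both extend it, since the rounded
        // duration stays <= 6 and gets closer to the target; the boundary
        // at 8 s would round to 8 > 6, so the fragment is finalized at
        // 6 s and a new in-progress fragment is started.)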
        double current_duration = fragments[fragments.size() - 1].pts - fragments[fragments.size() - 2].pts;
        double candidate_duration = pts_double - fragments[fragments.size() - 2].pts;
        if (lrintf(candidate_duration) <= hls_frag_duration &&
            fabs(candidate_duration - hls_frag_duration) < fabs(current_duration - hls_frag_duration)) {
                fragments.back() = FragmentStart{ byte_position, pts_double, false };
                return false;
        } else {
                // Extending the in-progress fragment would make it too long
                // (or no closer to the target duration), so finalize it and
                // start a new in-progress fragment.
                fragments.push_back(FragmentStart{ byte_position, pts_double, false });
                return true;
        }
}

void Stream::clear_hls_playlist_cache()
{
        hls_playlist_http10.reset();
        hls_playlist_http11_close.reset();
        hls_playlist_http11_persistent.reset();
}

shared_ptr<const string> Stream::generate_hls_playlist(bool http_11, bool close_after_response)
{
        char buf[256];
        snprintf(buf, sizeof(buf),
                "#EXTM3U\r\n"
                "#EXT-X-VERSION:7\r\n"
                "#EXT-X-TARGETDURATION:%u\r\n"
                "#EXT-X-MEDIA-SEQUENCE:%" PRIu64 "\r\n"
                "#EXT-X-DISCONTINUITY-SEQUENCE:%" PRIu64 "\r\n",
                hls_frag_duration,
                first_fragment_index,
                discontinuity_counter);

        string playlist = buf;
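        // The finished playlist body looks something like this
        // (URL and byte ranges are illustrative only):
        //
        //   #EXTM3U
        //   #EXT-X-VERSION:7
        //   #EXT-X-TARGETDURATION:6
        //   #EXT-X-MEDIA-SEQUENCE:42
        //   #EXT-X-DISCONTINUITY-SEQUENCE:0
        //   #EXT-X-MAP:URI="/stream.mp4?frag=header"
        //   #EXTINF:6.000000,
        //   /stream.mp4?frag=1048576-2097152
        //   #EXTINF:6.000000,
        //   /stream.mp4?frag=2097152-3145728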

        if (fragments.size() >= 3) {
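                // The last two boundaries delimit the in-progress fragment
                // (see add_fragment_boundary()), which is never served, so
                // we need at least three boundaries before there is anything
                // to put in the playlist.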
                bool printed_header_for_this_group = false;
                bool printed_first_header = false;
                for (size_t i = 0; i < fragments.size() - 2; ++i) {
                        char buf[256];

                        if (fragments[i].begins_header) {
                                // End of this group. (We've already printed the header
                                // as part of the previous group.)
                                printed_header_for_this_group = false;
                                continue;
                        }
                        if (!printed_header_for_this_group) {
                                // Look forward until we find the header for this group (if any).
                                for (size_t j = i + 1; j < fragments.size() - 1; ++j) {
                                        if (fragments[j].begins_header) {
                                                if (printed_first_header) {
                                                        playlist += "#EXT-X-DISCONTINUITY\r\n";
                                                }
                                                snprintf(buf, sizeof(buf),
                                                        "#EXT-X-MAP:URI=\"%s?frag=%" PRIu64 "-%" PRIu64 "\"\r\n",
                                                        url.c_str(), fragments[j].byte_position,
                                                        fragments[j + 1].byte_position);
                                                playlist += buf;
                                                printed_first_header = true;
                                                printed_header_for_this_group = true;
                                                break;
                                        }
                                }

                                if (!printed_header_for_this_group && !stream_header.empty()) {
                                        if (printed_first_header) {
                                                playlist += "#EXT-X-DISCONTINUITY\r\n";
                                        }
                                        snprintf(buf, sizeof(buf), "#EXT-X-MAP:URI=\"%s?frag=header\"\r\n", url.c_str());
                                        playlist += buf;
                                }

                                // Even if we didn't find anything, we don't want to search again for each fragment.
                                printed_first_header = true;
                                printed_header_for_this_group = true;
                        }

                        if (fragments[i + 1].begins_header) {
                                // Since we only have start pts for each block and not duration,
                                // we have no idea how long this fragment is; the encoder restarted
                                // before it got to output the next pts. However, it's likely
                                // to be very short, so instead of trying to guess, we just skip it.
                                continue;
                        }

                        snprintf(buf, sizeof(buf), "#EXTINF:%f,\r\n%s?frag=%" PRIu64 "-%" PRIu64 "\r\n",
                                fragments[i + 1].pts - fragments[i].pts,
                                url.c_str(),
                                fragments[i].byte_position,
                                fragments[i + 1].byte_position);
                        playlist += buf;
                }
        }

        string response;
        if (http_11) {
                response = "HTTP/1.1 200 OK\r\n";
                if (close_after_response) {
                        response.append("Connection: close\r\n");
                }
        } else {
                assert(close_after_response);
                response = "HTTP/1.0 200 OK\r\n";
        }
        snprintf(buf, sizeof(buf), "Content-Length: %zu\r\n", playlist.size());
        response.append(buf);
        response.append("Content-Type: application/x-mpegURL\r\n");
        if (!allow_origin.empty()) {
                response.append("Access-Control-Allow-Origin: ");
                response.append(allow_origin);
                response.append("\r\n");
        }
        response.append("\r\n");
        response.append(move(playlist));

        return shared_ptr<const string>(new string(move(response)));
}