[cubemap] / stream.cpp
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>  // For pwritev() and struct iovec.
#include <algorithm>
#include <string>
#include <queue>
#include <vector>

#include "log.h"
#include "metacube2.h"
#include "mutexlock.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

using namespace std;

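// Create a new stream, backed by a freshly created temporary file that
// holds the backlog as a circular buffer of <backlog_size> bytes.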
Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding, Encoding src_encoding)
	: url(url),
	  encoding(encoding),
	  src_encoding(src_encoding),
	  data_fd(make_tempfile("")),
	  backlog_size(backlog_size),
	  prebuffering_bytes(prebuffering_bytes),
	  bytes_received(0),
	  pacing_rate(~0U)
{
	if (data_fd == -1) {
		exit(1);
	}

	pthread_mutex_init(&queued_data_mutex, NULL);
}

Stream::~Stream()
{
	if (data_fd != -1) {
		safe_close(data_fd);
	}
}

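// Reconstruct a stream from serialized state; <data_fd> is the existing
// backlog file, which the new Stream object takes ownership of.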
Stream::Stream(const StreamProto &serialized, int data_fd)
	: url(serialized.url()),
	  http_header(serialized.http_header()),
	  stream_header(serialized.stream_header()),
	  encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
	  data_fd(data_fd),
	  backlog_size(serialized.backlog_size()),
	  prebuffering_bytes(serialized.prebuffering_bytes()),
	  bytes_received(serialized.bytes_received()),
	  pacing_rate(~0U)
{
	if (data_fd == -1) {
		exit(1);
	}

	for (int i = 0; i < serialized.suitable_starting_point_size(); ++i) {
		ssize_t point = serialized.suitable_starting_point(i);
		if (point == -1) {
			// Can happen when upgrading from before 1.1.3,
			// where this was an optional field with -1 signifying
			// "no such point".
			continue;
		}
		suitable_starting_points.push_back(point);
	}

	pthread_mutex_init(&queued_data_mutex, NULL);
}

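// Serialize the stream state. Note that this hands the backlog file
// descriptor over to the StreamProto and marks it as invalid in this
// object (data_fd is set to -1), so the Stream must not be used for
// I/O afterwards.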
StreamProto Stream::serialize()
{
	StreamProto serialized;
	serialized.set_http_header(http_header);
	serialized.set_stream_header(stream_header);
	serialized.add_data_fds(data_fd);
	serialized.set_backlog_size(backlog_size);
	serialized.set_prebuffering_bytes(prebuffering_bytes);
	serialized.set_bytes_received(bytes_received);
	for (size_t i = 0; i < suitable_starting_points.size(); ++i) {
		serialized.add_suitable_starting_point(suitable_starting_points[i]);
	}
	serialized.set_url(url);
	data_fd = -1;
	return serialized;
}

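// Change the backlog size. Since the backlog is a circular buffer in a
// file, this linearizes the existing contents, discards the oldest bytes
// if the new size is smaller, and writes everything back into a fresh
// temporary file of the new size.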
void Stream::set_backlog_size(size_t new_size)
{
	if (backlog_size == new_size) {
		return;
	}

	string existing_data;
	if (!read_tempfile_and_close(data_fd, &existing_data)) {
		exit(1);
	}

	// Unwrap the data so it's no longer circular.
	if (bytes_received <= backlog_size) {
		existing_data.resize(bytes_received);
	} else {
		size_t pos = bytes_received % backlog_size;
		existing_data = existing_data.substr(pos, string::npos) +
			existing_data.substr(0, pos);
	}

	// See if we need to discard data.
	if (new_size < existing_data.size()) {
		size_t to_discard = existing_data.size() - new_size;
		existing_data = existing_data.substr(to_discard, string::npos);
	}

	// Create a new, empty data file.
	data_fd = make_tempfile("");
	if (data_fd == -1) {
		exit(1);
	}
	backlog_size = new_size;

	// Now cheat a bit by rewinding, and adding all the old data back.
	bytes_received -= existing_data.size();
	DataElement data_element;
	data_element.data.iov_base = const_cast<char *>(existing_data.data());
	data_element.data.iov_len = existing_data.size();
	data_element.metacube_flags = 0;  // Ignored by add_data_raw().

	vector<DataElement> data_elements;
	data_elements.push_back(data_element);
	add_data_raw(data_elements);
	remove_obsolete_starting_points();
}

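// Park a client that has caught up with the stream; it will be moved back
// to the processing queue by process_queued_data() once more data arrives.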
void Stream::put_client_to_sleep(Client *client)
{
	sleeping_clients.push_back(client);
}

// Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
	vector<iovec> ret;
	// writev()-style calls accept at most IOV_MAX elements in one go.
	size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
	for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
		if (data[i].data.iov_len <= bytes_wanted) {
			// Consume the entire iovec.
			ret.push_back(data[i].data);
			bytes_wanted -= data[i].data.iov_len;
		} else {
			// Take only part of this iovec.
			iovec iov;
			iov.iov_base = data[i].data.iov_base;
			iov.iov_len = bytes_wanted;
			ret.push_back(iov);
			bytes_wanted = 0;
		}
	}
	return ret;
}

// Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
	vector<Stream::DataElement> ret;
	size_t i;
	for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
		if (data[i].data.iov_len <= bytes_wanted) {
			// Consume the entire iovec.
			bytes_wanted -= data[i].data.iov_len;
		} else {
			// Skip only part of this iovec.
			Stream::DataElement data_element;
			data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
			data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
			data_element.metacube_flags = METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START;
			ret.push_back(data_element);
			bytes_wanted = 0;
		}
	}

	// Add the rest of the iovecs unchanged.
	ret.insert(ret.end(), data.begin() + i, data.end());
	return ret;
}

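// Write the data elements straight into the backlog file, starting at the
// current write position. Handles wraparound at the end of the circular
// buffer, and loops in case pwritev() writes less than requested.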
void Stream::add_data_raw(const vector<DataElement> &orig_data)
{
	vector<DataElement> data = orig_data;
	while (!data.empty()) {
		size_t pos = bytes_received % backlog_size;

		// Collect as many iovecs as we can before we hit the point
		// where the circular buffer wraps around.
		vector<iovec> to_write = collect_iovecs(data, backlog_size - pos);
		ssize_t ret;
		do {
			ret = pwritev(data_fd, to_write.data(), to_write.size(), pos);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1) {
			log_perror("pwritev");
			// Dazed and confused, but trying to continue...
			return;
		}
		bytes_received += ret;

		// Remove the data that was actually written from the set of iovecs.
		data = remove_iovecs(data, ret);
	}
}

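// Drop any starting points that have been overwritten in the circular
// buffer, so that we never hand out a position that is no longer backed
// by actual data.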
void Stream::remove_obsolete_starting_points()
{
	// We could do a binary search here (std::lower_bound), but it seems
	// overkill for removing what's probably only a few points.
	while (!suitable_starting_points.empty() &&
	       bytes_received - suitable_starting_points[0] > backlog_size) {
		suitable_starting_points.pop_front();
	}
}

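// Queue data for the stream, adding Metacube2 framing first if the stream
// is in Metacube output mode. The data is only copied and buffered under
// the mutex here; it is written to the backlog later, in
// process_queued_data().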
void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags)
{
	// For regular output, we don't want to send the client the stream
	// header twice (it's already sent out together with the HTTP header).
	// However, for Metacube output, we need to send it so that
	// the Cubemap instance in the other end has a chance to update it.
	// It may come twice in its stream, but Cubemap doesn't care.
	if (encoding == Stream::STREAM_ENCODING_RAW &&
	    (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
		return;
	}

	MutexLock lock(&queued_data_mutex);

	DataElement data_element;
	data_element.metacube_flags = metacube_flags;

	if (encoding == Stream::STREAM_ENCODING_METACUBE) {
		// Add a Metacube block header before the data.
		metacube2_block_header hdr;
		memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
		hdr.size = htonl(bytes);
		hdr.flags = htons(metacube_flags);
		hdr.csum = htons(metacube2_compute_crc(&hdr));

		data_element.data.iov_base = new char[bytes + sizeof(hdr)];
		data_element.data.iov_len = bytes + sizeof(hdr);

		memcpy(data_element.data.iov_base, &hdr, sizeof(hdr));
		memcpy(reinterpret_cast<char *>(data_element.data.iov_base) + sizeof(hdr), data, bytes);

		queued_data.push_back(data_element);
	} else if (encoding == Stream::STREAM_ENCODING_RAW) {
		// Just add the data itself.
		data_element.data.iov_base = new char[bytes];
		memcpy(data_element.data.iov_base, data, bytes);
		data_element.data.iov_len = bytes;

		queued_data.push_back(data_element);
	} else {
		assert(false);
	}
}

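// Flush the data queued by add_data_deferred() into the backlog, register
// any new suitable starting points, and wake up all sleeping clients now
// that there is more data for them.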
void Stream::process_queued_data()
{
	vector<DataElement> queued_data_copy;

	// Hold the lock for as short a time as possible, since add_data_raw()
	// can possibly write to disk, which might disturb the input thread.
	{
		MutexLock lock(&queued_data_mutex);
		if (queued_data.empty()) {
			return;
		}

		swap(queued_data, queued_data_copy);
	}

	// Add suitable starting points for the stream, if the queued data
	// contains such starting points. Note that we drop starting points
	// if they're less than 10 kB apart, so that we don't get a huge
	// amount of them for e.g. each and every 188-byte MPEG-TS packet.
	// The 10 kB value is somewhat arbitrary, but at least it should make
	// the RAM cost of saving the position ~0.1% (or less) of the actual
	// data, and 10 kB is a very fine granularity in most streams.
	static const int minimum_start_point_distance = 10240;
	size_t byte_position = bytes_received;
	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
		if ((queued_data_copy[i].metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
			size_t num_points = suitable_starting_points.size();
			if (num_points >= 2 &&
			    suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
				// p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
				suitable_starting_points.pop_back();
			}
			suitable_starting_points.push_back(byte_position);
		}
		byte_position += queued_data_copy[i].data.iov_len;
	}

	add_data_raw(queued_data_copy);
	remove_obsolete_starting_points();
	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
		char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
		delete[] data;
	}

	// We have more data, so wake up all clients.
	if (to_process.empty()) {
		swap(sleeping_clients, to_process);
	} else {
		to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
		sleeping_clients.clear();
	}
}