#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <algorithm>
#include <mutex>
#include <string>
#include <queue>
#include <vector>

#include "log.h"
#include "metacube2.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

using namespace std;

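// Create a new stream. The backlog is kept in a temporary file created by
// make_tempfile(); if we cannot get one, there is no reasonable way to
// continue, so we simply exit.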
Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding, Encoding src_encoding)
        : url(url),
          encoding(encoding),
          src_encoding(src_encoding),
          data_fd(make_tempfile("")),
          backlog_size(backlog_size),
          prebuffering_bytes(prebuffering_bytes)
{
        if (data_fd == -1) {
                exit(1);
        }
}

Stream::~Stream()
{
        if (data_fd != -1) {
                safe_close(data_fd);
        }
}

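// Reconstruct a stream from a serialized StreamProto, e.g. after cubemap
// has re-executed itself during an upgrade. The backlog file descriptor
// is passed in separately, as it is process state rather than protobuf data.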
Stream::Stream(const StreamProto &serialized, int data_fd)
        : url(serialized.url()),
          http_header(serialized.http_header()),
          stream_header(serialized.stream_header()),
          encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
          data_fd(data_fd),
          backlog_size(serialized.backlog_size()),
          prebuffering_bytes(serialized.prebuffering_bytes()),
          bytes_received(serialized.bytes_received())
{
        if (data_fd == -1) {
                exit(1);
        }

        for (ssize_t point : serialized.suitable_starting_point()) {
                if (point == -1) {
                        // Can happen when upgrading from before 1.1.3,
                        // where this was an optional field with -1 signifying
                        // "no such point".
                        continue;
                }
                suitable_starting_points.push_back(point);
        }
}

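// Serialize the stream state for handover to a new cubemap process.
// Note that this transfers ownership of the backlog file descriptor;
// data_fd is set to -1 so that our destructor will not close it.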
StreamProto Stream::serialize()
{
        StreamProto serialized;
        serialized.set_http_header(http_header);
        serialized.set_stream_header(stream_header);
        serialized.add_data_fds(data_fd);
        serialized.set_backlog_size(backlog_size);
        serialized.set_prebuffering_bytes(prebuffering_bytes);
        serialized.set_bytes_received(bytes_received);
        for (size_t point : suitable_starting_points) {
                serialized.add_suitable_starting_point(point);
        }
        serialized.set_url(url);
        data_fd = -1;
        return serialized;
}

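// Change the backlog size. Since the backlog is a circular buffer backed by
// a file, this means reading out all the existing data, unwrapping it into
// linear order, and writing the most recent part back into a fresh file of
// the new size.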
void Stream::set_backlog_size(size_t new_size)
{
        if (backlog_size == new_size) {
                return;
        }

        string existing_data;
        if (!read_tempfile_and_close(data_fd, &existing_data)) {
                exit(1);
        }

        // Unwrap the data so it's no longer circular.
        if (bytes_received <= backlog_size) {
                existing_data.resize(bytes_received);
        } else {
                size_t pos = bytes_received % backlog_size;
                existing_data = existing_data.substr(pos, string::npos) +
                        existing_data.substr(0, pos);
        }

        // See if we need to discard data.
        if (new_size < existing_data.size()) {
                size_t to_discard = existing_data.size() - new_size;
                existing_data = existing_data.substr(to_discard, string::npos);
        }

        // Create a new, empty data file.
        data_fd = make_tempfile("");
        if (data_fd == -1) {
                exit(1);
        }
        backlog_size = new_size;

        // Now cheat a bit by rewinding, and adding all the old data back.
        bytes_received -= existing_data.size();
        DataElement data_element;
        data_element.data.iov_base = const_cast<char *>(existing_data.data());
        data_element.data.iov_len = existing_data.size();
        data_element.metacube_flags = 0;  // Ignored by add_data_raw().

        vector<DataElement> data_elements;
        data_elements.push_back(data_element);
        add_data_raw(data_elements);
        remove_obsolete_starting_points();
}

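// Mark a client as sleeping (having no more data to send for now); it will
// be moved back into the to_process list the next time process_queued_data()
// commits new data.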
void Stream::put_client_to_sleep(Client *client)
{
        sleeping_clients.push_back(client);
}

// Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>,
// and never more than IOV_MAX iovecs (the maximum pwritev() accepts in one call).
vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
        vector<iovec> ret;
        size_t max_iovecs = min<size_t>(data.size(), IOV_MAX);
        for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
                if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
                        ret.push_back(data[i].data);
                        bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only part of this iovec.
                        iovec iov;
                        iov.iov_base = data[i].data.iov_base;
                        iov.iov_len = bytes_wanted;
                        ret.push_back(iov);
                        bytes_wanted = 0;
                }
        }
        return ret;
}

// Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
        vector<Stream::DataElement> ret;
        size_t i;
        for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
                if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
                        bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Keep only part of this iovec.
                        Stream::DataElement data_element;
                        data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
                        data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
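                        // The remainder starts in the middle of a block,
                        // so it can never be a suitable starting point.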
                        data_element.metacube_flags = METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START;
                        ret.push_back(data_element);
                        bytes_wanted = 0;
                }
        }

        // Add the rest of the iovecs unchanged.
        ret.insert(ret.end(), data.begin() + i, data.end());
        return ret;
}

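// Append the data elements to the backlog, treating the file as a circular
// buffer of backlog_size bytes. Writes that would cross the wrap-around point
// are split; e.g., with backlog_size=1000 and bytes_received=1300, the write
// position is 1300 % 1000 = 300, so a 900-byte write becomes 700 bytes at
// offset 300 on the first iteration and 200 bytes at offset 0 on the next.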
void Stream::add_data_raw(const vector<DataElement> &orig_data)
{
        vector<DataElement> data = orig_data;
        while (!data.empty()) {
                size_t pos = bytes_received % backlog_size;

                // Collect as many iovecs as we can before we hit the point
                // where the circular buffer wraps around.
                vector<iovec> to_write = collect_iovecs(data, backlog_size - pos);
                ssize_t ret;
                do {
                        ret = pwritev(data_fd, to_write.data(), to_write.size(), pos);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1) {
                        log_perror("pwritev");
                        // Dazed and confused, but trying to continue...
                        return;
                }
                bytes_received += ret;

                // Remove the data that was actually written from the set of iovecs.
                data = remove_iovecs(data, ret);
        }
}

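// Drop starting points that have been overwritten in the circular buffer;
// a client can only be started at a point that is still within the last
// backlog_size bytes of the stream.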
void Stream::remove_obsolete_starting_points()
{
        // We could do a binary search here (std::lower_bound), but it seems
        // overkill for removing what's probably only a few points.
        while (!suitable_starting_points.empty() &&
               bytes_received - suitable_starting_points[0] > backlog_size) {
                suitable_starting_points.pop_front();
        }
}

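// Queue data for the stream; it is not visible to clients until the next
// call to process_queued_data(). The queue is protected by a mutex, since
// this is called from the input side while the serving side drains it.
// For Metacube output, the data is framed with a Metacube2 block header here.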
void Stream::add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags)
{
        // For regular output, we don't want to send the stream header twice
        // (it's already sent out together with the HTTP header).
        // However, for Metacube output, we need to send it so that
        // the Cubemap instance on the other end has a chance to update it.
        // It may come twice in its stream, but Cubemap doesn't care.
        if (encoding == Stream::STREAM_ENCODING_RAW &&
            (metacube_flags & METACUBE_FLAGS_HEADER) != 0) {
                return;
        }

        lock_guard<mutex> lock(queued_data_mutex);

        DataElement data_element;
        data_element.metacube_flags = metacube_flags;

        if (encoding == Stream::STREAM_ENCODING_METACUBE) {
                // Add a Metacube block header before the data.
                metacube2_block_header hdr;
                memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
                hdr.size = htonl(bytes);
                hdr.flags = htons(metacube_flags);
                hdr.csum = htons(metacube2_compute_crc(&hdr));

                data_element.data.iov_base = new char[bytes + sizeof(hdr)];
                data_element.data.iov_len = bytes + sizeof(hdr);

                memcpy(data_element.data.iov_base, &hdr, sizeof(hdr));
                memcpy(reinterpret_cast<char *>(data_element.data.iov_base) + sizeof(hdr), data, bytes);

                queued_data.push_back(data_element);
        } else if (encoding == Stream::STREAM_ENCODING_RAW) {
                // Just add the data itself.
                data_element.data.iov_base = new char[bytes];
                memcpy(data_element.data.iov_base, data, bytes);
                data_element.data.iov_len = bytes;

                queued_data.push_back(data_element);
        } else {
                assert(false);
        }
}

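// Commit all data queued by add_data_deferred() to the backlog, record any
// suitable starting points it contains, and wake up all sleeping clients
// so they can be sent the new data.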
void Stream::process_queued_data()
{
        vector<DataElement> queued_data_copy;

        // Hold the lock for as short a time as possible, since add_data_raw()
        // can possibly write to disk, which might disturb the input thread.
        {
                lock_guard<mutex> lock(queued_data_mutex);
                if (queued_data.empty()) {
                        return;
                }

                swap(queued_data, queued_data_copy);
        }

        // Add suitable starting points for the stream, if the queued data
        // contains such starting points. Note that we drop starting points
        // if they're less than 10 kB apart, so that we don't get a huge
        // amount of them for e.g. each and every MPEG-TS 188-byte packet.
        // The 10 kB value is somewhat arbitrary, but at least it should make
        // the RAM cost of saving the position ~0.1% (or less) of the actual
        // data, and 10 kB is a very fine granularity in most streams.
        static const int minimum_start_point_distance = 10240;
        size_t byte_position = bytes_received;
        for (const DataElement &elem : queued_data_copy) {
                if ((elem.metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
                        size_t num_points = suitable_starting_points.size();
                        if (num_points >= 2 &&
                            suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
                                // p[n-1] - p[n-2] < 10 kB, so drop p[n-1].
                                suitable_starting_points.pop_back();
                        }
                        suitable_starting_points.push_back(byte_position);
                }
                byte_position += elem.data.iov_len;
        }

        add_data_raw(queued_data_copy);
        remove_obsolete_starting_points();
        for (const DataElement &elem : queued_data_copy) {
                char *data = reinterpret_cast<char *>(elem.data.iov_base);
                delete[] data;
        }

        // We have more data, so wake up all clients.
        if (to_process.empty()) {
                swap(sleeping_clients, to_process);
        } else {
                to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
                sleeping_clients.clear();
        }
}