// stream.cpp (cubemap), as of the commit "Track stream start suitability
// separately for each data block added."
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>  // For pwritev().
#include <algorithm>
#include <string>
#include <vector>

#include "log.h"
#include "metacube2.h"
#include "mutexlock.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

using namespace std;

Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Encoding encoding)
        : url(url),
          encoding(encoding),
          data_fd(make_tempfile("")),
          backlog_size(backlog_size),
          prebuffering_bytes(prebuffering_bytes),
          bytes_received(0),
          last_suitable_starting_point(-1),
          pacing_rate(~0U)
{
        if (data_fd == -1) {
                exit(1);
        }

        pthread_mutex_init(&queued_data_mutex, NULL);
}

Stream::~Stream()
{
        if (data_fd != -1) {
                safe_close(data_fd);
        }
}

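// Reconstruct a stream from a serialized state, e.g. when cubemap is
// restarted. The backlog file descriptor is handed over separately, and
// the encoding is corrected by the caller afterwards (see below).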
Stream::Stream(const StreamProto &serialized, int data_fd)
        : url(serialized.url()),
          http_header(serialized.http_header()),
          stream_header(serialized.stream_header()),
          encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
          data_fd(data_fd),
          backlog_size(serialized.backlog_size()),
          prebuffering_bytes(serialized.prebuffering_bytes()),
          bytes_received(serialized.bytes_received()),
          pacing_rate(~0U)
{
        if (data_fd == -1) {
                exit(1);
        }

        assert(serialized.has_last_suitable_starting_point());
        last_suitable_starting_point = serialized.last_suitable_starting_point();

        pthread_mutex_init(&queued_data_mutex, NULL);
}

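// Serialize the stream state for handover. Note that this hands ownership
// of the backlog file descriptor over to the protobuf; data_fd is set to -1
// so that our destructor will not close it.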
StreamProto Stream::serialize()
{
        StreamProto serialized;
        serialized.set_http_header(http_header);
        serialized.set_stream_header(stream_header);
        serialized.add_data_fds(data_fd);
        serialized.set_backlog_size(backlog_size);
        serialized.set_prebuffering_bytes(prebuffering_bytes);
        serialized.set_bytes_received(bytes_received);
        serialized.set_last_suitable_starting_point(last_suitable_starting_point);
        serialized.set_url(url);
        data_fd = -1;
        return serialized;
}

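// Change the backlog size. The backlog is a circular buffer in a file, so
// the resize works by reading the old file back in, unwrapping it into
// linear order, trimming it down to the new size if needed, and then
// re-adding the surviving data to a fresh file.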
void Stream::set_backlog_size(size_t new_size)
{
        if (backlog_size == new_size) {
                return;
        }

        string existing_data;
        if (!read_tempfile_and_close(data_fd, &existing_data)) {
                exit(1);
        }

        // Unwrap the data so it's no longer circular.
        if (bytes_received <= backlog_size) {
                existing_data.resize(bytes_received);
        } else {
                size_t pos = bytes_received % backlog_size;
                existing_data = existing_data.substr(pos, string::npos) +
                        existing_data.substr(0, pos);
        }
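        // (A worked example with made-up numbers: backlog_size = 4 and
        // bytes_received = 6 means the file holds the bytes 4 5 2 3, so
        // pos = 2 and the two substrings swap back into 2 3 4 5.)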

        // See if we need to discard data.
        if (new_size < existing_data.size()) {
                size_t to_discard = existing_data.size() - new_size;
                existing_data = existing_data.substr(to_discard, string::npos);
        }

        // Create a new, empty data file.
        data_fd = make_tempfile("");
        if (data_fd == -1) {
                exit(1);
        }
        backlog_size = new_size;

        // Now cheat a bit by rewinding, and adding all the old data back.
        bytes_received -= existing_data.size();

        size_t bytes_before_suitable_starting_point;
        if (last_suitable_starting_point == -1) {
                // We do not know of any suitable starting point,
                // so the entire backlog counts as unsuitable.
                bytes_before_suitable_starting_point = existing_data.size();
        } else if (size_t(last_suitable_starting_point) < bytes_received) {
                // The last suitable starting point was discarded along with
                // the old data, so nothing in the backlog is suitable anymore.
                // (Note that bytes_received was rewound above, so it now marks
                // the start of the remaining data.)
                bytes_before_suitable_starting_point = existing_data.size();
                last_suitable_starting_point = -1;
        } else {
                bytes_before_suitable_starting_point = last_suitable_starting_point - bytes_received;
        }

        vector<DataElement> data_elements;
        if (bytes_before_suitable_starting_point > 0) {
                // There's really no usable data here (except for ?backlog=1 users),
                // but we need to get the accounting right anyway.
                DataElement data_element;
                data_element.data.iov_base = const_cast<char *>(existing_data.data());
                data_element.data.iov_len = bytes_before_suitable_starting_point;
                data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
                data_elements.push_back(data_element);
        }
        if (bytes_before_suitable_starting_point < existing_data.size()) {
                DataElement data_element;
                data_element.data.iov_base = const_cast<char *>(existing_data.data() + bytes_before_suitable_starting_point);
                data_element.data.iov_len = existing_data.size() - bytes_before_suitable_starting_point;
                data_element.suitable_for_stream_start = SUITABLE_FOR_STREAM_START;
                data_elements.push_back(data_element);
        }

        // Note that last_suitable_starting_point is still correct here: it
        // either survived the resize untouched, or was invalidated above.
        add_data_raw(data_elements);
}

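// Park a client that has read all available data; process_queued_data()
// moves sleeping clients back into the processing queue when new data
// arrives.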
void Stream::put_client_to_sleep(Client *client)
{
        sleeping_clients.push_back(client);
}

// Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
vector<iovec> collect_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
        vector<iovec> ret;
        size_t max_iovecs = std::min<size_t>(data.size(), IOV_MAX);
        for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
                if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
                        ret.push_back(data[i].data);
                        bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only part of this iovec.
                        iovec iov;
                        iov.iov_base = data[i].data.iov_base;
                        iov.iov_len = bytes_wanted;
                        ret.push_back(iov);
                        bytes_wanted = 0;
                }
        }
        return ret;
}

// Return a new set of data elements that contains all of <data> except
// the first <bytes_wanted> bytes.
vector<Stream::DataElement> remove_iovecs(const vector<Stream::DataElement> &data, size_t bytes_wanted)
{
        vector<Stream::DataElement> ret;
        size_t i;
        for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
                if (data[i].data.iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
                        bytes_wanted -= data[i].data.iov_len;
                } else {
                        // Take only part of this iovec; the leftover tail can
                        // no longer start a stream, since its first byte is
                        // mid-block.
                        Stream::DataElement data_element;
                        data_element.data.iov_base = reinterpret_cast<char *>(data[i].data.iov_base) + bytes_wanted;
                        data_element.data.iov_len = data[i].data.iov_len - bytes_wanted;
                        data_element.suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
                        ret.push_back(data_element);
                        bytes_wanted = 0;
                }
        }

        // Add the rest of the iovecs unchanged.
        ret.insert(ret.end(), data.begin() + i, data.end());
        return ret;
}

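// Write a set of data elements to the backlog file. Since the backlog is
// circular, the data is written in chunks that each end at the wrap-around
// point; pwritev() writes each batch of iovecs at the right offset without
// disturbing any file position.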
void Stream::add_data_raw(const vector<DataElement> &orig_data)
{
        vector<DataElement> data = orig_data;
        while (!data.empty()) {
                size_t pos = bytes_received % backlog_size;

                // Collect as many iovecs as we can before we hit the point
                // where the circular buffer wraps around.
                vector<iovec> to_write = collect_iovecs(data, backlog_size - pos);
                ssize_t ret;
                do {
                        ret = pwritev(data_fd, to_write.data(), to_write.size(), pos);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1) {
                        log_perror("pwritev");
                        // Dazed and confused, but trying to continue...
                        return;
                }
                bytes_received += ret;

                // Remove the data that was actually written from the set of iovecs.
                data = remove_iovecs(data, ret);
        }
}

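// Queue incoming data without touching the backlog file; called from the
// input thread. For Metacube streams, a framing header is prepended here,
// carrying a flag that marks blocks that are not suitable starting points.
// The queue is flushed to disk later by process_queued_data().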
void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
{
        MutexLock lock(&queued_data_mutex);
        assert(suitable_for_stream_start == SUITABLE_FOR_STREAM_START ||
               suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START);

        DataElement data_element;
        data_element.suitable_for_stream_start = suitable_for_stream_start;

        if (encoding == Stream::STREAM_ENCODING_METACUBE) {
                // Add a Metacube block header before the data.
                metacube2_block_header hdr;
                memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
                hdr.size = htonl(bytes);
                hdr.flags = htons(0);
                if (suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START) {
                        hdr.flags |= htons(METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START);
                }
                hdr.csum = htons(metacube2_compute_crc(&hdr));

                data_element.data.iov_base = new char[bytes + sizeof(hdr)];
                data_element.data.iov_len = bytes + sizeof(hdr);

                memcpy(data_element.data.iov_base, &hdr, sizeof(hdr));
                memcpy(reinterpret_cast<char *>(data_element.data.iov_base) + sizeof(hdr), data, bytes);

                queued_data.push_back(data_element);
        } else if (encoding == Stream::STREAM_ENCODING_RAW) {
                // Just add the data itself.
                data_element.data.iov_base = new char[bytes];
                memcpy(data_element.data.iov_base, data, bytes);
                data_element.data.iov_len = bytes;

                queued_data.push_back(data_element);
        } else {
                assert(false);
        }
}

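// Flush any data queued by add_data_deferred() into the backlog, updating
// the last suitable starting point along the way, and then wake up all
// sleeping clients so they can be served the new data.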
void Stream::process_queued_data()
{
        std::vector<DataElement> queued_data_copy;

        // Hold the lock for as short a time as possible, since add_data_raw()
        // may write to disk, which could block the input thread.
        {
                MutexLock lock(&queued_data_mutex);
                if (queued_data.empty()) {
                        return;
                }

                swap(queued_data, queued_data_copy);
        }

        // Update the last suitable starting point for the stream,
        // if the queued data contains such a starting point.
        size_t byte_position = bytes_received;
        for (size_t i = 0; i < queued_data_copy.size(); ++i) {
                if (queued_data_copy[i].suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
                        last_suitable_starting_point = byte_position;
                }
                byte_position += queued_data_copy[i].data.iov_len;
        }

        add_data_raw(queued_data_copy);
        for (size_t i = 0; i < queued_data_copy.size(); ++i) {
                char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
                delete[] data;
        }

        // We have more data, so wake up all clients.
        if (to_process.empty()) {
                swap(sleeping_clients, to_process);
        } else {
                to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
                sleeping_clients.clear();
        }
}
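
/*
 * A minimal usage sketch (hypothetical; not part of cubemap itself). The
 * stream URL, sizes and buffer below are made up for illustration:
 *
 *   Stream stream("/test.ts", 1048576, 0, Stream::STREAM_ENCODING_METACUBE);
 *
 *   // Input thread: queue an incoming block, marking it as a point where
 *   // new clients may start (e.g. a keyframe).
 *   stream.add_data_deferred(block, block_size, SUITABLE_FOR_STREAM_START);
 *
 *   // Server thread: flush the queue to the backlog and wake clients.
 *   stream.process_queued_data();
 */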