#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <algorithm>
#include <string>
#include <vector>

#include "log.h"
#include "metacube2.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

using namespace std;

Stream::Stream(const string &url, size_t backlog_size, Encoding encoding)
        : url(url),
          encoding(encoding),
          data_fd(make_tempfile("")),
          backlog_size(backlog_size),
          bytes_received(0),
          last_suitable_starting_point(-1),
          mark_pool(NULL),
          queued_data_last_starting_point(-1)
{
        if (data_fd == -1) {
                exit(1);
        }
}

Stream::~Stream()
{
        if (data_fd != -1) {
                safe_close(data_fd);
        }
}

Stream::Stream(const StreamProto &serialized, int data_fd)
        : url(serialized.url()),
          http_header(serialized.http_header()),
          stream_header(serialized.stream_header()),
          encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
          data_fd(data_fd),
          backlog_size(serialized.backlog_size()),
          bytes_received(serialized.bytes_received()),
          mark_pool(NULL),
          queued_data_last_starting_point(-1)
{
        if (data_fd == -1) {
                exit(1);
        }

        // Split old-style headers into HTTP and video headers.
        if (!serialized.header().empty()) {
                string header = serialized.header();
                size_t split = header.find("\r\n\r\n");
                if (split == string::npos) {
                        http_header = header;
                        stream_header = "";
                } else {
                        http_header = header.substr(0, split + 2);  // Split off the second \r\n.
                        stream_header = header.substr(split + 4, string::npos);
                }
        }

        // Older versions did not set last_suitable_starting_point.
        if (serialized.has_last_suitable_starting_point()) {
                last_suitable_starting_point = serialized.last_suitable_starting_point();
        } else {
                last_suitable_starting_point = bytes_received;
        }
}

StreamProto Stream::serialize()
{
        StreamProto serialized;
        serialized.set_http_header(http_header);
        serialized.set_stream_header(stream_header);
        serialized.add_data_fds(data_fd);
        serialized.set_backlog_size(backlog_size);
        serialized.set_bytes_received(bytes_received);
        serialized.set_last_suitable_starting_point(last_suitable_starting_point);
        serialized.set_url(url);
        data_fd = -1;
        return serialized;
}

void Stream::set_backlog_size(size_t new_size)
{
        if (backlog_size == new_size) {
                return;
        }

        string existing_data;
        if (!read_tempfile_and_close(data_fd, &existing_data)) {
                exit(1);
        }

        // Unwrap the data so it's no longer circular.
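        // For instance, with backlog_size = 10 and bytes_received = 13,
        // the file holds the last ten bytes of the stream, stored as
        // [10 11 12 | 3 4 5 6 7 8 9]; pos = 13 % 10 = 3, and swapping
        // the two halves around pos restores chronological order.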
        if (bytes_received <= backlog_size) {
                existing_data.resize(bytes_received);
        } else {
                size_t pos = bytes_received % backlog_size;
                existing_data = existing_data.substr(pos, string::npos) +
                        existing_data.substr(0, pos);
        }

        // See if we need to discard data.
        if (new_size < existing_data.size()) {
                size_t to_discard = existing_data.size() - new_size;
                existing_data = existing_data.substr(to_discard, string::npos);
        }

        // Create a new, empty data file.
        data_fd = make_tempfile("");
        if (data_fd == -1) {
                exit(1);
        }
        backlog_size = new_size;

        // Now cheat a bit by rewinding, and adding all the old data back.
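        // (Rewinding bytes_received makes add_data_raw() write the old
        // data back at the offsets it would normally occupy in the new
        // file; the counter is incremented back up as it writes.)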
        bytes_received -= existing_data.size();
        iovec iov;
        iov.iov_base = const_cast<char *>(existing_data.data());
        iov.iov_len = existing_data.size();

        vector<iovec> iovs;
        iovs.push_back(iov);
        add_data_raw(iovs);
}

void Stream::put_client_to_sleep(Client *client)
{
        sleeping_clients.push_back(client);
}

// Return a new set of iovecs that contains only the first <bytes_wanted> bytes of <data>.
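// The returned iovecs alias the input buffers, so no data is copied.
// For example, given iovecs of 3 and 5 bytes and bytes_wanted = 6, the
// result is the whole 3-byte iovec followed by the first 3 bytes of the
// second. The result is also capped at IOV_MAX entries, which is the
// most a single writev()-family call will accept.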
vector<iovec> collect_iovecs(const vector<iovec> &data, size_t bytes_wanted)
{
        vector<iovec> ret;
        size_t max_iovecs = std::min<size_t>(data.size(), IOV_MAX);
        for (size_t i = 0; i < max_iovecs && bytes_wanted > 0; ++i) {
                if (data[i].iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
                        ret.push_back(data[i]);
                        bytes_wanted -= data[i].iov_len;
                } else {
                        // Take only part of this iovec.
                        iovec iov;
                        iov.iov_base = data[i].iov_base;
                        iov.iov_len = bytes_wanted;
                        ret.push_back(iov);
                        bytes_wanted = 0;
                }
        }
        return ret;
}

// Return a new set of iovecs that contains all of <data> except the first <bytes_wanted> bytes.
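// Continuing the example above: with iovecs of 3 and 5 bytes and
// bytes_wanted = 6, the result is a single 2-byte iovec pointing into
// the tail of the second buffer.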
vector<iovec> remove_iovecs(const vector<iovec> &data, size_t bytes_wanted)
{
        vector<iovec> ret;
        size_t i;
        for (i = 0; i < data.size() && bytes_wanted > 0; ++i) {
                if (data[i].iov_len <= bytes_wanted) {
                        // Consume the entire iovec.
                        bytes_wanted -= data[i].iov_len;
                } else {
                        // Take only part of this iovec.
                        iovec iov;
                        iov.iov_base = reinterpret_cast<char *>(data[i].iov_base) + bytes_wanted;
                        iov.iov_len = data[i].iov_len - bytes_wanted;
                        ret.push_back(iov);
                        bytes_wanted = 0;
                }
        }

        // Add the rest of the iovecs unchanged.
        ret.insert(ret.end(), data.begin() + i, data.end());
        return ret;
}

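// Write the given data into the circular backlog file. A single call to
// pwritev() may not consume everything, both because the write can hit
// the wraparound point of the buffer and because writes can come up
// short, so loop until all iovecs have been written.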
void Stream::add_data_raw(const vector<iovec> &orig_data)
{
        vector<iovec> data = orig_data;
        while (!data.empty()) {
                size_t pos = bytes_received % backlog_size;

                // Collect as many iovecs as we can before we hit the point
                // where the circular buffer wraps around.
                vector<iovec> to_write = collect_iovecs(data, backlog_size - pos);
                ssize_t ret;
                do {
                        ret = pwritev(data_fd, to_write.data(), to_write.size(), pos);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1) {
                        log_perror("pwritev");
                        // Dazed and confused, but trying to continue...
                        return;
                }
                bytes_received += ret;

                // Remove the data that was actually written from the set of iovecs.
                data = remove_iovecs(data, ret);
        }
}

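// Queue incoming data without writing it to the backlog yet; nothing is
// visible to clients until process_queued_data() runs. (Presumably the
// point of deferring is to batch writes and client wakeups rather than
// doing them once per incoming block.)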
void Stream::add_data_deferred(const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
{
        assert(suitable_for_stream_start == SUITABLE_FOR_STREAM_START ||
               suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START);
        if (suitable_for_stream_start == SUITABLE_FOR_STREAM_START) {
                queued_data_last_starting_point = queued_data.size();
        }

        if (encoding == Stream::STREAM_ENCODING_METACUBE) {
                // Add a Metacube block header before the data.
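                // The header layout (see metacube2.h) is an 8-byte sync
                // marker, then the payload size, flags and a header CRC,
                // all in network byte order.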
                metacube2_block_header hdr;
                memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
                hdr.size = htonl(bytes);
                hdr.flags = htons(0);
                if (suitable_for_stream_start == NOT_SUITABLE_FOR_STREAM_START) {
                        hdr.flags |= htons(METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START);
                }
                hdr.csum = htons(metacube2_compute_crc(&hdr));

                iovec iov;
                iov.iov_base = new char[bytes + sizeof(hdr)];
                iov.iov_len = bytes + sizeof(hdr);

                memcpy(iov.iov_base, &hdr, sizeof(hdr));
                memcpy(reinterpret_cast<char *>(iov.iov_base) + sizeof(hdr), data, bytes);

                queued_data.push_back(iov);
        } else if (encoding == Stream::STREAM_ENCODING_RAW) {
                // Just add the data itself.
                iovec iov;
                iov.iov_base = new char[bytes];
                memcpy(iov.iov_base, data, bytes);
                iov.iov_len = bytes;

                queued_data.push_back(iov);
        } else {
                assert(false);
        }
}

void Stream::process_queued_data()
{
        if (queued_data.empty()) {
                return;
        }

        // Update the last suitable starting point for the stream,
        // if the queued data contains such a starting point.
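        // queued_data_last_starting_point is an index into queued_data,
        // so the absolute byte position of that block is the current
        // stream position plus the sizes of everything queued before it.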
        assert(queued_data_last_starting_point < ssize_t(queued_data.size()));
        if (queued_data_last_starting_point >= 0) {
                last_suitable_starting_point = bytes_received;
                for (int i = 0; i < queued_data_last_starting_point; ++i) {
                        last_suitable_starting_point += queued_data[i].iov_len;
                }
        }

        add_data_raw(queued_data);
        for (size_t i = 0; i < queued_data.size(); ++i) {
                char *data = reinterpret_cast<char *>(queued_data[i].iov_base);
                delete[] data;
        }
        queued_data.clear();
        queued_data_last_starting_point = -1;

        // We have more data, so wake up all clients.
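        // swap() moves the entire sleeping list over in constant time
        // when to_process is empty; otherwise, fall back to appending.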
        if (to_process.empty()) {
                swap(sleeping_clients, to_process);
        } else {
                to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
                sleeping_clients.clear();
        }
}