[cubemap] / stream.cpp
#include <assert.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <string>
#include <vector>

#include "log.h"
#include "metacube.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

using namespace std;

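// Construct a fresh stream; the backlog is backed by a newly created
// temporary file. If that file cannot be created, we exit, since the
// stream would be unusable without a backlog.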
Stream::Stream(const string &stream_id, size_t backlog_size, Encoding encoding)
	: stream_id(stream_id),
	  encoding(encoding),
	  data_fd(make_tempfile("")),
	  backlog_size(backlog_size),
	  bytes_received(0),
	  mark_pool(NULL)
{
	if (data_fd == -1) {
		exit(1);
	}
}

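// Close the backlog file unless ownership of it has already been given
// away (serialize() sets data_fd to -1 after handing the descriptor over).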
Stream::~Stream()
{
	if (data_fd != -1) {
		safe_close(data_fd);
	}
}

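// Reconstruct a stream from a serialized StreamProto, reusing an already
// existing backlog file descriptor. The encoding is set to raw here as a
// placeholder and changed later (see the inline comment below).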
Stream::Stream(const StreamProto &serialized, int data_fd)
	: stream_id(serialized.stream_id()),
	  http_header(serialized.http_header()),
	  stream_header(serialized.stream_header()),
	  encoding(Stream::STREAM_ENCODING_RAW),  // Will be changed later.
	  data_fd(data_fd),
	  backlog_size(serialized.backlog_size()),
	  bytes_received(serialized.bytes_received()),
	  mark_pool(NULL)
{
	if (data_fd == -1) {
		exit(1);
	}

	// Split old-style headers into HTTP and video headers.
	if (!serialized.header().empty()) {
		string header = serialized.header();
		size_t split = header.find("\r\n\r\n");
		if (split == string::npos) {
			http_header = header;
			stream_header = "";
		} else {
			http_header = header.substr(0, split + 2);  // Split off the second \r\n.
			stream_header = header.substr(split, string::npos);
		}
	}
}

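// Serialize the stream state. Note that this transfers ownership of the
// backlog file descriptor into the returned proto; data_fd is set to -1
// so that the destructor will not close it.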
StreamProto Stream::serialize()
{
	StreamProto serialized;
	serialized.set_http_header(http_header);
	serialized.set_stream_header(stream_header);
	serialized.add_data_fds(data_fd);
	serialized.set_backlog_size(backlog_size);
	serialized.set_bytes_received(bytes_received);
	serialized.set_stream_id(stream_id);
	data_fd = -1;
	return serialized;
}

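// Change the size of the circular backlog. The existing contents are read
// back from the old file, unwrapped into linear order, trimmed down to the
// new size if necessary, and then written into a fresh temporary file.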
void Stream::set_backlog_size(size_t new_size)
{
	if (backlog_size == new_size) {
		return;
	}

	string existing_data;
	if (!read_tempfile_and_close(data_fd, &existing_data)) {
		exit(1);
	}

	// Unwrap the data so it's no longer circular.
	if (bytes_received <= backlog_size) {
		existing_data.resize(bytes_received);
	} else {
		size_t pos = bytes_received % backlog_size;
		existing_data = existing_data.substr(pos, string::npos) +
			existing_data.substr(0, pos);
	}

	// See if we need to discard data.
	if (new_size < existing_data.size()) {
		size_t to_discard = existing_data.size() - new_size;
		existing_data = existing_data.substr(to_discard, string::npos);
	}

	// Create a new, empty data file.
	data_fd = make_tempfile("");
	if (data_fd == -1) {
		exit(1);
	}
	backlog_size = new_size;

	// Now cheat a bit by rewinding, and adding all the old data back.
	bytes_received -= existing_data.size();
	add_data_raw(existing_data.data(), existing_data.size());
}

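// Park a client that has no more data to read; it will be picked up again
// in process_queued_data() once new data has arrived.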
void Stream::put_client_to_sleep(Client *client)
{
	sleeping_clients.push_back(client);
}

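// Write data directly into the circular backlog file, splitting the write
// in two if it wraps around the end of the backlog. Each part is retried
// on EINTR and on short writes.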
void Stream::add_data_raw(const char *data, ssize_t bytes)
{
	size_t pos = bytes_received % backlog_size;
	bytes_received += bytes;

	if (pos + bytes > backlog_size) {
		ssize_t to_copy = backlog_size - pos;
		while (to_copy > 0) {
			int ret = pwrite(data_fd, data, to_copy, pos);
			if (ret == -1 && errno == EINTR) {
				continue;
			}
			if (ret == -1) {
				log_perror("pwrite");
				// Dazed and confused, but trying to continue...
				break;
			}
			pos += ret;
			data += ret;
			to_copy -= ret;
			bytes -= ret;
		}
		pos = 0;
	}

	while (bytes > 0) {
		int ret = pwrite(data_fd, data, bytes, pos);
		if (ret == -1 && errno == EINTR) {
			continue;
		}
		if (ret == -1) {
			log_perror("pwrite");
			// Dazed and confused, but trying to continue...
			break;
		}
		pos += ret;
		data += ret;
		bytes -= ret;
	}
}

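// Queue incoming data instead of writing it immediately; the actual write
// to the backlog happens in process_queued_data(). If the stream is served
// as Metacube, the chunk is first wrapped in a metacube_block_header.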
void Stream::add_data_deferred(const char *data, size_t bytes)
{
	if (encoding == Stream::STREAM_ENCODING_RAW) {
		queued_data.append(string(data, data + bytes));
	} else if (encoding == STREAM_ENCODING_METACUBE) {
		metacube_block_header hdr;
		memcpy(hdr.sync, METACUBE_SYNC, sizeof(hdr.sync));
		hdr.size = htonl(bytes);
		hdr.flags = htonl(0);

		char *block = new char[bytes + sizeof(hdr)];
		memcpy(block, &hdr, sizeof(hdr));
		memcpy(block + sizeof(hdr), data, bytes);
		queued_data.append(string(block, block + bytes + sizeof(hdr)));
		delete[] block;
	} else {
		assert(false);
	}
}

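// Flush any data queued by add_data_deferred() into the backlog, and move
// all sleeping clients onto the to_process list so that they will be woken
// up and served the new data.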
void Stream::process_queued_data()
{
	if (queued_data.empty()) {
		return;
	}

	add_data_raw(queued_data.data(), queued_data.size());
	queued_data.clear();

	// We have more data, so wake up all clients.
	if (to_process.empty()) {
		swap(sleeping_clients, to_process);
	} else {
		to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
		sleeping_clients.clear();
	}
}