#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <curl/curl.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <errno.h>
#include <vector>
#include <string>
#include <map>
#include <algorithm>

#include "metacube.h"
#include "server.h"
#include "mutexlock.h"
#include "state.pb.h"

using namespace std;

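// A freshly accepted client starts out in READING_REQUEST, waiting for its HTTP request.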
Client::Client(int sock)
        : sock(sock),
          state(Client::READING_REQUEST),
          header_bytes_sent(0),
          bytes_sent(0)
{
        request.reserve(1024);
}

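// Reconstruct a client from a serialized ClientProto, e.g. when restoring saved state.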
Client::Client(const ClientProto &serialized)
        : sock(serialized.sock()),
          state(State(serialized.state())),
          request(serialized.request()),
          stream_id(serialized.stream_id()),
          header(serialized.header()),
          header_bytes_sent(serialized.header_bytes_sent()),
          bytes_sent(serialized.bytes_sent())
{
}

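// Serialize the client's state into a ClientProto; the mirror image of the constructor above.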
ClientProto Client::serialize() const
{
        ClientProto serialized;
        serialized.set_sock(sock);
        serialized.set_state(state);
        serialized.set_request(request);
        serialized.set_stream_id(stream_id);
        serialized.set_header(header);
        serialized.set_header_bytes_sent(header_bytes_sent);
        serialized.set_bytes_sent(bytes_sent);
        return serialized;
}

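// A stream keeps its most recent data in a fixed-size circular backlog buffer of BACKLOG_SIZE bytes.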
Stream::Stream(const string &stream_id)
        : stream_id(stream_id),
          data(new char[BACKLOG_SIZE]),
          data_size(0)
{
        memset(data, 0, BACKLOG_SIZE);
}

Stream::~Stream()
{
        delete[] data;
}

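// Reconstruct a stream from a serialized StreamProto; the backlog is always stored
// at its full BACKLOG_SIZE length.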
Stream::Stream(const StreamProto &serialized)
        : header(serialized.header()),
          data(new char[BACKLOG_SIZE]),
          data_size(serialized.data_size())
{
        assert(serialized.data().size() == BACKLOG_SIZE);
        memcpy(data, serialized.data().data(), BACKLOG_SIZE);
}

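// Serialize the stream, including the entire backlog buffer, into a StreamProto.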
StreamProto Stream::serialize() const
{
        StreamProto serialized;
        serialized.set_header(header);
        serialized.set_data(string(data, data + BACKLOG_SIZE));
        serialized.set_data_size(data_size);
        return serialized;
}

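// Set up the mutex and the epoll instance used for all client I/O.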
Server::Server()
{
        pthread_mutex_init(&mutex, NULL);

        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
                perror("epoll_create");
                exit(1);
        }
}

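// Start the worker thread that runs the epoll loop in do_work().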
void Server::run()
{
        should_stop = false;

        // Joinable is already the default, but it's good to be certain.
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
        pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
}

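// Ask the worker thread to stop at its next iteration, then wait for it to exit.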
void Server::stop()
{
        {
                MutexLock lock(&mutex);
                should_stop = true;
        }

        // pthread_join() returns an error number directly instead of setting errno.
        int err = pthread_join(worker_thread, NULL);
        if (err != 0) {
                fprintf(stderr, "pthread_join: %s\n", strerror(err));
                exit(1);
        }
}

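// Trampoline so that pthread_create() can invoke the do_work() member function.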
void *Server::do_work_thunk(void *arg)
{
        Server *server = static_cast<Server *>(arg);
        server->do_work();
        return NULL;
}

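// The worker thread's main loop: wait for epoll events and process the affected
// clients, until stop() asks us to quit.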
void Server::do_work()
{
        for ( ;; ) {
                int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
                if (nfds == -1) {
                        perror("epoll_wait");
                        exit(1);
                }

                MutexLock lock(&mutex);  // We release the mutex between iterations.

                if (should_stop) {
                        return;
                }

                for (int i = 0; i < nfds; ++i) {
                        int fd = events[i].data.fd;
                        assert(clients.count(fd) != 0);
                        Client *client = &clients[fd];

                        if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
                                close_client(client);
                                continue;
                        }

                        process_client(client);
                }
        }
}

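// Snapshot all clients and streams into a CubemapStateProto. Note that this
// does not take the mutex itself.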
CubemapStateProto Server::serialize() const
{
        CubemapStateProto serialized;
        for (map<int, Client>::const_iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                serialized.add_clients()->MergeFrom(client_it->second.serialize());
        }
        for (map<string, Stream *>::const_iterator stream_it = streams.begin();
             stream_it != streams.end();
             ++stream_it) {
                serialized.add_streams()->MergeFrom(stream_it->second->serialize());
        }
        return serialized;
}

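// Register a newly connected client socket with epoll so we are notified
// when its request arrives.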
void Server::add_client(int sock)
{
        MutexLock lock(&mutex);
        clients.insert(make_pair(sock, Client(sock)));

        // Start listening for data from this socket.
        epoll_event ev;
        ev.events = EPOLLIN | EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = sock;
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }
}

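// Register a new, empty stream under the given stream ID.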
void Server::add_stream(const string &stream_id)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream_id, new Stream(stream_id)));
}

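// Set (or replace) the header for a stream, and make sure clients that have not yet
// received any data get the new header before their stream data.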
void Server::set_header(const string &stream_id, const string &header)
{
        MutexLock lock(&mutex);
        find_stream(stream_id)->header = header;

        // If there are clients we haven't sent anything to yet, we should give
        // them the header, so push them back into the SENDING_HEADER state.
        for (map<int, Client>::iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                Client *client = &client_it->second;
                if (client->state == Client::SENDING_DATA &&
                    client->bytes_sent == 0) {
                        construct_header(client);
                }
        }
}

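// Append new data to a stream's circular backlog buffer (wrapping around BACKLOG_SIZE
// if needed), then wake up all sleeping clients so they can start sending it.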
void Server::add_data(const string &stream_id, const char *data, size_t bytes)
{
        if (bytes == 0) {
                return;
        }

        MutexLock lock(&mutex);
        Stream *stream = find_stream(stream_id);
        size_t pos = stream->data_size % BACKLOG_SIZE;
        stream->data_size += bytes;

        if (pos + bytes > BACKLOG_SIZE) {
                size_t to_copy = BACKLOG_SIZE - pos;
                memcpy(stream->data + pos, data, to_copy);
                data += to_copy;
                bytes -= to_copy;
                pos = 0;
        }

        memcpy(stream->data + pos, data, bytes);
        wake_up_all_clients();
}

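// Drive a client through its state machine: read and validate the HTTP request
// (READING_REQUEST), write the response header (SENDING_HEADER), and then keep
// sending stream data from the backlog as it becomes available (SENDING_DATA).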
void Server::process_client(Client *client)
{
        switch (client->state) {
        case Client::READING_REQUEST: {
                // Try to read more of the request.
                char buf[1024];
                int ret = read(client->sock, buf, sizeof(buf));
                if (ret == -1) {
                        perror("read");
                        close_client(client);
                        return;
                }
                if (ret == 0) {
                        // read() returning 0 on a socket means the peer closed the connection.
                        // We would normally expect to catch that as EPOLLRDHUP in do_work(),
                        // so getting it here is unexpected, but handle it gracefully.
                        fprintf(stderr, "WARNING: fd %d unexpectedly returned 0 bytes!\n", client->sock);
                        close_client(client);
                        return;
                }

                // Guard against overlong requests gobbling up all of our space.
                if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
                        fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
                        close_client(client);
                        return;
                }

                // See if we have \r\n\r\n anywhere in the request. We start three bytes
                // before what we just appended, in case we just got the final character.
                size_t existing_req_bytes = client->request.size();
                client->request.append(string(buf, buf + ret));

                size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
                const char *ptr = reinterpret_cast<char *>(
                        memmem(client->request.data() + start_at, client->request.size() - start_at,
                               "\r\n\r\n", 4));
                if (ptr == NULL) {
                        // OK, we don't have the entire header yet. Fine; we'll get it later.
                        return;
                }

                if (ptr != client->request.data() + client->request.size() - 4) {
                        fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
                        close_client(client);
                        return;
                }

                parse_request(client);
                construct_header(client);
                break;
        }
        case Client::SENDING_HEADER: {
                int ret = write(client->sock,
                                client->header.data() + client->header_bytes_sent,
                                client->header.size() - client->header_bytes_sent);
                if (ret == -1) {
                        perror("write");
                        close_client(client);
                        return;
                }

                client->header_bytes_sent += ret;
                assert(client->header_bytes_sent <= client->header.size());

                if (client->header_bytes_sent < client->header.size()) {
                        // We haven't sent all yet. Fine; we'll do that later.
                        return;
                }

                // We're done sending the header! Clear the entire header to release some memory.
                client->header.clear();

                // Start sending from the end. In other words, we won't send any of the backlog,
                // but we'll start sending immediately as we get data.
                client->state = Client::SENDING_DATA;
                client->bytes_sent = find_stream(client->stream_id)->data_size;
                break;
        }
        case Client::SENDING_DATA: {
                // See if there's some data we've lost. Ideally, we should drop to a block boundary,
                // but resync will be the mux's problem.
                const Stream &stream = *find_stream(client->stream_id);
                size_t bytes_to_send = stream.data_size - client->bytes_sent;
                if (bytes_to_send > BACKLOG_SIZE) {
                        fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
                                client->sock,
                                (long long int)(bytes_to_send - BACKLOG_SIZE));
                        client->bytes_sent = find_stream(client->stream_id)->data_size - BACKLOG_SIZE;
                        bytes_to_send = BACKLOG_SIZE;
                }

                // See if we need to split across the circular buffer.
                ssize_t ret;
                if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
                        size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);

                        iovec iov[2];
                        iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
                        iov[0].iov_len = bytes_first_part;

                        iov[1].iov_base = const_cast<char *>(stream.data);
                        iov[1].iov_len = bytes_to_send - bytes_first_part;

                        ret = writev(client->sock, iov, 2);
                } else {
                        ret = write(client->sock,
                                    stream.data + (client->bytes_sent % BACKLOG_SIZE),
                                    bytes_to_send);
                }
                if (ret == -1) {
                        perror("write/writev");
                        close_client(client);
                        return;
                }
                client->bytes_sent += ret;

                if (client->bytes_sent == stream.data_size) {
                        // We don't have any more data for this client, so put it to sleep.
                        put_client_to_sleep(client);
                }
                break;
        }
        default:
                assert(false);
        }
}

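// Extract the stream ID from the client's request. (Actual request parsing is still
// a stub; see the TODO below.)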
void Server::parse_request(Client *client)
{
        // TODO: Actually parse the request. :-)
        client->stream_id = "stream";
        client->request.clear();
}

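// Build the HTTP response header (plus the stream header) for a client, and switch it
// into the SENDING_HEADER state so epoll will tell us when the socket is writable.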
void Server::construct_header(Client *client)
{
        client->header = "HTTP/1.0 200 OK\r\nContent-type: video/x-flv\r\nCache-Control: no-cache\r\n\r\n" +
                find_stream(client->stream_id)->header;

        // Switch states.
        client->state = Client::SENDING_HEADER;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

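// Deregister a client from epoll, remove it from the sleeping list if needed,
// and close its socket.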
void Server::close_client(Client *client)
{
        if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
                perror("epoll_ctl(EPOLL_CTL_DEL)");
                exit(1);
        }

        // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
        vector<int>::iterator new_end =
                remove(sleeping_clients.begin(), sleeping_clients.end(), client->sock);
        sleeping_clients.erase(new_end, sleeping_clients.end());

        // Bye-bye!
        close(client->sock);
        clients.erase(client->sock);
}

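// Stop listening for EPOLLOUT on a client that has no more data to receive right now;
// wake_up_all_clients() will re-arm it when new data arrives.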
void Server::put_client_to_sleep(Client *client)
{
        epoll_event ev;
        ev.events = EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }

        sleeping_clients.push_back(client->sock);
}

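// Re-arm EPOLLOUT for every sleeping client so they get scheduled for sending again.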
void Server::wake_up_all_clients()
{
        for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
                epoll_event ev;
                ev.events = EPOLLOUT | EPOLLRDHUP;
                ev.data.u64 = 0;  // Keep Valgrind happy.
                ev.data.fd = sleeping_clients[i];
                if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
                        perror("epoll_ctl(EPOLL_CTL_MOD)");
                        exit(1);
                }
        }
        sleeping_clients.clear();
}

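// Look up a stream by ID; the stream must already have been registered with add_stream().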
Stream *Server::find_stream(const string &stream_id)
{
        map<string, Stream *>::iterator it = streams.find(stream_id);
        assert(it != streams.end());
        return it->second;
}