// server.cpp — cubemap streaming server (HTTP client/stream handling).
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <arpa/inet.h>
#include <curl/curl.h>
#include <pthread.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <algorithm>
#include <map>
#include <string>
#include <vector>

#include "metacube.h"
#include "mutexlock.h"
#include "parse.h"
#include "server.h"
#include "state.pb.h"
23
24 using namespace std;
25
// Construct a client in its initial state, ready to read an HTTP request
// from the given (already-accepted) socket.
Client::Client(int sock)
        : sock(sock),
          state(Client::READING_REQUEST),
          header_or_error_bytes_sent(0),
          bytes_sent(0)
{
        // Typical requests fit in 1 kB; reserve up front to avoid regrowth.
        request.reserve(1024);
}
34         
// Reconstruct a client from serialized state (see serialize() below),
// used when handing live connections over across process executions.
// The fd itself is assumed to have stayed open across the handoff.
Client::Client(const ClientProto &serialized)
        : sock(serialized.sock()),
          state(State(serialized.state())),
          request(serialized.request()),
          stream_id(serialized.stream_id()),
          header_or_error(serialized.header_or_error()),
          header_or_error_bytes_sent(serialized.header_or_error_bytes_sent()),
          bytes_sent(serialized.bytes_sent())
{
}
45
46 ClientProto Client::serialize() const
47 {
48         ClientProto serialized;
49         serialized.set_sock(sock);
50         serialized.set_state(state);
51         serialized.set_request(request);
52         serialized.set_stream_id(stream_id);
53         serialized.set_header_or_error(header_or_error);
54         serialized.set_header_or_error_bytes_sent(serialized.header_or_error_bytes_sent());
55         serialized.set_bytes_sent(bytes_sent);
56         return serialized;
57 }
58
59 Stream::Stream(const string &stream_id)
60         : stream_id(stream_id),
61           data(new char[BACKLOG_SIZE]),
62           data_size(0)
63 {
64         memset(data, 0, BACKLOG_SIZE);
65 }
66
// Release the backlog buffer. (Note: Stream is handled by pointer in
// Server::streams precisely because it owns this raw allocation.)
Stream::~Stream()
{
        delete[] data;
}
71
// Reconstruct a stream from serialized state (see serialize() below).
// The serialized backlog must be exactly BACKLOG_SIZE bytes; it is
// copied verbatim into a freshly allocated buffer.
Stream::Stream(const StreamProto &serialized)
        : stream_id(serialized.stream_id()),
          header(serialized.header()),
          data(new char[BACKLOG_SIZE]),
          data_size(serialized.data_size())
{
        assert(serialized.data().size() == BACKLOG_SIZE);
        memcpy(data, serialized.data().data(), BACKLOG_SIZE);
}
81
82 StreamProto Stream::serialize() const
83 {
84         StreamProto serialized;
85         serialized.set_header(header);
86         serialized.set_data(string(data, data + BACKLOG_SIZE));
87         serialized.set_data_size(data_size);
88         serialized.set_stream_id(stream_id);
89         return serialized;
90 }
91
// Initialize the mutex protecting all shared server state, and create
// the epoll instance used for client socket readiness notification.
Server::Server()
{
        pthread_mutex_init(&mutex, NULL);

        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
                perror("epoll_fd");
                exit(1);
        }
}
102
// Tear down the epoll instance. (Streams and clients are assumed to be
// dealt with elsewhere; the Stream pointers in `streams` are not freed
// here — presumably intentional for the exec-handoff scheme; verify.)
Server::~Server()
{
        close(epoll_fd);
}
107
108 void Server::run()
109 {
110         should_stop = false;
111         
112         // Joinable is already the default, but it's good to be certain.
113         pthread_attr_t attr;
114         pthread_attr_init(&attr);
115         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
116         pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
117 }
118         
119 void Server::stop()
120 {
121         {
122                 MutexLock lock(&mutex);
123                 should_stop = true;
124         }
125
126         if (pthread_join(worker_thread, NULL) == -1) {
127                 perror("pthread_join");
128                 exit(1);
129         }
130 }
131
132 void *Server::do_work_thunk(void *arg)
133 {
134         Server *server = static_cast<Server *>(arg);
135         server->do_work();
136         return NULL;
137 }
138
// Main loop of the worker thread: block in epoll_wait(), then dispatch
// every ready client, until stop() sets should_stop. The state mutex is
// held for the whole dispatch pass and released between iterations, so
// other threads (add_data() etc.) get a chance at least every
// EPOLL_TIMEOUT_MS.
void Server::do_work()
{
        for ( ;; ) {
                int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
                if (nfds == -1 && errno == EINTR) {
                        // Interrupted by a signal; just retry.
                        continue;
                }
                if (nfds == -1) {
                        perror("epoll_wait");
                        exit(1);
                }

                MutexLock lock(&mutex);  // We release the mutex between iterations.
        
                if (should_stop) {
                        return;
                }
        
                for (int i = 0; i < nfds; ++i) {
                        int fd = events[i].data.fd;
                        // epoll coalesces all readiness bits for one fd into a
                        // single event, so each fd appears at most once per batch
                        // and must still be in the client map here.
                        assert(clients.count(fd) != 0);
                        Client *client = &clients[fd];

                        // Errors and hangups take precedence over any I/O readiness.
                        if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
                                close_client(client);
                                continue;
                        }

                        process_client(client);
                }
        }
}
171
172 CubemapStateProto Server::serialize() const
173 {
174         CubemapStateProto serialized;
175         for (map<int, Client>::const_iterator client_it = clients.begin();
176              client_it != clients.end();
177              ++client_it) {
178                 serialized.add_clients()->MergeFrom(client_it->second.serialize());
179         }
180         for (map<string, Stream *>::const_iterator stream_it = streams.begin();
181              stream_it != streams.end();
182              ++stream_it) {
183                 serialized.add_streams()->MergeFrom(stream_it->second->serialize());
184         }
185         return serialized;
186 }
187
188 void Server::add_client(int sock)
189 {
190         MutexLock lock(&mutex);
191         clients.insert(make_pair(sock, Client(sock)));
192
193         // Start listening on data from this socket.
194         epoll_event ev;
195         ev.events = EPOLLIN | EPOLLRDHUP;
196         ev.data.u64 = 0;  // Keep Valgrind happy.
197         ev.data.fd = sock;
198         if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
199                 perror("epoll_ctl(EPOLL_CTL_ADD)");
200                 exit(1);
201         }
202 }
203
204 void Server::add_client_from_serialized(const ClientProto &client)
205 {
206         MutexLock lock(&mutex);
207         clients.insert(make_pair(client.sock(), Client(client)));
208
209         // Start listening on data from this socket.
210         epoll_event ev;
211         if (client.state() == Client::READING_REQUEST) {
212                 ev.events = EPOLLIN | EPOLLRDHUP;
213         } else {
214                 // If we don't have more data for this client, we'll be putting it into
215                 // the sleeping array again soon.
216                 ev.events = EPOLLOUT | EPOLLRDHUP;
217         }
218         ev.data.u64 = 0;  // Keep Valgrind happy.
219         ev.data.fd = client.sock();
220         if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
221                 perror("epoll_ctl(EPOLL_CTL_ADD)");
222                 exit(1);
223         }
224 }
225
// Create a new, empty stream under the given ID. The Stream object is
// heap-allocated and owned by the streams map (never freed in this file;
// presumably lives for the process lifetime — verify against the owner).
void Server::add_stream(const string &stream_id)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream_id, new Stream(stream_id)));
}
231
// Recreate a stream (including its backlog contents) from serialized
// state after an exec handoff.
void Server::add_stream_from_serialized(const StreamProto &stream)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream.stream_id(), new Stream(stream)));
}
237         
238 void Server::set_header(const string &stream_id, const string &header)
239 {
240         MutexLock lock(&mutex);
241         find_stream(stream_id)->header = header;
242
243         // If there are clients we haven't sent anything to yet, we should give
244         // them the header, so push back into the SENDING_HEADER state.
245         for (map<int, Client>::iterator client_it = clients.begin();
246              client_it != clients.end();
247              ++client_it) {
248                 Client *client = &client_it->second;
249                 if (client->state == Client::SENDING_DATA &&
250                     client->bytes_sent == 0) {
251                         construct_header(client);
252                 }
253         }
254 }
255         
256 void Server::add_data(const string &stream_id, const char *data, size_t bytes)
257 {
258         if (bytes == 0) {
259                 return;
260         }
261
262         MutexLock lock(&mutex);
263         Stream *stream = find_stream(stream_id);
264         size_t pos = stream->data_size % BACKLOG_SIZE;
265         stream->data_size += bytes;
266
267         if (pos + bytes > BACKLOG_SIZE) {
268                 size_t to_copy = BACKLOG_SIZE - pos;
269                 memcpy(stream->data + pos, data, to_copy);
270                 data += to_copy;
271                 bytes -= to_copy;
272                 pos = 0;
273         }
274
275         memcpy(stream->data + pos, data, bytes);
276         wake_up_all_clients();
277 }
278         
// Drive one client a single step through its state machine, in response
// to the epoll readiness event that woke us. Called from do_work() with
// the state mutex held.
//
//   READING_REQUEST:                 accumulate bytes until "\r\n\r\n", then parse.
//   SENDING_ERROR / SENDING_HEADER:  push out header_or_error until done.
//   SENDING_DATA:                    push stream data from the circular backlog.
void Server::process_client(Client *client)
{
        switch (client->state) {
        case Client::READING_REQUEST: {
                // Try to read more of the request.
                char buf[1024];
                int ret = read(client->sock, buf, sizeof(buf));
                if (ret == -1) {
                        // NOTE(review): if the accepted sockets were nonblocking,
                        // EAGAIN would land in this branch and needlessly drop the
                        // client — confirm sockets are blocking.
                        perror("read");
                        close_client(client);
                        return;
                }
                if (ret == 0) {
                        // No data? This really means that we were triggered for something else than
                        // POLLIN (which suggests a logic error in epoll).
                        fprintf(stderr, "WARNING: fd %d returned unexpectedly 0 bytes!\n", client->sock);
                        close_client(client);
                        return;
                }

                // Guard against overlong requests gobbling up all of our space.
                if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
                        fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
                        close_client(client);
                        return;
                }

                // See if we have \r\n\r\n anywhere in the request. We start three bytes
                // before what we just appended, in case we just got the final character.
                size_t existing_req_bytes = client->request.size();
                client->request.append(string(buf, buf + ret));
        
                size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
                const char *ptr = reinterpret_cast<char *>(
                        memmem(client->request.data() + start_at, client->request.size() - start_at,
                               "\r\n\r\n", 4));
                if (ptr == NULL) {
                        // OK, we don't have the entire header yet. Fine; we'll get it later.
                        return;
                }

                // The terminator must come at the very end of what we have;
                // trailing bytes after the request are not supported.
                if (ptr != client->request.data() + client->request.size() - 4) {
                        fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
                        close_client(client);
                        return;
                }

                // Parse, and queue either the stream header or an error page.
                int error_code = parse_request(client);
                if (error_code == 200) {
                        construct_header(client);
                } else {
                        construct_error(client, error_code);
                }
                break;
        }
        case Client::SENDING_ERROR:
        case Client::SENDING_HEADER: {
                // Send as much as the socket will take of the remaining
                // header/error bytes.
                int ret = write(client->sock,
                                client->header_or_error.data() + client->header_or_error_bytes_sent,
                                client->header_or_error.size() - client->header_or_error_bytes_sent);
                if (ret == -1) {
                        perror("write");
                        close_client(client);
                        return;
                }
                
                client->header_or_error_bytes_sent += ret;
                assert(client->header_or_error_bytes_sent <= client->header_or_error.size());

                if (client->header_or_error_bytes_sent < client->header_or_error.size()) {
                        // We haven't sent all yet. Fine; we'll do that later.
                        return;
                }

                // We're done sending the header or error! Clear it to release some memory.
                client->header_or_error.clear();

                if (client->state == Client::SENDING_ERROR) {
                        // An error response ends the connection.
                        close_client(client);
                } else {
                        // Start sending from the end. In other words, we won't send any of the backlog,
                        // but we'll start sending immediately as we get data.
                        client->state = Client::SENDING_DATA;
                        client->bytes_sent = find_stream(client->stream_id)->data_size;
                }
                break;
        }
        case Client::SENDING_DATA: {
                // See if there's some data we've lost. Ideally, we should drop to a block boundary,
                // but resync will be the mux's problem.
                const Stream &stream = *find_stream(client->stream_id);
                size_t bytes_to_send = stream.data_size - client->bytes_sent;
                if (bytes_to_send > BACKLOG_SIZE) {
                        // The client fell more than a full backlog behind; skip
                        // ahead so we only send what is still in the buffer.
                        fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
                                client->sock,
                                (long long int)(bytes_to_send - BACKLOG_SIZE));
                        client->bytes_sent = find_stream(client->stream_id)->data_size - BACKLOG_SIZE;
                        bytes_to_send = BACKLOG_SIZE;
                }

                // See if we need to split across the circular buffer.
                ssize_t ret;
                if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
                        size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);

                        // Two pieces: current position to end of buffer, then
                        // start of buffer onwards; one writev() sends both.
                        iovec iov[2];
                        iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
                        iov[0].iov_len = bytes_first_part;

                        iov[1].iov_base = const_cast<char *>(stream.data);
                        iov[1].iov_len = bytes_to_send - bytes_first_part;

                        ret = writev(client->sock, iov, 2);
                } else {
                        ret = write(client->sock,
                                    stream.data + (client->bytes_sent % BACKLOG_SIZE),
                                    bytes_to_send);
                }
                if (ret == -1) {
                        // NOTE(review): as above, EAGAIN on a nonblocking socket
                        // would drop the client here — confirm blocking sockets.
                        perror("write/writev");
                        close_client(client);
                        return;
                }
                client->bytes_sent += ret;

                if (client->bytes_sent == stream.data_size) {
                        // We don't have any more data for this client, so put it to sleep.
                        put_client_to_sleep(client);
                }
                break;
        }
        default:
                assert(false);
        }
}
414
415 int Server::parse_request(Client *client)
416 {
417         vector<string> lines = split_lines(client->request);
418         if (lines.empty()) {
419                 return 400;  // Bad request (empty).
420         }
421
422         vector<string> request_tokens = split_tokens(lines[0]);
423         if (request_tokens.size() < 2) {
424                 return 400;  // Bad request (empty).
425         }
426         if (request_tokens[0] != "GET") {
427                 return 400;  // Should maybe be 405 instead?
428         }
429         if (streams.count(request_tokens[1]) == 0) {
430                 return 404;  // Not found.
431         }
432
433         client->stream_id = request_tokens[1];
434         client->request.clear();
435
436         return 200;  // OK!
437 }
438
439 void Server::construct_header(Client *client)
440 {
441         client->header_or_error = "HTTP/1.0 200 OK\r\nContent-type: video/x-flv\r\nCache-Control: no-cache\r\n\r\n" +
442                 find_stream(client->stream_id)->header;
443
444         // Switch states.
445         client->state = Client::SENDING_HEADER;
446
447         epoll_event ev;
448         ev.events = EPOLLOUT | EPOLLRDHUP;
449         ev.data.u64 = 0;  // Keep Valgrind happy.
450         ev.data.fd = client->sock;
451
452         if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
453                 perror("epoll_ctl(EPOLL_CTL_MOD)");
454                 exit(1);
455         }
456 }
457         
458 void Server::construct_error(Client *client, int error_code)
459 {
460         char error[256];
461         snprintf(error, 256, "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
462                 error_code);
463         client->header_or_error = error;
464
465         // Switch states.
466         client->state = Client::SENDING_ERROR;
467
468         epoll_event ev;
469         ev.events = EPOLLOUT | EPOLLRDHUP;
470         ev.data.u64 = 0;  // Keep Valgrind happy.
471         ev.data.fd = client->sock;
472
473         if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
474                 perror("epoll_ctl(EPOLL_CTL_MOD)");
475                 exit(1);
476         }
477 }
478         
479 void Server::close_client(Client *client)
480 {
481         if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
482                 perror("epoll_ctl(EPOLL_CTL_DEL)");
483                 exit(1);
484         }
485
486         // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
487         vector<int>::iterator new_end =
488                 remove(sleeping_clients.begin(), sleeping_clients.end(), client->sock);
489         sleeping_clients.erase(new_end, sleeping_clients.end());
490         
491         // Bye-bye!
492         close(client->sock);
493         clients.erase(client->sock);
494 }
495         
496 void Server::put_client_to_sleep(Client *client)
497 {
498         epoll_event ev;
499         ev.events = EPOLLRDHUP;
500         ev.data.u64 = 0;  // Keep Valgrind happy.
501         ev.data.fd = client->sock;
502
503         if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
504                 perror("epoll_ctl(EPOLL_CTL_MOD)");
505                 exit(1);
506         }
507
508         sleeping_clients.push_back(client->sock);
509 }
510
511 void Server::wake_up_all_clients()
512 {
513         for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
514                 epoll_event ev;
515                 ev.events = EPOLLOUT | EPOLLRDHUP;
516                 ev.data.u64 = 0;  // Keep Valgrind happy.
517                 ev.data.fd = sleeping_clients[i];
518                 if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
519                         perror("epoll_ctl(EPOLL_CTL_MOD)");
520                         exit(1);
521                 }
522         }
523         sleeping_clients.clear();
524 }
525         
// Look up a stream by ID; the stream must exist (asserted). Callers are
// expected to hold the mutex — all visible call sites do, either directly
// or via do_work()'s dispatch lock.
Stream *Server::find_stream(const string &stream_id)
{
        map<string, Stream *>::iterator it = streams.find(stream_id);
        assert(it != streams.end());
        return it->second;
}
531 }