Change from level-triggered to edge-triggered epoll mode. More than halves CPU usage.
[cubemap] / server.cpp
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <curl/curl.h>
#include <sys/socket.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/uio.h>
#include <errno.h>
#include <vector>
#include <string>
#include <map>
#include <algorithm>

#include "metacube.h"
#include "server.h"
#include "mutexlock.h"
#include "parse.h"
#include "state.pb.h"

using namespace std;

Client::Client(int sock)
        : sock(sock),
          state(Client::READING_REQUEST),
          header_or_error_bytes_sent(0),
          bytes_sent(0)
{
        request.reserve(1024);
}

Client::Client(const ClientProto &serialized)
        : sock(serialized.sock()),
          state(State(serialized.state())),
          request(serialized.request()),
          stream_id(serialized.stream_id()),
          header_or_error(serialized.header_or_error()),
          header_or_error_bytes_sent(serialized.header_or_error_bytes_sent()),
          bytes_sent(serialized.bytes_sent())
{
}

ClientProto Client::serialize() const
{
        ClientProto serialized;
        serialized.set_sock(sock);
        serialized.set_state(state);
        serialized.set_request(request);
        serialized.set_stream_id(stream_id);
        serialized.set_header_or_error(header_or_error);
        serialized.set_header_or_error_bytes_sent(header_or_error_bytes_sent);
        serialized.set_bytes_sent(bytes_sent);
        return serialized;
}

Stream::Stream(const string &stream_id)
        : stream_id(stream_id),
          data(new char[BACKLOG_SIZE]),
          data_size(0)
{
        memset(data, 0, BACKLOG_SIZE);
}

Stream::~Stream()
{
        delete[] data;
}

Stream::Stream(const StreamProto &serialized)
        : stream_id(serialized.stream_id()),
          header(serialized.header()),
          data(new char[BACKLOG_SIZE]),
          data_size(serialized.data_size())
{
        assert(serialized.data().size() == BACKLOG_SIZE);
        memcpy(data, serialized.data().data(), BACKLOG_SIZE);
}

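// Note that we serialize the entire backlog, even the part that has never
// been written to; the deserializing constructor above asserts that it gets
// back exactly BACKLOG_SIZE bytes.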
StreamProto Stream::serialize() const
{
        StreamProto serialized;
        serialized.set_header(header);
        serialized.set_data(string(data, data + BACKLOG_SIZE));
        serialized.set_data_size(data_size);
        serialized.set_stream_id(stream_id);
        return serialized;
}

Server::Server()
{
        pthread_mutex_init(&mutex, NULL);

        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
                perror("epoll_create");
                exit(1);
        }
}

Server::~Server()
{
        int ret;
        do {
                ret = close(epoll_fd);
        } while (ret == -1 && errno == EINTR);

        if (ret == -1) {
                perror("close(epoll_fd)");
        }
}

void Server::run()
{
        should_stop = false;

        // Joinable is already the default, but it's good to be certain.
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
        pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
}

void Server::stop()
{
        {
                MutexLock lock(&mutex);
                should_stop = true;
        }

        // pthread_join() returns an error number directly, not -1/errno.
        int err = pthread_join(worker_thread, NULL);
        if (err != 0) {
                fprintf(stderr, "pthread_join: %s\n", strerror(err));
                exit(1);
        }
}

void *Server::do_work_thunk(void *arg)
{
        Server *server = static_cast<Server *>(arg);
        server->do_work();
        return NULL;
}

void Server::do_work()
{
        for ( ;; ) {
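                // Wait for activity on any of the client sockets. The timeout
                // makes sure we get to check should_stop periodically even if
                // nothing is happening.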
                int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
                if (nfds == -1 && errno == EINTR) {
                        continue;
                }
                if (nfds == -1) {
                        perror("epoll_wait");
                        exit(1);
                }

                MutexLock lock(&mutex);  // We release the mutex between iterations.

                if (should_stop) {
                        return;
                }

                for (int i = 0; i < nfds; ++i) {
                        int fd = events[i].data.fd;
                        assert(clients.count(fd) != 0);
                        Client *client = &clients[fd];

                        if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
                                close_client(client);
                                continue;
                        }

                        process_client(client);
                }
        }
}

CubemapStateProto Server::serialize() const
{
        CubemapStateProto serialized;
        for (map<int, Client>::const_iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                serialized.add_clients()->MergeFrom(client_it->second.serialize());
        }
        for (map<string, Stream *>::const_iterator stream_it = streams.begin();
             stream_it != streams.end();
             ++stream_it) {
                serialized.add_streams()->MergeFrom(stream_it->second->serialize());
        }
        return serialized;
}

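// With edge-triggered epoll (EPOLLET), we are only notified when readiness
// *changes*, not while it persists, so process_client() must always read or
// write until it sees EAGAIN before going back to epoll_wait().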
void Server::add_client(int sock)
{
        MutexLock lock(&mutex);
        clients.insert(make_pair(sock, Client(sock)));

        // Start listening on data from this socket.
        epoll_event ev;
        ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = sock;
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }
}

void Server::add_client_from_serialized(const ClientProto &client)
{
        MutexLock lock(&mutex);
        clients.insert(make_pair(client.sock(), Client(client)));

        // Start listening on data from this socket.
        epoll_event ev;
        if (client.state() == Client::READING_REQUEST) {
                ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
        } else {
                // If we don't have more data for this client, we'll be putting it into
                // the sleeping array again soon.
                ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        }
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = client.sock();
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }
}

void Server::add_stream(const string &stream_id)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream_id, new Stream(stream_id)));
}

void Server::add_stream_from_serialized(const StreamProto &stream)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream.stream_id(), new Stream(stream)));
}

void Server::set_header(const string &stream_id, const string &header)
{
        MutexLock lock(&mutex);
        find_stream(stream_id)->header = header;

        // If there are clients we haven't sent anything to yet, we should give
        // them the header, so push them back into the SENDING_HEADER state.
        for (map<int, Client>::iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                Client *client = &client_it->second;
                if (client->state == Client::SENDING_DATA &&
                    client->bytes_sent == 0) {
                        construct_header(client);
                }
        }
}

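// Appends data to the stream's circular backlog buffer. stream->data_size is
// the total number of bytes ever received; the write position wraps around
// modulo BACKLOG_SIZE, so a write crossing the end of the buffer is split in
// two. (This assumes any single add_data() call is at most BACKLOG_SIZE bytes.)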
void Server::add_data(const string &stream_id, const char *data, size_t bytes)
{
        if (bytes == 0) {
                return;
        }

        MutexLock lock(&mutex);
        Stream *stream = find_stream(stream_id);
        size_t pos = stream->data_size % BACKLOG_SIZE;
        stream->data_size += bytes;

        if (pos + bytes > BACKLOG_SIZE) {
                size_t to_copy = BACKLOG_SIZE - pos;
                memcpy(stream->data + pos, data, to_copy);
                data += to_copy;
                bytes -= to_copy;
                pos = 0;
        }

        memcpy(stream->data + pos, data, bytes);
        wake_up_all_clients();
}

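// Drives a client through its state machine: READING_REQUEST until a full
// request has arrived, then SENDING_HEADER or SENDING_ERROR, then
// SENDING_DATA. Since epoll is edge-triggered, each state keeps reading or
// writing until it hits EAGAIN (or runs out of data).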
// See the .h file for postconditions after this function.
void Server::process_client(Client *client)
{
        switch (client->state) {
        case Client::READING_REQUEST: {
read_request_again:
                // Try to read more of the request.
                char buf[1024];
                int ret;
                do {
                        ret = read(client->sock, buf, sizeof(buf));
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EAGAIN) {
                        // No more data right now. Nothing to do.
                        // This is postcondition #2.
                        return;
                }
                if (ret == -1) {
                        perror("read");
                        close_client(client);
                        return;
                }
                if (ret == 0) {
                        // OK, the socket is closed.
                        close_client(client);
                        return;
                }

                // Guard against overlong requests gobbling up all of our space.
                if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
                        fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
                        close_client(client);
                        return;
                }

                // See if we have \r\n\r\n anywhere in the request. We start three bytes
                // before what we just appended, in case we just got the final character.
                size_t existing_req_bytes = client->request.size();
                client->request.append(string(buf, buf + ret));

                size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
                const char *ptr = reinterpret_cast<char *>(
                        memmem(client->request.data() + start_at, client->request.size() - start_at,
                               "\r\n\r\n", 4));
                if (ptr == NULL) {
                        // OK, we don't have the entire header yet. Fine; we'll get it later.
                        // See if there's more data for us.
                        goto read_request_again;
                }

                if (ptr != client->request.data() + client->request.size() - 4) {
                        fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
                        close_client(client);
                        return;
                }

                int error_code = parse_request(client);
                if (error_code == 200) {
                        construct_header(client);
                } else {
                        construct_error(client, error_code);
                }

                // We've changed states, so fall through.
                assert(client->state == Client::SENDING_ERROR ||
                       client->state == Client::SENDING_HEADER);
        }
        case Client::SENDING_ERROR:
        case Client::SENDING_HEADER: {
sending_header_or_error_again:
                int ret;
                do {
                        ret = write(client->sock,
                                    client->header_or_error.data() + client->header_or_error_bytes_sent,
                                    client->header_or_error.size() - client->header_or_error_bytes_sent);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EAGAIN) {
                        // We're out of socket space, so now we're at the “low edge” of epoll's
                        // edge triggering. epoll will tell us when there is more room, so for now,
                        // just return.
                        // This is postcondition #4.
                        return;
                }

                if (ret == -1) {
                        // Error! Postcondition #1.
                        perror("write");
                        close_client(client);
                        return;
                }

                client->header_or_error_bytes_sent += ret;
                assert(client->header_or_error_bytes_sent <= client->header_or_error.size());

                if (client->header_or_error_bytes_sent < client->header_or_error.size()) {
                        // We haven't sent all yet. Fine; go another round.
                        goto sending_header_or_error_again;
                }

                // We're done sending the header or error! Clear it to release some memory.
                client->header_or_error.clear();

                if (client->state == Client::SENDING_ERROR) {
                        // We're done sending the error, so now close.
                        // This is postcondition #1.
                        close_client(client);
                        return;
                }

                // Start sending from the end. In other words, we won't send any of the backlog,
                // but we'll start sending immediately as we get data.
                // This is postcondition #3.
                client->state = Client::SENDING_DATA;
                client->bytes_sent = find_stream(client->stream_id)->data_size;
                sleeping_clients.push_back(client);
                return;
        }
        case Client::SENDING_DATA: {
                // See if there's some data we've lost. Ideally, we should drop to a block boundary,
                // but resync will be the mux's problem.
                const Stream &stream = *find_stream(client->stream_id);
                size_t bytes_to_send = stream.data_size - client->bytes_sent;
                if (bytes_to_send > BACKLOG_SIZE) {
                        fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
                                client->sock,
                                (long long int)(bytes_to_send - BACKLOG_SIZE));
                        client->bytes_sent = stream.data_size - BACKLOG_SIZE;
                        bytes_to_send = BACKLOG_SIZE;
                }

                // See if we need to split across the circular buffer.
                ssize_t ret;
                if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
                        size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);

                        iovec iov[2];
                        iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
                        iov[0].iov_len = bytes_first_part;

                        iov[1].iov_base = const_cast<char *>(stream.data);
                        iov[1].iov_len = bytes_to_send - bytes_first_part;

                        do {
                                ret = writev(client->sock, iov, 2);
                        } while (ret == -1 && errno == EINTR);
                } else {
                        do {
                                ret = write(client->sock,
                                            stream.data + (client->bytes_sent % BACKLOG_SIZE),
                                            bytes_to_send);
                        } while (ret == -1 && errno == EINTR);
                }
                if (ret == -1 && errno == EAGAIN) {
                        // We're out of socket space, so return; epoll will wake us up
                        // when there is more room.
                        // This is postcondition #4.
                        return;
                }
                if (ret == -1) {
                        // Error, close; postcondition #1.
                        perror("write/writev");
                        close_client(client);
                        return;
                }
                client->bytes_sent += ret;

                if (client->bytes_sent == stream.data_size) {
                        // We don't have any more data for this client, so put it to sleep.
                        // This is postcondition #3.
                        put_client_to_sleep(client);
                } else {
                        // XXX: Do we need to go another round here to explicitly
                        // get the EAGAIN?
                }
                break;
        }
        default:
                assert(false);
        }
}

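// Parses the (complete) request in client->request, and returns an HTTP
// status code: 200 if we should start streaming, or an error code otherwise.
// Only requests of the form "GET <stream_id>" are accepted.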
int Server::parse_request(Client *client)
{
        vector<string> lines = split_lines(client->request);
        if (lines.empty()) {
                return 400;  // Bad request (empty).
        }

        vector<string> request_tokens = split_tokens(lines[0]);
        if (request_tokens.size() < 2) {
                return 400;  // Bad request (malformed request line).
        }
        if (request_tokens[0] != "GET") {
                return 400;  // Should maybe be 405 instead?
        }
        if (streams.count(request_tokens[1]) == 0) {
                return 404;  // Not found.
        }

        client->stream_id = request_tokens[1];
        client->request.clear();

        return 200;  // OK!
}

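// Builds the HTTP response header (plus the stream's own header) for the
// client, and re-registers the socket with epoll for writability, since from
// here on we only ever write to it.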
void Server::construct_header(Client *client)
{
        client->header_or_error = "HTTP/1.0 200 OK\r\nContent-type: video/x-flv\r\nCache-Control: no-cache\r\n\r\n" +
                find_stream(client->stream_id)->header;

        // Switch states.
        client->state = Client::SENDING_HEADER;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

void Server::construct_error(Client *client, int error_code)
{
        char error[256];
        snprintf(error, sizeof(error),
                 "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
                 error_code);
        client->header_or_error = error;

        // Switch states.
        client->state = Client::SENDING_ERROR;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

void Server::close_client(Client *client)
{
        if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
                perror("epoll_ctl(EPOLL_CTL_DEL)");
                exit(1);
        }

        // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
        vector<Client *>::iterator new_end =
                remove(sleeping_clients.begin(), sleeping_clients.end(), client);
        sleeping_clients.erase(new_end, sleeping_clients.end());

        // Bye-bye!
        int ret;
        do {
                ret = close(client->sock);
        } while (ret == -1 && errno == EINTR);

        if (ret == -1) {
                perror("close");
        }

        clients.erase(client->sock);
}

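// A sleeping client has no more data to send right now. Since epoll is
// edge-triggered and we never hit EAGAIN on this socket, no new event will
// arrive for it; instead, add_data() wakes it up via wake_up_all_clients().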
void Server::put_client_to_sleep(Client *client)
{
        sleeping_clients.push_back(client);
}

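// Give all sleeping clients a chance to send more data. We swap the list out
// first, since process_client() may call put_client_to_sleep() and thus
// repopulate sleeping_clients while we iterate over it.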
void Server::wake_up_all_clients()
{
        vector<Client *> to_process;
        swap(sleeping_clients, to_process);
        for (unsigned i = 0; i < to_process.size(); ++i) {
                process_client(to_process[i]);
        }
}

Stream *Server::find_stream(const string &stream_id)
{
        map<string, Stream *>::iterator it = streams.find(stream_id);
        assert(it != streams.end());
        return it->second;
}