1f5477e9baa92066bbf2d8736e27901d32d2c9eb
[cubemap] / server.cpp
#include <assert.h>
#include <errno.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <map>
#include <string>
#include <utility>
#include <vector>

#include "accesslog.h"
#include "log.h"
#include "markpool.h"
#include "metacube.h"
#include "mutexlock.h"
#include "parse.h"
#include "server.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

using namespace std;

extern AccessLogThread *access_log;

Server::Server()
{
        pthread_mutex_init(&mutex, NULL);
        pthread_mutex_init(&queued_data_mutex, NULL);

        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
                log_perror("epoll_fd");
                exit(1);
        }
}

Server::~Server()
{
        for (size_t i = 0; i < streams.size(); ++i) {
                delete streams[i];
        }

        safe_close(epoll_fd);
}

vector<ClientStats> Server::get_client_stats() const
{
        vector<ClientStats> ret;

        MutexLock lock(&mutex);
        for (map<int, Client>::const_iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                ret.push_back(client_it->second.get_stats());
        }
        return ret;
}

void Server::do_work()
{
        while (!should_stop()) {
                // Wait until there's activity on at least one of the fds,
                // or 20 ms (about one frame at 50 fps) has elapsed.
                //
                // We could in theory wait forever and rely on wakeup()
                // from add_client_deferred() and add_data_deferred(),
                // but wakeup is a pretty expensive operation, and the
                // two threads might end up fighting over a lock, so it's
                // seemingly (much) more efficient to just have a timeout here.
                int nfds = epoll_pwait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS, &sigset_without_usr1_block);
                if (nfds == -1 && errno != EINTR) {
                        log_perror("epoll_pwait");
                        exit(1);
                }

                MutexLock lock(&mutex);  // We release the mutex between iterations.

                process_queued_data();

                for (int i = 0; i < nfds; ++i) {
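                        // events[i].data.u64 holds the Client pointer we
                        // stashed when we registered the socket with epoll
                        // (see add_client() below).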
                        Client *client = reinterpret_cast<Client *>(events[i].data.u64);

                        if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
                                close_client(client);
                                continue;
                        }

                        process_client(client);
                }

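                // Process the clients that got new data on some stream.
                // We swap each list out of the stream first, so that
                // close_client() and put_client_to_sleep() can safely touch
                // the stream's lists while we iterate over our local copy.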
                for (size_t i = 0; i < streams.size(); ++i) {
                        vector<Client *> to_process;
                        swap(streams[i]->to_process, to_process);
                        for (size_t j = 0; j < to_process.size(); ++j) {
                                process_client(to_process[j]);
                        }
                }
        }
}

CubemapStateProto Server::serialize()
{
        // We don't serialize anything queued, so empty the queues.
        process_queued_data();

        // Set all clients in a consistent state before serializing
        // (i.e., they have no remaining lost data). Otherwise, increasing
        // the backlog could take clients into a newly valid area of the backlog,
        // sending a stream of zeros instead of skipping the data as it should.
        //
        // TODO: Do this when clients are added back from serialized state instead;
        // it would probably be less wasteful.
        for (map<int, Client>::iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                skip_lost_data(&client_it->second);
        }

        CubemapStateProto serialized;
        for (map<int, Client>::const_iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                serialized.add_clients()->MergeFrom(client_it->second.serialize());
        }
        for (size_t i = 0; i < streams.size(); ++i) {
                serialized.add_streams()->MergeFrom(streams[i]->serialize());
        }
        return serialized;
}

void Server::add_client_deferred(int sock)
{
        MutexLock lock(&queued_data_mutex);
        queued_add_clients.push_back(sock);
}

void Server::add_client(int sock)
{
        pair<map<int, Client>::iterator, bool> ret =
                clients.insert(make_pair(sock, Client(sock)));
        assert(ret.second == true);  // Should not already exist.
        Client *client_ptr = &ret.first->second;

        // Start listening on data from this socket.
        epoll_event ev;
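        // Note that we use edge-triggered epoll (EPOLLET): we will only be
        // told about new activity, so process_client() must keep reading or
        // writing until it sees EAGAIN before the client goes back to sleep.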
        ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }

        process_client(client_ptr);
}

void Server::add_client_from_serialized(const ClientProto &client)
{
        MutexLock lock(&mutex);
        Stream *stream;
        int stream_index = lookup_stream_by_url(client.url());
        if (stream_index == -1) {
                assert(client.state() != Client::SENDING_DATA);
                stream = NULL;
        } else {
                stream = streams[stream_index];
        }
        pair<map<int, Client>::iterator, bool> ret =
                clients.insert(make_pair(client.sock(), Client(client, stream)));
        assert(ret.second == true);  // Should not already exist.
        Client *client_ptr = &ret.first->second;

        // Start listening on data from this socket.
        epoll_event ev;
        if (client.state() == Client::READING_REQUEST) {
                ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
        } else {
                // If we don't have more data for this client, we'll be putting it into
                // the sleeping array again soon.
                ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        }
        ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }

        if (client_ptr->state == Client::SENDING_DATA &&
            client_ptr->stream_pos == client_ptr->stream->bytes_received) {
                client_ptr->stream->put_client_to_sleep(client_ptr);
        } else {
                process_client(client_ptr);
        }
}

int Server::lookup_stream_by_url(const string &url) const
{
        map<string, int>::const_iterator url_it = url_map.find(url);
        if (url_it == url_map.end()) {
                return -1;
        }
        return url_it->second;
}

int Server::add_stream(const string &url, size_t backlog_size, Stream::Encoding encoding)
{
        MutexLock lock(&mutex);
        url_map.insert(make_pair(url, streams.size()));
        streams.push_back(new Stream(url, backlog_size, encoding));
        return streams.size() - 1;
}

int Server::add_stream_from_serialized(const StreamProto &stream, int data_fd)
{
        MutexLock lock(&mutex);
        url_map.insert(make_pair(stream.url(), streams.size()));
        streams.push_back(new Stream(stream, data_fd));
        return streams.size() - 1;
}

void Server::set_backlog_size(int stream_index, size_t new_size)
{
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->set_backlog_size(new_size);
}

void Server::set_encoding(int stream_index, Stream::Encoding encoding)
{
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->encoding = encoding;
}

void Server::set_header(int stream_index, const string &http_header, const string &stream_header)
{
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->http_header = http_header;
        streams[stream_index]->stream_header = stream_header;

        // If there are clients we haven't sent anything to yet, we should give
        // them the header, so push them back into the SENDING_HEADER state.
        for (map<int, Client>::iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                Client *client = &client_it->second;
                if (client->state == Client::SENDING_DATA &&
                    client->stream_pos == 0) {
                        construct_header(client);
                }
        }
}

void Server::set_mark_pool(int stream_index, MarkPool *mark_pool)
{
        MutexLock lock(&mutex);
        assert(clients.empty());
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->mark_pool = mark_pool;
}

void Server::add_data_deferred(int stream_index, const char *data, size_t bytes)
{
        MutexLock lock(&queued_data_mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->add_data_deferred(data, bytes);
}

// See the .h file for postconditions after this function.
void Server::process_client(Client *client)
{
        switch (client->state) {
        case Client::READING_REQUEST: {
read_request_again:
                // Try to read more of the request.
                char buf[1024];
                int ret;
                do {
                        ret = read(client->sock, buf, sizeof(buf));
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EAGAIN) {
                        // No more data right now. Nothing to do.
                        // This is postcondition #2.
                        return;
                }
                if (ret == -1) {
                        log_perror("read");
                        close_client(client);
                        return;
                }
                if (ret == 0) {
                        // OK, the socket is closed.
                        close_client(client);
                        return;
                }

                RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);

                switch (status) {
                case RP_OUT_OF_SPACE:
                        log(WARNING, "[%s] Client sent overlong request!", client->remote_addr.c_str());
                        close_client(client);
                        return;
                case RP_NOT_FINISHED_YET:
                        // OK, we don't have the entire header yet. Fine; we'll get it later.
                        // See if there's more data for us.
                        goto read_request_again;
                case RP_EXTRA_DATA:
                        log(WARNING, "[%s] Junk data after request!", client->remote_addr.c_str());
                        close_client(client);
                        return;
                case RP_FINISHED:
                        break;
                }

                assert(status == RP_FINISHED);

                int error_code = parse_request(client);
                if (error_code == 200) {
                        construct_header(client);
                } else {
                        construct_error(client, error_code);
                }

                // We've changed states, so fall through.
                assert(client->state == Client::SENDING_ERROR ||
                       client->state == Client::SENDING_HEADER);
        }
        case Client::SENDING_ERROR:
        case Client::SENDING_HEADER: {
sending_header_or_error_again:
                int ret;
                do {
                        ret = write(client->sock,
                                    client->header_or_error.data() + client->header_or_error_bytes_sent,
                                    client->header_or_error.size() - client->header_or_error_bytes_sent);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EAGAIN) {
                        // We're out of socket space, so now we're at the “low edge” of epoll's
                        // edge triggering. epoll will tell us when there is more room, so for now,
                        // just return.
                        // This is postcondition #4.
                        return;
                }

                if (ret == -1) {
                        // Error! Postcondition #1.
                        log_perror("write");
                        close_client(client);
                        return;
                }

                client->header_or_error_bytes_sent += ret;
                assert(client->header_or_error_bytes_sent <= client->header_or_error.size());

                if (client->header_or_error_bytes_sent < client->header_or_error.size()) {
                        // We haven't sent all yet. Fine; go another round.
                        goto sending_header_or_error_again;
                }

                // We're done sending the header or error! Clear it to release some memory.
                client->header_or_error.clear();

                if (client->state == Client::SENDING_ERROR) {
                        // We're done sending the error, so now close.
                        // This is postcondition #1.
                        close_client(client);
                        return;
                }

                // Start sending from the end. In other words, we won't send any of the backlog,
                // but we'll start sending immediately as we get data.
                // This is postcondition #3.
                client->state = Client::SENDING_DATA;
                client->stream_pos = client->stream->bytes_received;
                client->stream->put_client_to_sleep(client);
                return;
        }
        case Client::SENDING_DATA: {
                skip_lost_data(client);
                Stream *stream = client->stream;

sending_data_again:
                size_t bytes_to_send = stream->bytes_received - client->stream_pos;
                assert(bytes_to_send <= stream->backlog_size);
                if (bytes_to_send == 0) {
                        return;
                }

                // See if we need to split across the circular buffer.
                bool more_data = false;
                if ((client->stream_pos % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
                        bytes_to_send = stream->backlog_size - (client->stream_pos % stream->backlog_size);
                        more_data = true;
                }

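                // Illustrative example (numbers made up): with backlog_size =
                // 1000, stream_pos = 900 and 300 bytes pending, only the 100
                // bytes up to the edge of the buffer can be sent in this
                // round; more_data is set, and the remaining 200 bytes are
                // sent from offset 0 on the next round.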
                ssize_t ret;
                do {
                        loff_t offset = client->stream_pos % stream->backlog_size;
                        ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EAGAIN) {
                        // We're out of socket space, so return; epoll will wake us up
                        // when there is more room.
                        // This is postcondition #4.
                        return;
                }
                if (ret == -1) {
                        // Error, close; postcondition #1.
                        log_perror("sendfile");
                        close_client(client);
                        return;
                }
                client->stream_pos += ret;
                client->bytes_sent += ret;

                if (client->stream_pos == stream->bytes_received) {
                        // We don't have any more data for this client, so put it to sleep.
                        // This is postcondition #3.
                        stream->put_client_to_sleep(client);
                } else if (more_data && size_t(ret) == bytes_to_send) {
                        goto sending_data_again;
                }
                break;
        }
        default:
                assert(false);
        }
}

// See if there's some data we've lost. Ideally, we should drop to a block boundary,
// but resync will be the mux's problem.
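//
// Illustrative example (sizes made up): with a 1 MB backlog, a client that
// has fallen 1.5 MB behind the head of the stream has irrevocably lost the
// oldest 0.5 MB of that span; we advance its position to the oldest byte
// still in the backlog and account for the loss.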
void Server::skip_lost_data(Client *client)
{
        Stream *stream = client->stream;
        size_t bytes_to_send = stream->bytes_received - client->stream_pos;
        if (bytes_to_send > stream->backlog_size) {
                size_t bytes_lost = bytes_to_send - stream->backlog_size;
                client->stream_pos = stream->bytes_received - stream->backlog_size;
                client->bytes_lost += bytes_lost;
                ++client->num_loss_events;

                double loss_fraction = double(client->bytes_lost) / double(client->bytes_lost + client->bytes_sent);
                log(WARNING, "[%s] Client lost %lld bytes (total loss: %.2f%%), maybe the connection is too slow",
                        client->remote_addr.c_str(),
                        (long long int)(bytes_lost),
                        100.0 * loss_fraction);
        }
}

int Server::parse_request(Client *client)
{
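        // We only care about the first line of the request, which should be
        // of the form "GET /stream_url HTTP/1.x"; anything past the method
        // and the URL, including the remaining header lines, is ignored.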
        vector<string> lines = split_lines(client->request);
        if (lines.empty()) {
                return 400;  // Bad request (empty).
        }

        vector<string> request_tokens = split_tokens(lines[0]);
        if (request_tokens.size() < 2) {
                return 400;  // Bad request (missing URL).
        }
        if (request_tokens[0] != "GET") {
                return 400;  // Should maybe be 405 instead?
        }

        map<string, int>::const_iterator url_map_it = url_map.find(request_tokens[1]);
        if (url_map_it == url_map.end()) {
                return 404;  // Not found.
        }

        client->url = request_tokens[1];
        client->stream = streams[url_map_it->second];
        if (client->stream->mark_pool != NULL) {
                client->fwmark = client->stream->mark_pool->get_mark();
        } else {
                client->fwmark = 0;  // No mark.
        }
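        // Give the socket the fwmark, presumably so that routing rules or
        // tc filters outside cubemap can tell the different streams' clients
        // apart. Setting SO_MARK requires CAP_NET_ADMIN, so if we did not
        // ask for a mark (fwmark == 0), a failure here is not worth logging.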
        if (setsockopt(client->sock, SOL_SOCKET, SO_MARK, &client->fwmark, sizeof(client->fwmark)) == -1) {
                if (client->fwmark != 0) {
                        log_perror("setsockopt(SO_MARK)");
                }
        }
        client->request.clear();

        return 200;  // OK!
}

void Server::construct_header(Client *client)
{
        Stream *stream = client->stream;
        if (stream->encoding == Stream::STREAM_ENCODING_RAW) {
                client->header_or_error = stream->http_header +
                        "\r\n" +
                        stream->stream_header;
        } else if (stream->encoding == Stream::STREAM_ENCODING_METACUBE) {
                client->header_or_error = stream->http_header +
                        "Content-encoding: metacube\r\n" +
                        "\r\n";
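                // A Metacube block on the wire is a fixed header (sync marker,
                // 32-bit big-endian payload size, 32-bit big-endian flags)
                // followed by the payload; here, the stream header is framed
                // as a single block with the HEADER flag set.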
                if (!stream->stream_header.empty()) {
                        metacube_block_header hdr;
                        memcpy(hdr.sync, METACUBE_SYNC, sizeof(hdr.sync));
                        hdr.size = htonl(stream->stream_header.size());
                        hdr.flags = htonl(METACUBE_FLAGS_HEADER);
                        client->header_or_error.append(
                                string(reinterpret_cast<char *>(&hdr), sizeof(hdr)));
                }
                client->header_or_error.append(stream->stream_header);
        } else {
                assert(false);
        }

        // Switch states.
        client->state = Client::SENDING_HEADER;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = reinterpret_cast<uint64_t>(client);

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

void Server::construct_error(Client *client, int error_code)
{
        char error[256];
        snprintf(error, sizeof(error), "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
                error_code);
        client->header_or_error = error;

        // Switch states.
        client->state = Client::SENDING_ERROR;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = reinterpret_cast<uint64_t>(client);

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

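// Remove all instances of <elem> from <v>, using the erase-remove idiom:
// remove() compacts the surviving elements towards the front and returns the
// new logical end, and erase() then chops off the tail. Linear time, which is
// fine for the short per-stream client lists we use it on below.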
template<class T>
void delete_from(vector<T> *v, T elem)
{
        typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
        v->erase(new_end, v->end());
}

void Server::close_client(Client *client)
{
        if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_DEL)");
                exit(1);
        }

        // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
        if (client->stream != NULL) {
                delete_from(&client->stream->sleeping_clients, client);
                delete_from(&client->stream->to_process, client);
                if (client->stream->mark_pool != NULL) {
                        int fwmark = client->fwmark;
                        client->stream->mark_pool->release_mark(fwmark);
                }
        }

        // Log to access_log.
        access_log->write(client->get_stats());

        // Bye-bye!
        safe_close(client->sock);

        clients.erase(client->sock);
}

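// Pick up the work that other threads have left for us through
// add_client_deferred() and add_data_deferred(). Note that this runs on the
// serving thread itself (see do_work() and serialize()), so only
// <queued_data_mutex>, which guards the handoff, needs to be taken here.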
void Server::process_queued_data()
{
        MutexLock lock(&queued_data_mutex);

        for (size_t i = 0; i < queued_add_clients.size(); ++i) {
                add_client(queued_add_clients[i]);
        }
        queued_add_clients.clear();

        for (size_t i = 0; i < streams.size(); ++i) {
                streams[i]->process_queued_data();
        }
}