#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <algorithm>
#include <map>
#include <string>
#include <utility>
#include <vector>

#include "markpool.h"
#include "mutexlock.h"
#include "parse.h"
#include "server.h"
#include "state.pb.h"
#include "stream.h"

using namespace std;

Server::Server()
{
	pthread_mutex_init(&mutex, NULL);
	pthread_mutex_init(&queued_data_mutex, NULL);

	epoll_fd = epoll_create(1024);  // Size argument is ignored.
	if (epoll_fd == -1) {
		perror("epoll_create");
		exit(1);
	}
}

Server::~Server()
{
	int ret;
	do {
		ret = close(epoll_fd);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		perror("close(epoll_fd)");
	}
}

vector<ClientStats> Server::get_client_stats() const
{
	vector<ClientStats> ret;

	MutexLock lock(&mutex);
	for (map<int, Client>::const_iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		ret.push_back(client_it->second.get_stats());
	}
	return ret;
}

void Server::do_work()
{
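	// Main event loop for this server thread: wait for epoll events,
	// apply work queued up by other threads (new clients, new stream data),
	// then service the clients that epoll or new data has made runnable.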
	for ( ;; ) {
		int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
		if (nfds == -1 && errno == EINTR) {
			if (should_stop) {
				return;
			}
			continue;
		}
		if (nfds == -1) {
			perror("epoll_wait");
			exit(1);
		}

		MutexLock lock(&mutex);  // We release the mutex between iterations.

		process_queued_data();

		for (int i = 0; i < nfds; ++i) {
			int fd = events[i].data.fd;
			assert(clients.count(fd) != 0);
			Client *client = &clients[fd];

			if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
				close_client(client);
				continue;
			}

			process_client(client);
		}

		for (map<string, Stream *>::iterator stream_it = streams.begin();
		     stream_it != streams.end();
		     ++stream_it) {
			vector<Client *> to_process;
			swap(stream_it->second->to_process, to_process);
			for (size_t i = 0; i < to_process.size(); ++i) {
				process_client(to_process[i]);
			}
		}

		if (should_stop) {
			return;
		}
	}
}

CubemapStateProto Server::serialize()
{
	// We don't serialize anything queued, so empty the queues.
	process_queued_data();

	CubemapStateProto serialized;
	for (map<int, Client>::const_iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		serialized.add_clients()->MergeFrom(client_it->second.serialize());
	}
	for (map<string, Stream *>::const_iterator stream_it = streams.begin();
	     stream_it != streams.end();
	     ++stream_it) {
		serialized.add_streams()->MergeFrom(stream_it->second->serialize());
	}
	return serialized;
}

void Server::add_client_deferred(int sock)
{
	MutexLock lock(&queued_data_mutex);
	queued_add_clients.push_back(sock);
}

void Server::add_client(int sock)
{
	clients.insert(make_pair(sock, Client(sock)));

	// Start listening on data from this socket.
	epoll_event ev;
	ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = 0;  // Keep Valgrind happy.
	ev.data.fd = sock;
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
		perror("epoll_ctl(EPOLL_CTL_ADD)");
		exit(1);
	}

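	// Process the client at once, in case the request has already arrived
	// on the socket before we added it to the epoll set.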
	process_client(&clients[sock]);
}

void Server::add_client_from_serialized(const ClientProto &client)
{
	MutexLock lock(&mutex);
	Stream *stream = find_stream(client.stream_id());
	clients.insert(make_pair(client.sock(), Client(client, stream)));
	Client *client_ptr = &clients[client.sock()];

	// Start listening on data from this socket.
	epoll_event ev;
	if (client.state() == Client::READING_REQUEST) {
		ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
	} else {
		// If we don't have more data for this client, we'll be putting it into
		// the sleeping array again soon.
		ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	}
	ev.data.u64 = 0;  // Keep Valgrind happy.
	ev.data.fd = client.sock();
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
		perror("epoll_ctl(EPOLL_CTL_ADD)");
		exit(1);
	}

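	// If the client is fully caught up with the stream, put it straight
	// to sleep; otherwise, service it right away.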
	if (client_ptr->state == Client::SENDING_DATA &&
	    client_ptr->bytes_sent == client_ptr->stream->bytes_received) {
		client_ptr->stream->put_client_to_sleep(client_ptr);
	} else {
		process_client(client_ptr);
	}
}

void Server::add_stream(const string &stream_id, size_t backlog_size)
{
	MutexLock lock(&mutex);
	streams.insert(make_pair(stream_id, new Stream(stream_id, backlog_size)));
}

void Server::add_stream_from_serialized(const StreamProto &stream)
{
	MutexLock lock(&mutex);
	streams.insert(make_pair(stream.stream_id(), new Stream(stream)));
}

void Server::set_header(const string &stream_id, const string &header)
{
	MutexLock lock(&mutex);
	find_stream(stream_id)->header = header;

	// If there are clients we haven't sent anything to yet, we should give
	// them the header, so push them back into the SENDING_HEADER state.
	for (map<int, Client>::iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		Client *client = &client_it->second;
		if (client->state == Client::SENDING_DATA &&
		    client->bytes_sent == 0) {
			construct_header(client);
		}
	}
}

void Server::set_mark_pool(const std::string &stream_id, MarkPool *mark_pool)
{
	MutexLock lock(&mutex);
	assert(clients.empty());
	find_stream(stream_id)->mark_pool = mark_pool;
}

void Server::add_data_deferred(const string &stream_id, const char *data, size_t bytes)
{
	MutexLock lock(&queued_data_mutex);
	queued_data[stream_id].append(string(data, data + bytes));
}

void Server::add_data(const string &stream_id, const char *data, ssize_t bytes)
{
	Stream *stream = find_stream(stream_id);
	size_t pos = stream->bytes_received % stream->backlog_size;
	stream->bytes_received += bytes;

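	// The write may wrap around the end of the circular backlog buffer.
	// If so, write the part that fits up to the end of the buffer first,
	// then let the loop below write the rest from position 0.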
	if (pos + bytes > stream->backlog_size) {
		ssize_t to_copy = stream->backlog_size - pos;
		while (to_copy > 0) {
			int ret = pwrite(stream->data_fd, data, to_copy, pos);
			if (ret == -1 && errno == EINTR) {
				continue;
			}
			if (ret == -1) {
				perror("pwrite");
				// Dazed and confused, but trying to continue...
				break;
			}
			pos += ret;
			data += ret;
			to_copy -= ret;
			bytes -= ret;
		}
		pos = 0;
	}

	while (bytes > 0) {
		int ret = pwrite(stream->data_fd, data, bytes, pos);
		if (ret == -1 && errno == EINTR) {
			continue;
		}
		if (ret == -1) {
			perror("pwrite");
			// Dazed and confused, but trying to continue...
			break;
		}
		pos += ret;
		data += ret;
		bytes -= ret;
	}

	stream->wake_up_all_clients();
}

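// Drive a client through its state machine: READING_REQUEST, then
// SENDING_HEADER or SENDING_ERROR, then SENDING_DATA. Since epoll is
// edge-triggered, each state keeps reading/writing until it runs out of
// data or the socket would block (EAGAIN).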
// See the .h file for postconditions after this function.
void Server::process_client(Client *client)
{
	switch (client->state) {
	case Client::READING_REQUEST: {
read_request_again:
		// Try to read more of the request.
		char buf[1024];
		int ret;
		do {
			ret = read(client->sock, buf, sizeof(buf));
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// No more data right now. Nothing to do.
			// This is postcondition #2.
			return;
		}
		if (ret == -1) {
			perror("read");
			close_client(client);
			return;
		}
		if (ret == 0) {
			// OK, the socket is closed.
			close_client(client);
			return;
		}

		RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);

		switch (status) {
		case RP_OUT_OF_SPACE:
			fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
			close_client(client);
			return;
		case RP_NOT_FINISHED_YET:
			// OK, we don't have the entire header yet. Fine; we'll get it later.
			// See if there's more data for us.
			goto read_request_again;
		case RP_EXTRA_DATA:
			fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
			close_client(client);
			return;
		case RP_FINISHED:
			break;
		}

		assert(status == RP_FINISHED);

		int error_code = parse_request(client);
		if (error_code == 200) {
			construct_header(client);
		} else {
			construct_error(client, error_code);
		}

		// We've changed states, so fall through.
		assert(client->state == Client::SENDING_ERROR ||
		       client->state == Client::SENDING_HEADER);
	}
	case Client::SENDING_ERROR:
	case Client::SENDING_HEADER: {
sending_header_or_error_again:
		int ret;
		do {
			ret = write(client->sock,
			            client->header_or_error.data() + client->header_or_error_bytes_sent,
			            client->header_or_error.size() - client->header_or_error_bytes_sent);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// We're out of socket space, so now we're at the “low edge” of epoll's
			// edge triggering. epoll will tell us when there is more room, so for now,
			// just return.
			// This is postcondition #4.
			return;
		}

		if (ret == -1) {
			// Error! Postcondition #1.
			perror("write");
			close_client(client);
			return;
		}

		client->header_or_error_bytes_sent += ret;
		assert(client->header_or_error_bytes_sent <= client->header_or_error.size());

		if (client->header_or_error_bytes_sent < client->header_or_error.size()) {
			// We haven't sent all yet. Fine; go another round.
			goto sending_header_or_error_again;
		}

		// We're done sending the header or error! Clear it to release some memory.
		client->header_or_error.clear();

		if (client->state == Client::SENDING_ERROR) {
			// We're done sending the error, so now close.
			// This is postcondition #1.
			close_client(client);
			return;
		}

		// Start sending from the end. In other words, we won't send any of the backlog,
		// but we'll start sending immediately as we get data.
		// This is postcondition #3.
		client->state = Client::SENDING_DATA;
		client->bytes_sent = client->stream->bytes_received;
		client->stream->put_client_to_sleep(client);
		return;
	}
	case Client::SENDING_DATA: {
sending_data_again:
		// See if there's some data we've lost. Ideally, we should drop to a block boundary,
		// but resync will be the mux's problem.
		Stream *stream = client->stream;
		size_t bytes_to_send = stream->bytes_received - client->bytes_sent;
		if (bytes_to_send == 0) {
			return;
		}
		if (bytes_to_send > stream->backlog_size) {
			fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
				client->sock,
				(long long int)(bytes_to_send - stream->backlog_size));
			client->bytes_sent = stream->bytes_received - stream->backlog_size;
			bytes_to_send = stream->backlog_size;
		}

		// See if we need to split across the circular buffer.
		bool more_data = false;
		if ((client->bytes_sent % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
			bytes_to_send = stream->backlog_size - (client->bytes_sent % stream->backlog_size);
			more_data = true;
		}

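		// Send the data directly from the backlog file to the socket with
		// sendfile(), so it never has to be copied through userspace.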
		ssize_t ret;
		do {
			loff_t offset = client->bytes_sent % stream->backlog_size;
			ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// We're out of socket space, so return; epoll will wake us up
			// when there is more room.
			// This is postcondition #4.
			return;
		}
		if (ret == -1) {
			// Error, close; postcondition #1.
			perror("sendfile");
			close_client(client);
			return;
		}
		client->bytes_sent += ret;

		if (client->bytes_sent == stream->bytes_received) {
			// We don't have any more data for this client, so put it to sleep.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
		} else if (more_data) {
			goto sending_data_again;
		}
		break;
	}
	default:
		assert(false);
	}
}

int Server::parse_request(Client *client)
{
	vector<string> lines = split_lines(client->request);
	if (lines.empty()) {
		return 400;  // Bad request (empty).
	}

	vector<string> request_tokens = split_tokens(lines[0]);
	if (request_tokens.size() < 2) {
		return 400;  // Bad request (too few tokens).
	}
	if (request_tokens[0] != "GET") {
		return 400;  // Should maybe be 405 instead?
	}
	if (streams.count(request_tokens[1]) == 0) {
		return 404;  // Not found.
	}

	client->stream_id = request_tokens[1];
	client->stream = find_stream(client->stream_id);
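	// If the stream has a mark pool, assign the client a packet mark (fwmark)
	// from it, so that external tools (e.g. tc) can classify its traffic.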
	if (client->stream->mark_pool != NULL) {
		client->fwmark = client->stream->mark_pool->get_mark();
	} else {
		client->fwmark = 0;  // No mark.
	}
	if (setsockopt(client->sock, SOL_SOCKET, SO_MARK, &client->fwmark, sizeof(client->fwmark)) == -1) {
		if (client->fwmark != 0) {
			perror("setsockopt(SO_MARK)");
		}
	}
	client->request.clear();

	return 200;  // OK!
}

void Server::construct_header(Client *client)
{
	client->header_or_error = find_stream(client->stream_id)->header;

	// Switch states.
	client->state = Client::SENDING_HEADER;

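	// From now on we only care about writing to the socket, so switch the
	// epoll registration from EPOLLIN to EPOLLOUT.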
	epoll_event ev;
	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = 0;  // Keep Valgrind happy.
	ev.data.fd = client->sock;

	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
		perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(1);
	}
}

void Server::construct_error(Client *client, int error_code)
{
	char error[256];
	snprintf(error, sizeof(error), "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
		error_code);
	client->header_or_error = error;

	// Switch states.
	client->state = Client::SENDING_ERROR;

	epoll_event ev;
	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = 0;  // Keep Valgrind happy.
	ev.data.fd = client->sock;

	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
		perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(1);
	}
}

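// Erase all occurrences of <elem> from the vector (the erase-remove idiom).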
template<class T>
void delete_from(vector<T> *v, T elem)
{
	typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
	v->erase(new_end, v->end());
}

void Server::close_client(Client *client)
{
	if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
		perror("epoll_ctl(EPOLL_CTL_DEL)");
		exit(1);
	}

	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
	if (client->stream != NULL) {
		delete_from(&client->stream->sleeping_clients, client);
		delete_from(&client->stream->to_process, client);
		if (client->stream->mark_pool != NULL) {
			int fwmark = client->fwmark;
			client->stream->mark_pool->release_mark(fwmark);
		}
	}

	// Bye-bye!
	int ret;
	do {
		ret = close(client->sock);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		perror("close");
	}

	clients.erase(client->sock);
}

Stream *Server::find_stream(const string &stream_id)
{
	map<string, Stream *>::iterator it = streams.find(stream_id);
	assert(it != streams.end());
	return it->second;
}

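// Apply work that other threads have queued up for us under
// queued_data_mutex: newly accepted client sockets (from add_client_deferred)
// and new stream data (from add_data_deferred).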
void Server::process_queued_data()
{
	MutexLock lock(&queued_data_mutex);

	for (size_t i = 0; i < queued_add_clients.size(); ++i) {
		add_client(queued_add_clients[i]);
	}
	queued_add_clients.clear();

	for (map<string, string>::iterator queued_it = queued_data.begin();
	     queued_it != queued_data.end();
	     ++queued_it) {
		add_data(queued_it->first, queued_it->second.data(), queued_it->second.size());
	}
	queued_data.clear();
}