#include <assert.h>
#include <errno.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <map>
#include <string>
#include <utility>
#include <vector>

#include "accesslog.h"
#include "log.h"
#include "metacube2.h"
#include "mutexlock.h"
#include "parse.h"
#include "server.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

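// SO_MAX_PACING_RATE is missing from older kernel headers; 47 is its value
// on Linux. Defining it ourselves lets this file build against such headers;
// on kernels without support for the option, the setsockopt() call will
// simply fail at runtime.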
#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47
#endif

using namespace std;

extern AccessLogThread *access_log;

Server::Server()
{
	pthread_mutex_init(&mutex, NULL);
	pthread_mutex_init(&queued_clients_mutex, NULL);

	epoll_fd = epoll_create(1024);  // Size argument is ignored.
	if (epoll_fd == -1) {
		log_perror("epoll_fd");
		exit(1);
	}
}

Server::~Server()
{
	for (size_t i = 0; i < streams.size(); ++i) {
		delete streams[i];
	}

	safe_close(epoll_fd);
}

vector<ClientStats> Server::get_client_stats() const
{
	vector<ClientStats> ret;

	MutexLock lock(&mutex);
	for (map<int, Client>::const_iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		ret.push_back(client_it->second.get_stats());
	}
	return ret;
}

void Server::do_work()
{
	while (!should_stop()) {
		// Wait until there's activity on at least one of the fds,
		// or 20 ms (about one frame at 50 fps) has elapsed.
		//
		// We could in theory wait forever and rely on wakeup()
		// from add_client_deferred() and add_data_deferred(),
		// but wakeup is a pretty expensive operation, and the
		// two threads might end up fighting over a lock, so it's
		// seemingly (much) more efficient to just have a timeout here.
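		//
		// Note that epoll_pwait() installs sigset_without_usr1_block (which,
		// as the name says, leaves SIGUSR1 unblocked) only for the duration
		// of the wait, so a wakeup() signal can interrupt the wait itself
		// (we then see EINTR below) but not any other part of this loop.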
		int nfds = epoll_pwait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS, &sigset_without_usr1_block);
		if (nfds == -1 && errno != EINTR) {
			log_perror("epoll_pwait");
			exit(1);
		}

		MutexLock lock(&mutex);  // We release the mutex between iterations.

		process_queued_data();

		for (int i = 0; i < nfds; ++i) {
			Client *client = reinterpret_cast<Client *>(events[i].data.u64);

			if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
				close_client(client);
				continue;
			}

			process_client(client);
		}

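		// process_queued_data() above may have added new data to some
		// streams; clients sleeping on those streams are then woken up by
		// being moved onto the stream's to_process list, which we drain here.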
		for (size_t i = 0; i < streams.size(); ++i) {
			vector<Client *> to_process;
			swap(streams[i]->to_process, to_process);
			for (size_t j = 0; j < to_process.size(); ++j) {
				process_client(to_process[j]);
			}
		}
	}
}

CubemapStateProto Server::serialize()
{
	// We don't serialize anything queued, so empty the queues.
	process_queued_data();

	// Set all clients in a consistent state before serializing
	// (i.e., they have no remaining lost data). Otherwise, increasing
	// the backlog could take clients into a newly valid area of the backlog,
	// sending a stream of zeros instead of skipping the data as it should.
	//
	// TODO: Do this when clients are added back from serialized state instead;
	// it would probably be less wasteful.
	for (map<int, Client>::iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		skip_lost_data(&client_it->second);
	}

	CubemapStateProto serialized;
	for (map<int, Client>::const_iterator client_it = clients.begin();
	     client_it != clients.end();
	     ++client_it) {
		serialized.add_clients()->MergeFrom(client_it->second.serialize());
	}
	for (size_t i = 0; i < streams.size(); ++i) {
		serialized.add_streams()->MergeFrom(streams[i]->serialize());
	}
	return serialized;
}

void Server::add_client_deferred(int sock)
{
	MutexLock lock(&queued_clients_mutex);
	queued_add_clients.push_back(sock);
}

void Server::add_client(int sock)
{
	pair<map<int, Client>::iterator, bool> ret =
		clients.insert(make_pair(sock, Client(sock)));
	assert(ret.second == true);  // Should not already exist.
	Client *client_ptr = &ret.first->second;

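	// Note that the epoll user data below stores a raw pointer to the Client
	// living inside the clients map. This stays valid for as long as the
	// client is in the map, since std::map never moves its elements;
	// close_client() removes the fd from epoll before erasing the entry.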
	// Start listening on data from this socket.
	epoll_event ev;
	ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
		exit(1);
	}

	process_client(client_ptr);
}

void Server::add_client_from_serialized(const ClientProto &client)
{
	MutexLock lock(&mutex);
	Stream *stream;
	int stream_index = lookup_stream_by_url(client.url());
	if (stream_index == -1) {
		assert(client.state() != Client::SENDING_DATA);
		stream = NULL;
	} else {
		stream = streams[stream_index];
	}
	pair<map<int, Client>::iterator, bool> ret =
		clients.insert(make_pair(client.sock(), Client(client, stream)));
	assert(ret.second == true);  // Should not already exist.
	Client *client_ptr = &ret.first->second;

	// Start listening on data from this socket.
	epoll_event ev;
	if (client.state() == Client::READING_REQUEST) {
		ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
	} else {
		// If we don't have more data for this client, we'll be putting it into
		// the sleeping array again soon.
		ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	}
	ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
		exit(1);
	}

	if (client_ptr->state == Client::WAITING_FOR_KEYFRAME ||
	    (client_ptr->state == Client::SENDING_DATA &&
	     client_ptr->stream_pos == client_ptr->stream->bytes_received)) {
		client_ptr->stream->put_client_to_sleep(client_ptr);
	} else {
		process_client(client_ptr);
	}
}

int Server::lookup_stream_by_url(const std::string &url) const
{
	map<string, int>::const_iterator url_it = url_map.find(url);
	if (url_it == url_map.end()) {
		return -1;
	}
	return url_it->second;
}

int Server::add_stream(const string &url, size_t backlog_size, Stream::Encoding encoding)
{
	MutexLock lock(&mutex);
	url_map.insert(make_pair(url, streams.size()));
	streams.push_back(new Stream(url, backlog_size, encoding));
	return streams.size() - 1;
}

int Server::add_stream_from_serialized(const StreamProto &stream, int data_fd)
{
	MutexLock lock(&mutex);
	url_map.insert(make_pair(stream.url(), streams.size()));
	streams.push_back(new Stream(stream, data_fd));
	return streams.size() - 1;
}

void Server::set_backlog_size(int stream_index, size_t new_size)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->set_backlog_size(new_size);
}

void Server::set_encoding(int stream_index, Stream::Encoding encoding)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->encoding = encoding;
}

void Server::set_header(int stream_index, const string &http_header, const string &stream_header)
{
	MutexLock lock(&mutex);
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->http_header = http_header;
	streams[stream_index]->stream_header = stream_header;
}

void Server::set_pacing_rate(int stream_index, uint32_t pacing_rate)
{
	MutexLock lock(&mutex);
	assert(clients.empty());
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->pacing_rate = pacing_rate;
}

void Server::add_data_deferred(int stream_index, const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
{
	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
	streams[stream_index]->add_data_deferred(data, bytes, suitable_for_stream_start);
}

// See the .h file for postconditions after this function.
void Server::process_client(Client *client)
{
	switch (client->state) {
	case Client::READING_REQUEST: {
read_request_again:
		// Try to read more of the request.
		char buf[1024];
		int ret;
		do {
			ret = read(client->sock, buf, sizeof(buf));
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// No more data right now. Nothing to do.
			// This is postcondition #2.
			return;
		}
		if (ret == -1) {
			log_perror("read");
			close_client(client);
			return;
		}
		if (ret == 0) {
			// OK, the socket is closed.
			close_client(client);
			return;
		}

		RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);

		switch (status) {
		case RP_OUT_OF_SPACE:
			log(WARNING, "[%s] Client sent overlong request!", client->remote_addr.c_str());
			close_client(client);
			return;
		case RP_NOT_FINISHED_YET:
			// OK, we don't have the entire header yet. Fine; we'll get it later.
			// See if there's more data for us.
			goto read_request_again;
		case RP_EXTRA_DATA:
			log(WARNING, "[%s] Junk data after request!", client->remote_addr.c_str());
			close_client(client);
			return;
		case RP_FINISHED:
			break;
		}

		assert(status == RP_FINISHED);

		int error_code = parse_request(client);
		if (error_code == 200) {
			construct_header(client);
		} else {
			construct_error(client, error_code);
		}

		// We've changed states, so fall through.
		assert(client->state == Client::SENDING_ERROR ||
		       client->state == Client::SENDING_HEADER);
	}
	case Client::SENDING_ERROR:
	case Client::SENDING_HEADER: {
sending_header_or_error_again:
		int ret;
		do {
			ret = write(client->sock,
			            client->header_or_error.data() + client->header_or_error_bytes_sent,
			            client->header_or_error.size() - client->header_or_error_bytes_sent);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// We're out of socket space, so now we're at the “low edge” of epoll's
			// edge triggering. epoll will tell us when there is more room, so for now,
			// just return.
			// This is postcondition #4.
			return;
		}

		if (ret == -1) {
			// Error! Postcondition #1.
			log_perror("write");
			close_client(client);
			return;
		}

		client->header_or_error_bytes_sent += ret;
		assert(client->header_or_error_bytes_sent <= client->header_or_error.size());

		if (client->header_or_error_bytes_sent < client->header_or_error.size()) {
			// We haven't sent all yet. Fine; go another round.
			goto sending_header_or_error_again;
		}

		// We're done sending the header or error! Clear it to release some memory.
		client->header_or_error.clear();

		if (client->state == Client::SENDING_ERROR) {
			// We're done sending the error, so now close.
			// This is postcondition #1.
			close_client(client);
			return;
		}

		// If the client asked for the backlog (stream_pos == -2), start
		// sending from the oldest byte we still have; otherwise, don't send
		// any of the backlog, but start sending immediately as we get the
		// next keyframe block. Either way, this is postcondition #3.
		if (client->stream_pos == size_t(-2)) {
			// Careful with the unsigned arithmetic: if the stream is still
			// shorter than the backlog, start from byte 0.
			if (client->stream->bytes_received > client->stream->backlog_size) {
				client->stream_pos = client->stream->bytes_received - client->stream->backlog_size;
			} else {
				client->stream_pos = 0;
			}
			client->state = Client::SENDING_DATA;
		} else {
			// client->stream_pos should be -1, but it might not be,
			// if we have clients from an older version.
			client->stream_pos = client->stream->bytes_received;
			client->state = Client::WAITING_FOR_KEYFRAME;
		}
		client->stream->put_client_to_sleep(client);
		return;
	}
	case Client::WAITING_FOR_KEYFRAME: {
		Stream *stream = client->stream;
		if (ssize_t(client->stream_pos) > stream->last_suitable_starting_point) {
			// We haven't received a keyframe since this stream started waiting,
			// so keep on waiting for one.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
			return;
		}
		client->stream_pos = stream->last_suitable_starting_point;
		client->state = Client::SENDING_DATA;
		// Fall through.
	}
	case Client::SENDING_DATA: {
		skip_lost_data(client);
		Stream *stream = client->stream;

sending_data_again:
		size_t bytes_to_send = stream->bytes_received - client->stream_pos;
		assert(bytes_to_send <= stream->backlog_size);
		if (bytes_to_send == 0) {
			return;
		}

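		// The backlog is a circular buffer: stream byte N lives at offset
		// (N % backlog_size) in data_fd. For example, with a 1 MB backlog,
		// a client at stream position 3.9 MB with 0.2 MB left to send gets
		// 0.1 MB from the end of the file now and the remaining 0.1 MB from
		// the start of the file on the next iteration.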
		// See if we need to split across the circular buffer.
		bool more_data = false;
		if ((client->stream_pos % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
			bytes_to_send = stream->backlog_size - (client->stream_pos % stream->backlog_size);
			more_data = true;
		}

		ssize_t ret;
		do {
			off_t offset = client->stream_pos % stream->backlog_size;
			ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno == EAGAIN) {
			// We're out of socket space, so return; epoll will wake us up
			// when there is more room.
			// This is postcondition #4.
			return;
		}
		if (ret == -1) {
			// Error, close; postcondition #1.
			log_perror("sendfile");
			close_client(client);
			return;
		}
		client->stream_pos += ret;
		client->bytes_sent += ret;

		if (client->stream_pos == stream->bytes_received) {
			// We don't have any more data for this client, so put it to sleep.
			// This is postcondition #3.
			stream->put_client_to_sleep(client);
		} else if (more_data && size_t(ret) == bytes_to_send) {
			goto sending_data_again;
		}
		break;
	}
	default:
		assert(false);
	}
}

// See if there's some data we've lost. Ideally, we should drop to a block boundary,
// but resync will be the mux's problem.
void Server::skip_lost_data(Client *client)
{
	Stream *stream = client->stream;
	if (stream == NULL) {
		return;
	}
	size_t bytes_to_send = stream->bytes_received - client->stream_pos;
	if (bytes_to_send > stream->backlog_size) {
		size_t bytes_lost = bytes_to_send - stream->backlog_size;
		client->stream_pos = stream->bytes_received - stream->backlog_size;
		client->bytes_lost += bytes_lost;
		++client->num_loss_events;
	}
}

int Server::parse_request(Client *client)
{
	vector<string> lines = split_lines(client->request);
	if (lines.empty()) {
		return 400;  // Bad request (empty).
	}

	vector<string> request_tokens = split_tokens(lines[0]);
	if (request_tokens.size() < 2) {
		return 400;  // Bad request (malformed request line).
	}
	if (request_tokens[0] != "GET") {
		return 400;  // Should maybe be 405 instead?
	}

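	// A URL ending in “?backlog” means the client wants the entire backlog
	// before the live edge; we signal this with the magic stream_pos value -2.
	// -1 is the normal case: wait for the next suitable starting point
	// (keyframe) and begin there. The real position is filled in once the
	// HTTP header has been sent.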
	string url = request_tokens[1];
	if (url.size() >= 8 && url.find("?backlog") == url.size() - 8) {
		client->stream_pos = -2;
		url = url.substr(0, url.size() - 8);
	} else {
		client->stream_pos = -1;
	}

	map<string, int>::const_iterator url_map_it = url_map.find(url);
	if (url_map_it == url_map.end()) {
		return 404;  // Not found.
	}

	Stream *stream = streams[url_map_it->second];
	if (stream->http_header.empty()) {
		return 503;  // Service unavailable.
	}

	client->url = request_tokens[1];
	client->stream = stream;
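	// Ask the kernel to pace this socket at the stream's configured rate.
	// ~0U means “no limit”, which is also the default, so a failure (e.g.,
	// on kernels without SO_MAX_PACING_RATE support) is only worth logging
	// when an actual limit was requested.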
	if (setsockopt(client->sock, SOL_SOCKET, SO_MAX_PACING_RATE, &client->stream->pacing_rate, sizeof(client->stream->pacing_rate)) == -1) {
		if (client->stream->pacing_rate != ~0U) {
			log_perror("setsockopt(SO_MAX_PACING_RATE)");
		}
	}
	client->request.clear();

	return 200;  // OK!
}

void Server::construct_header(Client *client)
{
	Stream *stream = client->stream;
	if (stream->encoding == Stream::STREAM_ENCODING_RAW) {
		client->header_or_error = stream->http_header +
			"\r\n" +
			stream->stream_header;
	} else if (stream->encoding == Stream::STREAM_ENCODING_METACUBE) {
		client->header_or_error = stream->http_header +
			"Content-encoding: metacube\r\n" +
			"\r\n";
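		// In Metacube encoding, the stream header is wrapped in its own
		// Metacube2 block flagged METACUBE_FLAGS_HEADER, so that a receiver
		// can tell it apart from ordinary stream data blocks.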
		if (!stream->stream_header.empty()) {
			metacube2_block_header hdr;
			memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
			hdr.size = htonl(stream->stream_header.size());
			hdr.flags = htons(METACUBE_FLAGS_HEADER);
			hdr.csum = htons(metacube2_compute_crc(&hdr));
			client->header_or_error.append(
				string(reinterpret_cast<char *>(&hdr), sizeof(hdr)));
		}
		client->header_or_error.append(stream->stream_header);
	} else {
		assert(false);
	}

	// Switch states.
	client->state = Client::SENDING_HEADER;

	epoll_event ev;
	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = reinterpret_cast<uint64_t>(client);

	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(1);
	}
}

void Server::construct_error(Client *client, int error_code)
{
	char error[256];
	snprintf(error, sizeof(error), "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
		error_code);
	client->header_or_error = error;

	// Switch states.
	client->state = Client::SENDING_ERROR;

	epoll_event ev;
	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
	ev.data.u64 = reinterpret_cast<uint64_t>(client);

	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(1);
	}
}

template<class T>
void delete_from(vector<T> *v, T elem)
{
	typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
	v->erase(new_end, v->end());
}

void Server::close_client(Client *client)
{
	if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
		log_perror("epoll_ctl(EPOLL_CTL_DEL)");
		exit(1);
	}

	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
	if (client->stream != NULL) {
		delete_from(&client->stream->sleeping_clients, client);
		delete_from(&client->stream->to_process, client);
	}

	// Log to access_log.
	access_log->write(client->get_stats());

	// Bye-bye!
	safe_close(client->sock);

	clients.erase(client->sock);
}

void Server::process_queued_data()
{
	{
		MutexLock lock(&queued_clients_mutex);

		for (size_t i = 0; i < queued_add_clients.size(); ++i) {
			add_client(queued_add_clients[i]);
		}
		queued_add_clients.clear();
	}

	for (size_t i = 0; i < streams.size(); ++i) {
		streams[i]->process_queued_data();
	}
}