#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <curl/curl.h>
#include <sys/socket.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <sys/epoll.h>
#include <errno.h>
#include <vector>
#include <string>
#include <map>
#include <algorithm>

#include "metacube.h"
#include "server.h"
#include "mutexlock.h"
#include "state.pb.h"

using namespace std;

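// A Client is a single connected HTTP client, tracked through a small state
// machine: READING_REQUEST -> SENDING_HEADER -> SENDING_DATA. Clients can be
// round-tripped through protobuf (ClientProto), presumably so that the
// process can hand its state over across a restart without dropping
// connections.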
Client::Client(int sock)
        : sock(sock),
          state(Client::READING_REQUEST),
          header_bytes_sent(0),
          bytes_sent(0)
{
        request.reserve(1024);
}

Client::Client(const ClientProto &serialized)
        : sock(serialized.sock()),
          state(State(serialized.state())),
          request(serialized.request()),
          stream_id(serialized.stream_id()),
          header(serialized.header()),
          header_bytes_sent(serialized.header_bytes_sent()),
          bytes_sent(serialized.bytes_sent())
{
}

ClientProto Client::serialize() const
{
        ClientProto serialized;
        serialized.set_sock(sock);
        serialized.set_state(state);
        serialized.set_request(request);
        serialized.set_stream_id(stream_id);
        serialized.set_header(header);
        serialized.set_header_bytes_sent(header_bytes_sent);
        serialized.set_bytes_sent(bytes_sent);
        return serialized;
}

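// A Stream is a single video stream, with its data kept in a fixed-size
// circular backlog buffer of BACKLOG_SIZE bytes. data_size counts the total
// number of bytes ever added to the stream; the current write position is
// simply data_size % BACKLOG_SIZE.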
Stream::Stream(const string &stream_id)
        : stream_id(stream_id),
          data(new char[BACKLOG_SIZE]),
          data_size(0)
{
        memset(data, 0, BACKLOG_SIZE);
}

Stream::~Stream()
{
        delete[] data;
}

Stream::Stream(const StreamProto &serialized)
        : stream_id(serialized.stream_id()),
          header(serialized.header()),
          data(new char[BACKLOG_SIZE]),
          data_size(serialized.data_size())
{
        assert(serialized.data().size() == BACKLOG_SIZE);
        memcpy(data, serialized.data().data(), BACKLOG_SIZE);
}

StreamProto Stream::serialize() const
{
        StreamProto serialized;
        serialized.set_header(header);
        serialized.set_data(string(data, data + BACKLOG_SIZE));
        serialized.set_data_size(data_size);
        serialized.set_stream_id(stream_id);
        return serialized;
}

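// The Server owns one epoll instance and one worker thread; all access to the
// client and stream maps is protected by a single mutex, shared between the
// worker thread and external callers (add_client(), add_data(), and so on).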
Server::Server()
{
        pthread_mutex_init(&mutex, NULL);

        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
                perror("epoll_create");
                exit(1);
        }
}

Server::~Server()
{
        close(epoll_fd);
}

void Server::run()
{
        should_stop = false;

        // Joinable is already the default, but it's good to be certain.
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
        pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
}

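// Ask the worker thread to stop, then wait for it to do so. The flag is only
// checked once per wakeup in do_work(), so EPOLL_TIMEOUT_MS bounds how long
// shutdown can take even when there is no client activity at all.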
void Server::stop()
{
        {
                MutexLock lock(&mutex);
                should_stop = true;
        }

        // Note that pthread_join() returns an error number directly;
        // it does not return -1 and set errno like a system call.
        int err = pthread_join(worker_thread, NULL);
        if (err != 0) {
                fprintf(stderr, "pthread_join: %s\n", strerror(err));
                exit(1);
        }
}

void *Server::do_work_thunk(void *arg)
{
        Server *server = static_cast<Server *>(arg);
        server->do_work();
        return NULL;
}

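// The main event loop: wait for activity on any client socket, then service
// each ready client in turn. Errors and hangups (EPOLLERR/EPOLLRDHUP/EPOLLHUP)
// close the client; everything else is handled by the per-state logic in
// process_client().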
void Server::do_work()
{
        for ( ;; ) {
                int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
                if (nfds == -1) {
                        if (errno == EINTR) {
                                // Interrupted by a signal; just try again.
                                continue;
                        }
                        perror("epoll_wait");
                        exit(1);
                }

                MutexLock lock(&mutex);  // We release the mutex between iterations.

                if (should_stop) {
                        return;
                }

                for (int i = 0; i < nfds; ++i) {
                        int fd = events[i].data.fd;
                        assert(clients.count(fd) != 0);
                        Client *client = &clients[fd];

                        if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
                                close_client(client);
                                continue;
                        }

                        process_client(client);
                }
        }
}

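// Snapshot all clients and streams into a single CubemapStateProto; this is
// the counterpart of add_client_from_serialized() and
// add_stream_from_serialized() below.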
CubemapStateProto Server::serialize() const
{
        CubemapStateProto serialized;
        for (map<int, Client>::const_iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                serialized.add_clients()->MergeFrom(client_it->second.serialize());
        }
        for (map<string, Stream *>::const_iterator stream_it = streams.begin();
             stream_it != streams.end();
             ++stream_it) {
                serialized.add_streams()->MergeFrom(stream_it->second->serialize());
        }
        return serialized;
}

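// Register a newly accepted client. New clients always start out in
// READING_REQUEST, so we listen for EPOLLIN; EPOLLRDHUP lets us notice the
// peer closing its end without having to attempt a read first.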
void Server::add_client(int sock)
{
        MutexLock lock(&mutex);
        clients.insert(make_pair(sock, Client(sock)));

        // Start listening for data from this socket.
        epoll_event ev;
        ev.events = EPOLLIN | EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = sock;
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }
}

void Server::add_client_from_serialized(const ClientProto &client)
{
        MutexLock lock(&mutex);
        clients.insert(make_pair(client.sock(), Client(client)));

        // Start listening for data from this socket.
        epoll_event ev;
        if (client.state() == Client::READING_REQUEST) {
                ev.events = EPOLLIN | EPOLLRDHUP;
        } else {
                // If we don't have any more data for this client, it will soon be
                // put back into the sleeping array anyway.
                ev.events = EPOLLOUT | EPOLLRDHUP;
        }
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = client.sock();
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }
}

void Server::add_stream(const string &stream_id)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream_id, new Stream(stream_id)));
}

void Server::add_stream_from_serialized(const StreamProto &stream)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream.stream_id(), new Stream(stream)));
}

void Server::set_header(const string &stream_id, const string &header)
{
        MutexLock lock(&mutex);
        find_stream(stream_id)->header = header;

        // If there are clients we haven't sent anything to yet, we should give
        // them the header, so push them back into the SENDING_HEADER state.
        for (map<int, Client>::iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                Client *client = &client_it->second;
                if (client->state == Client::SENDING_DATA &&
                    client->bytes_sent == 0) {
                        construct_header(client);
                }
        }
}

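// Append data to the stream's circular buffer, splitting the copy in two if
// it wraps around the end. For example, with BACKLOG_SIZE = 16, pos = 12 and
// bytes = 7, the first 4 bytes land at offsets 12..15 and the remaining 3
// wrap around to offsets 0..2. Note that this assumes a single add_data()
// call never exceeds BACKLOG_SIZE bytes; a larger write would overflow on
// the second memcpy.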
void Server::add_data(const string &stream_id, const char *data, size_t bytes)
{
        if (bytes == 0) {
                return;
        }

        MutexLock lock(&mutex);
        Stream *stream = find_stream(stream_id);
        size_t pos = stream->data_size % BACKLOG_SIZE;
        stream->data_size += bytes;

        if (pos + bytes > BACKLOG_SIZE) {
                size_t to_copy = BACKLOG_SIZE - pos;
                memcpy(stream->data + pos, data, to_copy);
                data += to_copy;
                bytes -= to_copy;
                pos = 0;
        }

        memcpy(stream->data + pos, data, bytes);
        wake_up_all_clients();
}

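// Advance a client through its state machine. In READING_REQUEST we
// accumulate bytes until the terminating \r\n\r\n; in SENDING_HEADER we drain
// the prebuilt HTTP response header; in SENDING_DATA we send whatever part of
// the stream backlog the client hasn't seen yet, possibly split in two by the
// circular buffer.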
void Server::process_client(Client *client)
{
        switch (client->state) {
        case Client::READING_REQUEST: {
                // Try to read more of the request.
                char buf[1024];
                ssize_t ret = read(client->sock, buf, sizeof(buf));
                if (ret == -1) {
                        perror("read");
                        close_client(client);
                        return;
                }
                if (ret == 0) {
                        // No data? This really means that we were triggered for something other
                        // than POLLIN (which suggests a logic error in epoll).
                        fprintf(stderr, "WARNING: fd %d unexpectedly returned 0 bytes!\n", client->sock);
                        close_client(client);
                        return;
                }

                // Guard against overlong requests gobbling up all of our space.
                if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
                        fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
                        close_client(client);
                        return;
                }

                // See if we have \r\n\r\n anywhere in the request. We start three bytes
                // before what we just appended, in case we just got the final character.
                size_t existing_req_bytes = client->request.size();
                client->request.append(string(buf, buf + ret));

                size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
                const char *ptr = reinterpret_cast<char *>(
                        memmem(client->request.data() + start_at, client->request.size() - start_at,
                               "\r\n\r\n", 4));
                if (ptr == NULL) {
                        // OK, we don't have the entire header yet. Fine; we'll get it later.
                        return;
                }

                if (ptr != client->request.data() + client->request.size() - 4) {
                        fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
                        close_client(client);
                        return;
                }

                parse_request(client);
                construct_header(client);
                break;
        }
        case Client::SENDING_HEADER: {
                ssize_t ret = write(client->sock,
                                    client->header.data() + client->header_bytes_sent,
                                    client->header.size() - client->header_bytes_sent);
                if (ret == -1) {
                        perror("write");
                        close_client(client);
                        return;
                }

                client->header_bytes_sent += ret;
                assert(client->header_bytes_sent <= client->header.size());

                if (client->header_bytes_sent < client->header.size()) {
                        // We haven't sent all of it yet. Fine; we'll do that later.
                        return;
                }

                // We're done sending the header! Clear the entire header to release some memory.
                client->header.clear();

                // Start sending from the end. In other words, we won't send any of the backlog,
                // but we'll start sending immediately as we get data.
                client->state = Client::SENDING_DATA;
                client->bytes_sent = find_stream(client->stream_id)->data_size;
                break;
        }
        case Client::SENDING_DATA: {
                // See if there's some data we've lost. Ideally, we should drop to a block boundary,
                // but resync will be the mux's problem.
                const Stream &stream = *find_stream(client->stream_id);
                size_t bytes_to_send = stream.data_size - client->bytes_sent;
                if (bytes_to_send > BACKLOG_SIZE) {
                        fprintf(stderr, "WARNING: fd %d lost %lld bytes, connection may be too slow\n",
                                client->sock,
                                (long long int)(bytes_to_send - BACKLOG_SIZE));
                        client->bytes_sent = stream.data_size - BACKLOG_SIZE;
                        bytes_to_send = BACKLOG_SIZE;
                }

                // See if we need to split across the circular buffer.
                ssize_t ret;
                if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
                        size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);

                        iovec iov[2];
                        iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
                        iov[0].iov_len = bytes_first_part;

                        iov[1].iov_base = const_cast<char *>(stream.data);
                        iov[1].iov_len = bytes_to_send - bytes_first_part;

                        ret = writev(client->sock, iov, 2);
                } else {
                        ret = write(client->sock,
                                    stream.data + (client->bytes_sent % BACKLOG_SIZE),
                                    bytes_to_send);
                }
                if (ret == -1) {
                        perror("write/writev");
                        close_client(client);
                        return;
                }
                client->bytes_sent += ret;

                if (client->bytes_sent == stream.data_size) {
                        // We don't have any more data for this client, so put it to sleep.
                        put_client_to_sleep(client);
                }
                break;
        }
        default:
                assert(false);
        }
}

void Server::parse_request(Client *client)
{
        // TODO: Actually parse the request. :-)
        client->stream_id = "stream";
        client->request.clear();
}

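// Build the full response for a client: a fixed HTTP/1.0 preamble followed by
// the stream's own header (for video/x-flv, presumably the FLV file header),
// then switch the client to SENDING_HEADER and re-arm epoll for writability.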
void Server::construct_header(Client *client)
{
        client->header = "HTTP/1.0 200 OK\r\nContent-type: video/x-flv\r\nCache-Control: no-cache\r\n\r\n" +
                find_stream(client->stream_id)->header;

        // Switch states.
        client->state = Client::SENDING_HEADER;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

void Server::close_client(Client *client)
{
        if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
                perror("epoll_ctl(EPOLL_CTL_DEL)");
                exit(1);
        }

        // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
        vector<int>::iterator new_end =
                remove(sleeping_clients.begin(), sleeping_clients.end(), client->sock);
        sleeping_clients.erase(new_end, sleeping_clients.end());

        // Bye-bye!
        close(client->sock);
        clients.erase(client->sock);
}

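// A client that has caught up with the stream is "put to sleep": we stop
// polling it for writability (keeping only EPOLLRDHUP, so we still notice
// disconnects) and remember its fd. wake_up_all_clients() then re-arms
// EPOLLOUT on every sleeping fd whenever add_data() brings in new data.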
void Server::put_client_to_sleep(Client *client)
{
        epoll_event ev;
        ev.events = EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }

        sleeping_clients.push_back(client->sock);
}

void Server::wake_up_all_clients()
{
        for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
                epoll_event ev;
                ev.events = EPOLLOUT | EPOLLRDHUP;
                ev.data.u64 = 0;  // Keep Valgrind happy.
                ev.data.fd = sleeping_clients[i];
                if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
                        perror("epoll_ctl(EPOLL_CTL_MOD)");
                        exit(1);
                }
        }
        sleeping_clients.clear();
}

Stream *Server::find_stream(const string &stream_id)
{
        map<string, Stream *>::iterator it = streams.find(stream_id);
        assert(it != streams.end());
        return it->second;
}