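// server.cpp: the epoll-based streaming server for cubemap. One worker thread
// per Server waits for socket events; each connected client moves through the
// states READING_REQUEST -> SENDING_HEADER -> SENDING_DATA, and stream data is
// kept in a fixed-size circular backlog (BACKLOG_SIZE) that slow clients may
// fall behind on. Client state can be serialized to/from protobuf (state.pb.h).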
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <curl/curl.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <errno.h>
#include <vector>
#include <string>
#include <map>
#include <algorithm>

#include "metacube.h"
#include "server.h"
#include "mutexlock.h"
#include "state.pb.h"

using namespace std;

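// A newly accepted client; it starts out waiting for its HTTP request.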
Client::Client(int sock)
        : sock(sock),
          state(Client::READING_REQUEST),
          header_bytes_sent(0),
          bytes_sent(0)
{
        request.reserve(1024);
}

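// Reconstruct a client from its serialized protobuf form (see serialize() below).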
Client::Client(const ClientProto &serialized)
        : sock(serialized.sock()),
          state(State(serialized.state())),
          request(serialized.request()),
          stream_id(serialized.stream_id()),
          header(serialized.header()),
          header_bytes_sent(serialized.header_bytes_sent()),
          bytes_sent(serialized.bytes_sent())
{
}

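// Serialize the client's state into a protobuf message, so that it can be
// restored later with the constructor above.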
ClientProto Client::serialize() const
{
        ClientProto serialized;
        serialized.set_sock(sock);
        serialized.set_state(state);
        serialized.set_request(request);
        serialized.set_stream_id(stream_id);
        serialized.set_header(header);
        serialized.set_header_bytes_sent(header_bytes_sent);
        serialized.set_bytes_sent(bytes_sent);
        return serialized;
}

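// Set up the per-server mutex and the epoll instance that the worker thread polls.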
Server::Server()
{
        pthread_mutex_init(&mutex, NULL);

        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
                perror("epoll_create");
                exit(1);
        }
}

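// Start the worker thread that runs the epoll loop.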
void Server::run()
{
        should_stop = false;

        // Joinable is already the default, but it's good to be certain.
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
        pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
}

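// Ask the worker thread to stop (under the mutex, so the flag is seen on the
// next iteration of the epoll loop) and wait for it to finish.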
void Server::stop()
{
        {
                MutexLock lock(&mutex);
                should_stop = true;
        }

        // pthread_join() returns an error number instead of setting errno.
        int err = pthread_join(worker_thread, NULL);
        if (err != 0) {
                fprintf(stderr, "pthread_join: %s\n", strerror(err));
                exit(1);
        }
}

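// pthread entry point; trampolines into do_work() on the Server instance.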
void *Server::do_work_thunk(void *arg)
{
        Server *server = static_cast<Server *>(arg);
        server->do_work();
        return NULL;
}

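// The worker loop: wait for epoll events, then service every client that has
// become readable, writable, or has hung up. The mutex is held while clients
// are processed and released between iterations, so other threads can add
// clients and stream data in the meantime.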
void Server::do_work()
{
        for ( ;; ) {
                int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
                if (nfds == -1) {
                        perror("epoll_wait");
                        exit(1);
                }

                MutexLock lock(&mutex);  // We release the mutex between iterations.

                if (should_stop) {
                        return;
                }

                for (int i = 0; i < nfds; ++i) {
                        int fd = events[i].data.fd;
                        assert(clients.count(fd) != 0);
                        Client *client = &clients[fd];

                        if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
                                close_client(client);
                                continue;
                        }

                        process_client(client);
                }
        }
}

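// Register a newly accepted socket and start polling it for request data.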
void Server::add_client(int sock)
{
        MutexLock lock(&mutex);
        clients.insert(make_pair(sock, Client(sock)));

        // Start listening for data from this socket.
        epoll_event ev;
        ev.events = EPOLLIN | EPOLLRDHUP;
        ev.data.fd = sock;
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }
}

void Server::add_stream(const string &stream_id)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream_id, Stream()));
}

void Server::set_header(const string &stream_id, const string &header)
{
        MutexLock lock(&mutex);
        assert(streams.count(stream_id) != 0);
        streams[stream_id].header = header;
}

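// Append data to a stream's circular backlog buffer. data_size is the total
// number of bytes ever added to the stream; both the writer here and the
// senders in process_client() reduce it modulo BACKLOG_SIZE to find their
// position in the buffer.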
void Server::add_data(const string &stream_id, const char *data, size_t bytes)
{
        if (bytes == 0) {
                return;
        }

        MutexLock lock(&mutex);
        assert(streams.count(stream_id) != 0);
        Stream *stream = &streams[stream_id];
        size_t pos = stream->data_size % BACKLOG_SIZE;
        stream->data_size += bytes;

        // If the write would run past the end of the buffer, copy the first
        // part up to the end and then wrap around to the beginning.
        if (pos + bytes > BACKLOG_SIZE) {
                size_t to_copy = BACKLOG_SIZE - pos;
                memcpy(stream->data + pos, data, to_copy);
                data += to_copy;
                bytes -= to_copy;
                pos = 0;
        }

        memcpy(stream->data + pos, data, bytes);
        wake_up_all_clients();
}

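// Drive a client through its state machine: read the HTTP request, send the
// response header plus the stream header, then keep sending stream data as it
// arrives. Called whenever epoll reports activity on the client's socket.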
void Server::process_client(Client *client)
{
        switch (client->state) {
        case Client::READING_REQUEST: {
                // Try to read more of the request.
                char buf[1024];
                int ret = read(client->sock, buf, sizeof(buf));
                if (ret == -1) {
                        perror("read");
                        close_client(client);
                        return;
                }
                if (ret == 0) {
                        // No data? This really means that we were triggered for something other
                        // than EPOLLIN (which suggests a logic error in the epoll handling).
                        fprintf(stderr, "WARNING: fd %d unexpectedly returned 0 bytes!\n", client->sock);
                        close_client(client);
                        return;
                }

                // Guard against overlong requests gobbling up all of our space.
                if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
                        fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
                        close_client(client);
                        return;
                }

                // See if we have \r\n\r\n anywhere in the request. We start three bytes
                // before what we just appended, in case we just got the final character.
                size_t existing_req_bytes = client->request.size();
                client->request.append(string(buf, buf + ret));

                size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
                const char *ptr = reinterpret_cast<const char *>(
                        memmem(client->request.data() + start_at, client->request.size() - start_at,
                               "\r\n\r\n", 4));
                if (ptr == NULL) {
                        // OK, we don't have the entire header yet. Fine; we'll get it later.
                        return;
                }

                if (ptr != client->request.data() + client->request.size() - 4) {
                        fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
                        close_client(client);
                        return;
                }

                parse_request(client);
                break;
        }
        case Client::SENDING_HEADER: {
                int ret = write(client->sock,
                                client->header.data() + client->header_bytes_sent,
                                client->header.size() - client->header_bytes_sent);
                if (ret == -1) {
                        perror("write");
                        close_client(client);
                        return;
                }

                client->header_bytes_sent += ret;
                assert(client->header_bytes_sent <= client->header.size());

                if (client->header_bytes_sent < client->header.size()) {
                        // We haven't sent all of it yet. Fine; we'll do that later.
                        return;
                }

                // We're done sending the header! Clear the entire header to release some memory.
                client->header.clear();

                // Start sending from the end. In other words, we won't send any of the backlog,
                // but we'll start sending immediately as we get data.
                client->state = Client::SENDING_DATA;
                client->bytes_sent = streams[client->stream_id].data_size;
                break;
        }
        case Client::SENDING_DATA: {
                // See if there's some data we've lost. Ideally, we should drop to a block boundary,
                // but resync will be the mux's problem.
                const Stream &stream = streams[client->stream_id];
                size_t bytes_to_send = stream.data_size - client->bytes_sent;
                if (bytes_to_send > BACKLOG_SIZE) {
                        fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe the connection is too slow\n",
                                client->sock,
                                (long long int)(bytes_to_send - BACKLOG_SIZE));
                        client->bytes_sent = streams[client->stream_id].data_size - BACKLOG_SIZE;
                        bytes_to_send = BACKLOG_SIZE;
                }

                // See if we need to split the send across the end of the circular buffer.
                ssize_t ret;
                if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
                        size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);

                        iovec iov[2];
                        iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
                        iov[0].iov_len = bytes_first_part;

                        iov[1].iov_base = const_cast<char *>(stream.data);
                        iov[1].iov_len = bytes_to_send - bytes_first_part;

                        ret = writev(client->sock, iov, 2);
                } else {
                        ret = write(client->sock,
                                    stream.data + (client->bytes_sent % BACKLOG_SIZE),
                                    bytes_to_send);
                }
                if (ret == -1) {
                        perror("write/writev");
                        close_client(client);
                        return;
                }
                client->bytes_sent += ret;

                if (client->bytes_sent == stream.data_size) {
                        // We don't have any more data for this client, so put it to sleep.
                        put_client_to_sleep(client);
                }
                break;
        }
        default:
                assert(false);
        }
}

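// Interpret the (complete) request, build the HTTP response header for the
// chosen stream, and switch the client over to sending it.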
void Server::parse_request(Client *client)
{
        // TODO: Actually parse the request. :-)
        client->stream_id = "stream";
        client->request.clear();

        // Construct the response header, followed by the stream's own header.
        client->header = "HTTP/1.0 200 OK\r\nContent-type: video/x-flv\r\nCache-Control: no-cache\r\n\r\n" +
                streams[client->stream_id].header;

        // Switch states, and start polling for writability instead of readability.
        client->state = Client::SENDING_HEADER;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLRDHUP;
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

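// Take a client out of epoll and out of the sleeping list, then close its
// socket and forget about it.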
void Server::close_client(Client *client)
{
        if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
                perror("epoll_ctl(EPOLL_CTL_DEL)");
                exit(1);
        }

        // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
        vector<int>::iterator new_end =
                remove(sleeping_clients.begin(), sleeping_clients.end(), client->sock);
        sleeping_clients.erase(new_end, sleeping_clients.end());

        // Bye-bye!
        close(client->sock);
        clients.erase(client->sock);
}

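// Stop polling the client for writability (we have no data for it right now),
// so that we don't spin on EPOLLOUT. wake_up_all_clients() re-arms EPOLLOUT
// once new data arrives.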
void Server::put_client_to_sleep(Client *client)
{
        epoll_event ev;
        ev.events = EPOLLRDHUP;
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }

        sleeping_clients.push_back(client->sock);
}

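// Re-arm EPOLLOUT for every sleeping client; called whenever new data has been
// added to a stream.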
void Server::wake_up_all_clients()
{
        for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
                epoll_event ev;
                ev.events = EPOLLOUT | EPOLLRDHUP;
                ev.data.fd = sleeping_clients[i];
                if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
                        perror("epoll_ctl(EPOLL_CTL_MOD)");
                        exit(1);
                }
        }
        sleeping_clients.clear();
}