Implement sleeping/waking clients.
[cubemap] / server.cpp
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <arpa/inet.h>
#include <curl/curl.h>
#include <sys/socket.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <errno.h>
#include <vector>
#include <string>
#include <map>

#include "metacube.h"
#include "server.h"
#include "mutexlock.h"

using namespace std;

Server::Server()
{
        pthread_mutex_init(&mutex, NULL);

        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
                perror("epoll_create");
                exit(1);
        }
}

void Server::run()
{
        pthread_t thread;
        pthread_create(&thread, NULL, Server::do_work_thunk, this);
}

// Static trampoline, since pthread_create() cannot take a pointer to a
// non-static member function directly.
void *Server::do_work_thunk(void *arg)
{
        Server *server = static_cast<Server *>(arg);
        server->do_work();
        return NULL;
}

void Server::do_work()
{
        for ( ;; ) {
                int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);

                MutexLock lock(&mutex);  // We release the mutex between iterations.
                if (nfds == -1) {
                        if (errno == EINTR) {
                                continue;  // Interrupted by a signal; just wait again.
                        }
                        perror("epoll_wait");
                        exit(1);
                }

                for (int i = 0; i < nfds; ++i) {
                        int fd = events[i].data.fd;
                        assert(clients.count(fd) != 0);
                        Client *client = &clients[fd];

                        if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
                                close_client(client);
                                continue;
                        }

                        process_client(client);
                }
        }
}

void Server::add_client(int sock)
{
        MutexLock lock(&mutex);
        Client new_client;
        new_client.sock = sock;
        new_client.client_request.reserve(1024);
        new_client.state = Client::READING_REQUEST;
        new_client.header_bytes_sent = 0;
        new_client.bytes_sent = 0;

        clients.insert(make_pair(sock, new_client));

        // Start listening for data from this socket.
        epoll_event ev;
        ev.events = EPOLLIN | EPOLLRDHUP;
        ev.data.fd = sock;
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }
}
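
// A minimal sketch of how a caller might feed accepted sockets into
// add_client(), assuming a socket that is already bound and listening.
// This helper is illustrative only and not part of the original file.
static void accept_clients(Server *server, int listen_sock)
{
        for ( ;; ) {
                int sock = accept(listen_sock, NULL, NULL);
                if (sock == -1) {
                        perror("accept");
                        continue;
                }
                server->add_client(sock);
        }
}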

void Server::add_stream(const string &stream_id)
{
        MutexLock lock(&mutex);
        streams.insert(make_pair(stream_id, Stream()));
}

void Server::set_header(const string &stream_id, const string &header)
{
        MutexLock lock(&mutex);
        assert(streams.count(stream_id) != 0);
        streams[stream_id].header = header;
}

void Server::add_data(const string &stream_id, const char *data, size_t bytes)
{
        if (bytes == 0) {
                return;
        }

        MutexLock lock(&mutex);
        assert(streams.count(stream_id) != 0);
        Stream *stream = &streams[stream_id];

        // The backlog is a circular buffer holding the last BACKLOG_SIZE bytes
        // of the stream; data_size counts all bytes ever added. (We assume a
        // single call never adds more than BACKLOG_SIZE bytes.)
        size_t pos = stream->data_size % BACKLOG_SIZE;
        stream->data_size += bytes;

        if (pos + bytes > BACKLOG_SIZE) {
                // The write wraps around the end of the buffer; split it in two.
                size_t to_copy = BACKLOG_SIZE - pos;
                memcpy(stream->data + pos, data, to_copy);
                data += to_copy;
                bytes -= to_copy;
                pos = 0;
        }

        memcpy(stream->data + pos, data, bytes);
        wake_up_all_clients();
}
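
// The wraparound logic above, restated as a standalone sketch (illustrative
// only; add_data() does not call this). For example, with a ring of size 8,
// writing 5 bytes at logical position 14 copies 2 bytes to offsets 6..7 and
// the remaining 3 bytes to offsets 0..2.
static void ring_write(char *ring, size_t ring_size, size_t logical_pos,
                       const char *data, size_t bytes)
{
        size_t pos = logical_pos % ring_size;
        if (pos + bytes > ring_size) {
                // The write crosses the end of the ring; split it in two.
                size_t to_copy = ring_size - pos;
                memcpy(ring + pos, data, to_copy);
                data += to_copy;
                bytes -= to_copy;
                pos = 0;
        }
        memcpy(ring + pos, data, bytes);
}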

void Server::process_client(Client *client)
{
        switch (client->state) {
        case Client::READING_REQUEST: {
                // Try to read more of the request.
                char buf[1024];
                ssize_t ret = read(client->sock, buf, sizeof(buf));
                if (ret == -1) {
                        perror("read");
                        close_client(client);
                        return;
                }
                if (ret == 0) {
                        // A read of zero bytes means the other end closed the
                        // connection before finishing its request.
                        fprintf(stderr, "WARNING: fd %d closed the connection unexpectedly!\n", client->sock);
                        close_client(client);
                        return;
                }

                // Guard against overlong requests gobbling up all of our space.
                if (client->client_request.size() + ret > MAX_CLIENT_REQUEST) {
                        fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
                        close_client(client);
                        return;
                }

                // See if we have \r\n\r\n anywhere in the request. We start three bytes
                // before what we just appended, in case we just got the final character.
                size_t existing_req_bytes = client->client_request.size();
                client->client_request.append(string(buf, buf + ret));

                size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
                const char *ptr = static_cast<const char *>(
                        memmem(client->client_request.data() + start_at, client->client_request.size() - start_at,
                               "\r\n\r\n", 4));
                if (ptr == NULL) {
                        // OK, we don't have the entire header yet. Fine; we'll get it later.
                        return;
                }

                if (ptr != client->client_request.data() + client->client_request.size() - 4) {
                        fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
                        close_client(client);
                        return;
                }

                parse_request(client);
                break;
        }
        case Client::SENDING_HEADER: {
                ssize_t ret = write(client->sock,
                                    client->header.data() + client->header_bytes_sent,
                                    client->header.size() - client->header_bytes_sent);
                if (ret == -1) {
                        perror("write");
                        close_client(client);
                        return;
                }

                client->header_bytes_sent += ret;
                assert(client->header_bytes_sent <= client->header.size());

                if (client->header_bytes_sent < client->header.size()) {
                        // We haven't sent all of it yet. Fine; we'll do that later.
                        return;
                }

                // We're done sending the header! Clear the entire header to release some memory.
                client->header.clear();

                // Start sending from the end. In other words, we won't send any of the backlog,
                // but we'll start sending immediately as we get data.
                client->state = Client::SENDING_DATA;
                client->bytes_sent = streams[client->stream_id].data_size;
                break;
        }
        case Client::SENDING_DATA: {
                // See if there's some data we've lost. Ideally, we should drop to a block boundary,
                // but resync will be the mux's problem.
                const Stream &stream = streams[client->stream_id];
                size_t bytes_to_send = stream.data_size - client->bytes_sent;
                if (bytes_to_send > BACKLOG_SIZE) {
                        fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
                                client->sock,
                                (long long int)(bytes_to_send - BACKLOG_SIZE));
                        client->bytes_sent = stream.data_size - BACKLOG_SIZE;
                        bytes_to_send = BACKLOG_SIZE;
                }

                // See if we need to split the send across the end of the circular buffer.
                ssize_t ret;
                if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
                        // TODO: writev (see the illustrative sketch after this function).
                        assert(false);
                } else {
                        ret = write(client->sock,
                                    stream.data + (client->bytes_sent % BACKLOG_SIZE),
                                    bytes_to_send);
                }
                if (ret == -1) {
                        perror("write/writev");
                        close_client(client);
                        return;
                }
                client->bytes_sent += ret;

                if (client->bytes_sent == stream.data_size) {
                        // We don't have any more data for this client, so put it to sleep.
                        put_client_to_sleep(client);
                }
                break;
        }
        default:
                // TODO
                assert(false);
        }
}
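
#include <sys/uio.h>  // For writev(); used only by the sketch below.

// A sketch of what the missing writev case in SENDING_DATA could look like,
// assuming the same wraparound arithmetic as above (illustrative only; the
// original code still asserts instead). It sends both halves of the circular
// buffer in a single system call.
static ssize_t write_wrapped(int sock, const char *ring, size_t ring_size,
                             size_t start_pos, size_t bytes_to_send)
{
        size_t first_part = ring_size - start_pos;
        iovec iov[2];
        iov[0].iov_base = const_cast<char *>(ring + start_pos);
        iov[0].iov_len = first_part;
        iov[1].iov_base = const_cast<char *>(ring);
        iov[1].iov_len = bytes_to_send - first_part;
        return writev(sock, iov, 2);
}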

void Server::parse_request(Client *client)
{
        // TODO: Actually parse the request (see the sketch after this function). :-)
        client->stream_id = "stream";

        // Construct the header.
        client->header = "HTTP/1.0 200 OK\r\nContent-type: todo/fixme\r\n\r\n" +
                streams[client->stream_id].header;

        // Switch states.
        client->state = Client::SENDING_HEADER;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLRDHUP;
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}
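
// A minimal sketch of what actual request parsing could look like, assuming
// requests of the form "GET /stream_id HTTP/1.x" (hypothetical; the real
// request format is still a TODO above).
static string extract_stream_id(const string &request)
{
        // The stream ID would be the path component of the request line,
        // minus the leading slash.
        size_t sp1 = request.find(' ');
        if (sp1 == string::npos) {
                return "";
        }
        size_t sp2 = request.find(' ', sp1 + 1);
        if (sp2 == string::npos) {
                return "";
        }
        return request.substr(sp1 + 2, sp2 - sp1 - 2);
}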

void Server::close_client(Client *client)
{
        if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
                perror("epoll_ctl(EPOLL_CTL_DEL)");
                exit(1);
        }

        // Bye-bye!
        close(client->sock);
        clients.erase(client->sock);
}

void Server::put_client_to_sleep(Client *client)
{
        // Stop listening for EPOLLOUT; we'll turn it back on when there is
        // new data to send (see wake_up_all_clients() below).
        epoll_event ev;
        ev.events = EPOLLRDHUP;
        ev.data.fd = client->sock;

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }

        sleeping_clients.push_back(client->sock);
}

void Server::wake_up_all_clients()
{
        for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
                epoll_event ev;
                ev.events = EPOLLOUT | EPOLLRDHUP;
                ev.data.fd = sleeping_clients[i];
                if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
                        perror("epoll_ctl(EPOLL_CTL_MOD)");
                        exit(1);
                }
        }
        sleeping_clients.clear();
}