#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include "mutexlock.h"
25 Client::Client(int sock)
27 state(Client::READING_REQUEST),
31 request.reserve(1024);
34 Client::Client(const ClientProto &serialized)
35 : sock(serialized.sock()),
36 state(State(serialized.state())),
37 request(serialized.request()),
38 stream_id(serialized.stream_id()),
39 header(serialized.header()),
40 header_bytes_sent(serialized.header_bytes_sent()),
41 bytes_sent(serialized.bytes_sent())
45 ClientProto Client::serialize() const
47 ClientProto serialized;
48 serialized.set_sock(sock);
49 serialized.set_state(state);
50 serialized.set_request(request);
51 serialized.set_stream_id(stream_id);
52 serialized.set_header(header);
53 serialized.set_header_bytes_sent(serialized.header_bytes_sent());
54 serialized.set_bytes_sent(bytes_sent);
58 Stream::Stream(const string &stream_id)
59 : stream_id(stream_id),
60 data(new char[BACKLOG_SIZE]),
63 memset(data, 0, BACKLOG_SIZE);
71 Stream::Stream(const StreamProto &serialized)
72 : header(serialized.header()),
73 data(new char[BACKLOG_SIZE]),
74 data_size(serialized.data_size())
76 assert(serialized.data().size() == BACKLOG_SIZE);
77 memcpy(data, serialized.data().data(), BACKLOG_SIZE);
80 StreamProto Stream::serialize() const
82 StreamProto serialized;
83 serialized.set_header(header);
84 serialized.set_data(string(data, data + BACKLOG_SIZE));
85 serialized.set_data_size(data_size);
91 pthread_mutex_init(&mutex, NULL);
93 epoll_fd = epoll_create(1024); // Size argument is ignored.
104 // Joinable is already the default, but it's good to be certain.
106 pthread_attr_init(&attr);
107 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
108 pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
114 MutexLock lock(&mutex);
118 if (pthread_join(worker_thread, NULL) == -1) {
119 perror("pthread_join");
124 void *Server::do_work_thunk(void *arg)
126 Server *server = static_cast<Server *>(arg);
131 void Server::do_work()
134 int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
136 perror("epoll_wait");
140 MutexLock lock(&mutex); // We release the mutex between iterations.
146 for (int i = 0; i < nfds; ++i) {
147 int fd = events[i].data.fd;
148 assert(clients.count(fd) != 0);
149 Client *client = &clients[fd];
151 if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
152 close_client(client);
156 process_client(client);
161 CubemapStateProto Server::serialize() const
163 CubemapStateProto serialized;
164 for (map<int, Client>::const_iterator client_it = clients.begin();
165 client_it != clients.end();
167 serialized.add_clients()->MergeFrom(client_it->second.serialize());
169 for (map<string, Stream *>::const_iterator stream_it = streams.begin();
170 stream_it != streams.end();
172 serialized.add_streams()->MergeFrom(stream_it->second->serialize());
177 void Server::add_client(int sock)
179 MutexLock lock(&mutex);
180 clients.insert(make_pair(sock, Client(sock)));
182 // Start listening on data from this socket.
184 ev.events = EPOLLIN | EPOLLRDHUP;
185 ev.data.u64 = 0; // Keep Valgrind happy.
187 if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
188 perror("epoll_ctl(EPOLL_CTL_ADD)");
193 void Server::add_stream(const string &stream_id)
195 MutexLock lock(&mutex);
196 streams.insert(make_pair(stream_id, new Stream(stream_id)));
199 void Server::set_header(const string &stream_id, const string &header)
201 MutexLock lock(&mutex);
202 find_stream(stream_id)->header = header;
205 void Server::add_data(const string &stream_id, const char *data, size_t bytes)
211 MutexLock lock(&mutex);
212 Stream *stream = find_stream(stream_id);
213 size_t pos = stream->data_size % BACKLOG_SIZE;
214 stream->data_size += bytes;
216 if (pos + bytes > BACKLOG_SIZE) {
217 size_t to_copy = BACKLOG_SIZE - pos;
218 memcpy(stream->data + pos, data, to_copy);
224 memcpy(stream->data + pos, data, bytes);
225 wake_up_all_clients();
228 void Server::process_client(Client *client)
230 switch (client->state) {
231 case Client::READING_REQUEST: {
232 // Try to read more of the request.
234 int ret = read(client->sock, buf, sizeof(buf));
237 close_client(client);
241 // No data? This really means that we were triggered for something else than
242 // POLLIN (which suggests a logic error in epoll).
243 fprintf(stderr, "WARNING: fd %d returned unexpectedly 0 bytes!\n", client->sock);
244 close_client(client);
248 // Guard against overlong requests gobbling up all of our space.
249 if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
250 fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
251 close_client(client);
255 // See if we have \r\n\r\n anywhere in the request. We start three bytes
256 // before what we just appended, in case we just got the final character.
257 size_t existing_req_bytes = client->request.size();
258 client->request.append(string(buf, buf + ret));
260 size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
261 const char *ptr = reinterpret_cast<char *>(
262 memmem(client->request.data() + start_at, client->request.size() - start_at,
265 // OK, we don't have the entire header yet. Fine; we'll get it later.
269 if (ptr != client->request.data() + client->request.size() - 4) {
270 fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
271 close_client(client);
275 parse_request(client);
278 case Client::SENDING_HEADER: {
279 int ret = write(client->sock,
280 client->header.data() + client->header_bytes_sent,
281 client->header.size() - client->header_bytes_sent);
284 close_client(client);
288 client->header_bytes_sent += ret;
289 assert(client->header_bytes_sent <= client->header.size());
291 if (client->header_bytes_sent < client->header.size()) {
292 // We haven't sent all yet. Fine; we'll do that later.
296 // We're done sending the header! Clear the entire header to release some memory.
297 client->header.clear();
299 // Start sending from the end. In other words, we won't send any of the backlog,
300 // but we'll start sending immediately as we get data.
301 client->state = Client::SENDING_DATA;
302 client->bytes_sent = find_stream(client->stream_id)->data_size;
305 case Client::SENDING_DATA: {
306 // See if there's some data we've lost. Ideally, we should drop to a block boundary,
307 // but resync will be the mux's problem.
308 const Stream &stream = *find_stream(client->stream_id);
309 size_t bytes_to_send = stream.data_size - client->bytes_sent;
310 if (bytes_to_send > BACKLOG_SIZE) {
311 fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
313 (long long int)(bytes_to_send - BACKLOG_SIZE));
314 client->bytes_sent = find_stream(client->stream_id)->data_size - BACKLOG_SIZE;
315 bytes_to_send = BACKLOG_SIZE;
318 // See if we need to split across the circular buffer.
320 if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
321 size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);
324 iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
325 iov[0].iov_len = bytes_first_part;
327 iov[1].iov_base = const_cast<char *>(stream.data);
328 iov[1].iov_len = bytes_to_send - bytes_first_part;
330 ret = writev(client->sock, iov, 2);
332 ret = write(client->sock,
333 stream.data + (client->bytes_sent % BACKLOG_SIZE),
337 perror("write/writev");
338 close_client(client);
341 client->bytes_sent += ret;
343 if (client->bytes_sent == stream.data_size) {
344 // We don't have any more data for this client, so put it to sleep.
345 put_client_to_sleep(client);
354 void Server::parse_request(Client *client)
356 // TODO: Actually parse the request. :-)
357 client->stream_id = "stream";
358 client->request.clear();
360 // Construct the header.
361 client->header = "HTTP/1.0 200 OK\r\n Content-type: video/x-flv\r\nCache-Control: no-cache\r\nContent-type: todo/fixme\r\n\r\n" +
362 find_stream(client->stream_id)->header;
365 client->state = Client::SENDING_HEADER;
368 ev.events = EPOLLOUT | EPOLLRDHUP;
369 ev.data.u64 = 0; // Keep Valgrind happy.
370 ev.data.fd = client->sock;
372 if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
373 perror("epoll_ctl(EPOLL_CTL_MOD)");
378 void Server::close_client(Client *client)
380 if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
381 perror("epoll_ctl(EPOLL_CTL_DEL)");
385 // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
386 vector<int>::iterator new_end =
387 remove(sleeping_clients.begin(), sleeping_clients.end(), client->sock);
388 sleeping_clients.erase(new_end, sleeping_clients.end());
392 clients.erase(client->sock);
395 void Server::put_client_to_sleep(Client *client)
398 ev.events = EPOLLRDHUP;
399 ev.data.u64 = 0; // Keep Valgrind happy.
400 ev.data.fd = client->sock;
402 if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
403 perror("epoll_ctl(EPOLL_CTL_MOD)");
407 sleeping_clients.push_back(client->sock);
410 void Server::wake_up_all_clients()
412 for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
414 ev.events = EPOLLOUT | EPOLLRDHUP;
415 ev.data.u64 = 0; // Keep Valgrind happy.
416 ev.data.fd = sleeping_clients[i];
417 if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
418 perror("epoll_ctl(EPOLL_CTL_MOD)");
422 sleeping_clients.clear();
425 Stream *Server::find_stream(const string &stream_id)
427 map<string, Stream *>::iterator it = streams.find(stream_id);
428 assert(it != streams.end());