#include <stdint.h>
#include <assert.h>
#include <arpa/inet.h>
-#include <curl/curl.h>
#include <sys/socket.h>
#include <pthread.h>
#include <sys/types.h>
return serialized;
}
+// Park a client that has no more data to send on this stream's
+// sleeping list; it is revived by wake_up_all_clients().
+void Stream::put_client_to_sleep(Client *client)
+{
+	sleeping_clients.push_back(client);
+}
+
+// Move every sleeping client onto this stream's to_process list so the
+// event loop will service them again (called from add_data() when new
+// stream data has arrived).
+void Stream::wake_up_all_clients()
+{
+	if (to_process.empty()) {
+		// Fast path: to_process is empty, so a swap avoids copying.
+		swap(sleeping_clients, to_process);
+	} else {
+		to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
+		sleeping_clients.clear();
+	}
+}
+
Server::Server()
{
pthread_mutex_init(&mutex, NULL);
+ pthread_mutex_init(&queued_data_mutex, NULL);
epoll_fd = epoll_create(1024); // Size argument is ignored.
if (epoll_fd == -1) {
if (should_stop) {
return;
}
-
+
+ process_queued_data();
+
for (int i = 0; i < nfds; ++i) {
int fd = events[i].data.fd;
assert(clients.count(fd) != 0);
process_client(client);
}
+
+ for (map<string, Stream *>::iterator stream_it = streams.begin();
+ stream_it != streams.end();
+ ++stream_it) {
+ vector<Client *> to_process;
+ swap(stream_it->second->to_process, to_process);
+ for (size_t i = 0; i < to_process.size(); ++i) {
+ process_client(to_process[i]);
+ }
+ }
}
}
-CubemapStateProto Server::serialize() const
+CubemapStateProto Server::serialize()
{
+ // We don't serialize anything queued, so empty the queues.
+ process_queued_data();
+
CubemapStateProto serialized;
for (map<int, Client>::const_iterator client_it = clients.begin();
client_it != clients.end();
return serialized;
}
+// Queue a new client socket under queued_data_mutex instead of touching
+// server state directly; the queue is drained by process_queued_data()
+// from the epoll loop.
+void Server::add_client_deferred(int sock)
+{
+	MutexLock lock(&queued_data_mutex);
+	queued_add_clients.push_back(sock);
+}
+
 void Server::add_client(int sock)
 {
-	MutexLock lock(&mutex);
 	clients.insert(make_pair(sock, Client(sock)));

 	// Start listening on data from this socket.
 	epoll_event ev;
-	ev.events = EPOLLIN | EPOLLRDHUP;
+	// EPOLLET = edge-triggered (see epoll(7)): we are only notified on
+	// state changes, so the socket must be drained each time we wake up.
+	ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
 	ev.data.u64 = 0; // Keep Valgrind happy.
 	ev.data.fd = sock;
 	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
 		perror("epoll_ctl(EPOLL_CTL_ADD)");
 		exit(1);
 	}
+
+	// With edge-triggered epoll we get no event for data that arrived
+	// before EPOLL_CTL_ADD, so process the client once right away.
+	process_client(&clients[sock]);
 }
void Server::add_client_from_serialized(const ClientProto &client)
MutexLock lock(&mutex);
Stream *stream = find_stream(client.stream_id());
clients.insert(make_pair(client.sock(), Client(client, stream)));
+ Client *client_ptr = &clients[client.sock()];
// Start listening on data from this socket.
epoll_event ev;
exit(1);
}
- process_client(&clients[client.sock()]);
+ if (client_ptr->state == Client::SENDING_DATA &&
+ client_ptr->bytes_sent == client_ptr->stream->data_size) {
+ client_ptr->stream->put_client_to_sleep(client_ptr);
+ } else {
+ process_client(client_ptr);
+ }
}
void Server::add_stream(const string &stream_id)
}
}
}
-
-void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+
+void Server::add_data_deferred(const string &stream_id, const char *data, size_t bytes)
{
- if (bytes == 0) {
- return;
- }
+ MutexLock lock(&queued_data_mutex);
+ queued_data[stream_id].append(string(data, data + bytes));
+}
- MutexLock lock(&mutex);
+void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+{
Stream *stream = find_stream(stream_id);
size_t pos = stream->data_size % BACKLOG_SIZE;
stream->data_size += bytes;
}
memcpy(stream->data + pos, data, bytes);
- wake_up_all_clients();
+ stream->wake_up_all_clients();
}
// See the .h file for postconditions after this function.
return;
}
- // Guard against overlong requests gobbling up all of our space.
- if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
+ RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);
+
+ switch (status) {
+ case RP_OUT_OF_SPACE:
fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
close_client(client);
return;
- }
-
- // See if we have \r\n\r\n anywhere in the request. We start three bytes
- // before what we just appended, in case we just got the final character.
- size_t existing_req_bytes = client->request.size();
- client->request.append(string(buf, buf + ret));
-
- size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
- const char *ptr = reinterpret_cast<char *>(
- memmem(client->request.data() + start_at, client->request.size() - start_at,
- "\r\n\r\n", 4));
- if (ptr == NULL) {
+ case RP_NOT_FINISHED_YET:
// OK, we don't have the entire header yet. Fine; we'll get it later.
// See if there's more data for us.
goto read_request_again;
- }
-
- if (ptr != client->request.data() + client->request.size() - 4) {
+ case RP_EXTRA_DATA:
fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
close_client(client);
return;
+ case RP_FINISHED:
+ break;
}
+ assert(status == RP_FINISHED);
+
int error_code = parse_request(client);
if (error_code == 200) {
construct_header(client);
// This is postcondition #3.
client->state = Client::SENDING_DATA;
client->bytes_sent = client->stream->data_size;
- put_client_to_sleep(client);
+ client->stream->put_client_to_sleep(client);
return;
}
case Client::SENDING_DATA: {
// See if there's some data we've lost. Ideally, we should drop to a block boundary,
// but resync will be the mux's problem.
- const Stream *stream = client->stream;
+ Stream *stream = client->stream;
size_t bytes_to_send = stream->data_size - client->bytes_sent;
if (bytes_to_send == 0) {
return;
if (client->bytes_sent == stream->data_size) {
// We don't have any more data for this client, so put it to sleep.
// This is postcondition #3.
- put_client_to_sleep(client);
+ stream->put_client_to_sleep(client);
} else {
// XXX: Do we need to go another round here to explicitly
// get the EAGAIN?
void Server::construct_header(Client *client)
{
- client->header_or_error = "HTTP/1.0 200 OK\r\nContent-type: video/x-flv\r\nCache-Control: no-cache\r\n\r\n" +
- find_stream(client->stream_id)->header;
+ client->header_or_error = find_stream(client->stream_id)->header;
// Switch states.
client->state = Client::SENDING_HEADER;
exit(1);
}
}
+
+// Remove all occurrences of <elem> from <v> (erase-remove idiom).
+template<class T>
+void delete_from(vector<T> *v, T elem)
+{
+	typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
+	v->erase(new_end, v->end());
+}
void Server::close_client(Client *client)
{
}
// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
- vector<Client *>::iterator new_end =
- remove(sleeping_clients.begin(), sleeping_clients.end(), client);
- sleeping_clients.erase(new_end, sleeping_clients.end());
+ if (client->stream != NULL) {
+ delete_from(&client->stream->sleeping_clients, client);
+ delete_from(&client->stream->to_process, client);
+ }
// Bye-bye!
int ret;
clients.erase(client->sock);
}
-void Server::put_client_to_sleep(Client *client)
+Stream *Server::find_stream(const string &stream_id)
{
- sleeping_clients.push_back(client);
+ map<string, Stream *>::iterator it = streams.find(stream_id);
+ assert(it != streams.end());
+ return it->second;
}
-void Server::wake_up_all_clients()
+// Drain the work queued via the *_deferred() entry points: new client
+// sockets and per-stream data. Called from the epoll loop (and from
+// serialize()). NOTE(review): add_client()/add_data() run while
+// queued_data_mutex is still held.
+void Server::process_queued_data()
 {
-	vector<Client *> to_process;
-	swap(sleeping_clients, to_process);
-	for (unsigned i = 0; i < to_process.size(); ++i) {
-		process_client(to_process[i]);
+	MutexLock lock(&queued_data_mutex);
+
+	// Admit clients queued by add_client_deferred().
+	for (size_t i = 0; i < queued_add_clients.size(); ++i) {
+		add_client(queued_add_clients[i]);
 	}
-}
+	queued_add_clients.clear();
-Stream *Server::find_stream(const string &stream_id)
-{
-	map<string, Stream *>::iterator it = streams.find(stream_id);
-	assert(it != streams.end());
-	return it->second;
+	// Append data queued by add_data_deferred(); add_data() also wakes
+	// any clients sleeping on the stream.
+	for (map<string, string>::iterator queued_it = queued_data.begin();
+		queued_it != queued_data.end();
+		++queued_it) {
+		add_data(queued_it->first, queued_it->second.data(), queued_it->second.size());
+	}
+	queued_data.clear();
 }