X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=server.cpp;h=f06685ab02224ea654b85d82f09d35a5dc456a5c;hp=94350faae3ff182d93e232ea6d34bffc50f54080;hb=c2c9f6441f9ae8091a39aea0340417d5915e1ac9;hpb=f3ed48479f9209e708cd17698dc9e778e07284cd

diff --git a/server.cpp b/server.cpp
index 94350fa..f06685a 100644
--- a/server.cpp
+++ b/server.cpp
@@ -3,7 +3,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -91,9 +90,25 @@ StreamProto Stream::serialize() const
 	return serialized;
 }
 
+void Stream::put_client_to_sleep(Client *client)
+{
+	sleeping_clients.push_back(client);
+}
+
+void Stream::wake_up_all_clients()
+{
+	if (to_process.empty()) {
+		swap(sleeping_clients, to_process);
+	} else {
+		to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
+		sleeping_clients.clear();
+	}
+}
+
 Server::Server()
 {
 	pthread_mutex_init(&mutex, NULL);
+	pthread_mutex_init(&queued_data_mutex, NULL);
 
 	epoll_fd = epoll_create(1024);  // Size argument is ignored.
 	if (epoll_fd == -1) {
@@ -162,7 +177,9 @@ void Server::do_work()
 		if (should_stop) {
 			return;
 		}
-
+
+		process_queued_data();
+
 		for (int i = 0; i < nfds; ++i) {
 			int fd = events[i].data.fd;
 			assert(clients.count(fd) != 0);
@@ -175,11 +192,24 @@
 
 			process_client(client);
 		}
+
+		for (map<string, Stream *>::iterator stream_it = streams.begin();
+		     stream_it != streams.end();
+		     ++stream_it) {
+			Stream *stream = stream_it->second;
+			for (size_t i = 0; i < stream->to_process.size(); ++i) {
+				process_client(stream->to_process[i]);
+			}
+			stream->to_process.clear();
+		}
 	}
 }
 
-CubemapStateProto Server::serialize() const
+CubemapStateProto Server::serialize()
 {
+	// We don't serialize anything queued, so empty the queues.
+	process_queued_data();
+
 	CubemapStateProto serialized;
 	for (map<int, Client>::const_iterator client_it = clients.begin();
 	     client_it != clients.end();
@@ -194,20 +224,27 @@
 	return serialized;
 }
 
+void Server::add_client_deferred(int sock)
+{
+	MutexLock lock(&queued_data_mutex);
+	queued_add_clients.push_back(sock);
+}
+
 void Server::add_client(int sock)
 {
-	MutexLock lock(&mutex);
 	clients.insert(make_pair(sock, Client(sock)));
 
 	// Start listening on data from this socket.
 	epoll_event ev;
-	ev.events = EPOLLIN | EPOLLRDHUP;
+	ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
 	ev.data.u64 = 0;  // Keep Valgrind happy.
 	ev.data.fd = sock;
 	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
 		perror("epoll_ctl(EPOLL_CTL_ADD)");
 		exit(1);
 	}
+
+	process_client(&clients[sock]);
 }
 
 void Server::add_client_from_serialized(const ClientProto &client)
@@ -215,6 +252,7 @@
 	MutexLock lock(&mutex);
 	Stream *stream = find_stream(client.stream_id());
 	clients.insert(make_pair(client.sock(), Client(client, stream)));
+	Client *client_ptr = &clients[client.sock()];
 
 	// Start listening on data from this socket.
 	epoll_event ev;
@@ -232,7 +270,12 @@ void Server::add_client_from_serialized(const ClientProto &client)
 		exit(1);
 	}
 
-	process_client(&clients[client.sock()]);
+	if (client_ptr->state == Client::SENDING_DATA &&
+	    client_ptr->bytes_sent == client_ptr->stream->data_size) {
+		client_ptr->stream->put_client_to_sleep(client_ptr);
+	} else {
+		process_client(client_ptr);
+	}
 }
 
 void Server::add_stream(const string &stream_id)
@@ -264,14 +307,15 @@ void Server::set_header(const string &stream_id, const string &header)
 		}
 	}
 }
-
-void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+
+void Server::add_data_deferred(const string &stream_id, const char *data, size_t bytes)
 {
-	if (bytes == 0) {
-		return;
-	}
+	MutexLock lock(&queued_data_mutex);
+	queued_data[stream_id].append(string(data, data + bytes));
+}
 
-	MutexLock lock(&mutex);
+void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+{
 	Stream *stream = find_stream(stream_id);
 	size_t pos = stream->data_size % BACKLOG_SIZE;
 	stream->data_size += bytes;
@@ -285,7 +329,7 @@
 	}
 
 	memcpy(stream->data + pos, data, bytes);
-	wake_up_all_clients();
+	stream->wake_up_all_clients();
 }
 
 // See the .h file for postconditions after this function.
@@ -317,34 +361,27 @@ read_request_again:
 		return;
 	}
 
-	// Guard against overlong requests gobbling up all of our space.
-	if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
+	RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);
+
+	switch (status) {
+	case RP_OUT_OF_SPACE:
 		fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
 		close_client(client);
 		return;
-	}
-
-	// See if we have \r\n\r\n anywhere in the request. We start three bytes
-	// before what we just appended, in case we just got the final character.
-	size_t existing_req_bytes = client->request.size();
-	client->request.append(string(buf, buf + ret));
-
-	size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
-	const char *ptr = reinterpret_cast<const char *>(
-		memmem(client->request.data() + start_at, client->request.size() - start_at,
-		       "\r\n\r\n", 4));
-	if (ptr == NULL) {
+	case RP_NOT_FINISHED_YET:
 		// OK, we don't have the entire header yet. Fine; we'll get it later.
 		// See if there's more data for us.
 		goto read_request_again;
-	}
-
-	if (ptr != client->request.data() + client->request.size() - 4) {
+	case RP_EXTRA_DATA:
 		fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
 		close_client(client);
 		return;
+	case RP_FINISHED:
+		break;
 	}
+	assert(status == RP_FINISHED);
+
 	int error_code = parse_request(client);
 	if (error_code == 200) {
 		construct_header(client);
@@ -404,13 +441,13 @@ sending_header_or_error_again:
 		// This is postcondition #3.
 		client->state = Client::SENDING_DATA;
 		client->bytes_sent = client->stream->data_size;
-		put_client_to_sleep(client);
+		client->stream->put_client_to_sleep(client);
 		return;
 	}
 	case Client::SENDING_DATA: {
 		// See if there's some data we've lost. Ideally, we should drop to a block boundary,
 		// but resync will be the mux's problem.
-		const Stream *stream = client->stream;
+		Stream *stream = client->stream;
 		size_t bytes_to_send = stream->data_size - client->bytes_sent;
 		if (bytes_to_send == 0) {
 			return;
@@ -462,7 +499,7 @@ sending_header_or_error_again:
 		if (client->bytes_sent == stream->data_size) {
 			// We don't have any more data for this client, so put it to sleep.
 			// This is postcondition #3.
-			put_client_to_sleep(client);
+			stream->put_client_to_sleep(client);
 		} else {
 			// XXX: Do we need to go another round here to explicitly
 			// get the EAGAIN?
@@ -547,9 +584,14 @@ void Server::close_client(Client *client)
 	}
 
 	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
-	vector<Client *>::iterator new_end =
-		remove(sleeping_clients.begin(), sleeping_clients.end(), client);
-	sleeping_clients.erase(new_end, sleeping_clients.end());
+	if (client->stream != NULL) {
+		vector<Client *>::iterator new_end =
+			remove(client->stream->sleeping_clients.begin(),
+			       client->stream->sleeping_clients.end(),
+			       client);
+		client->stream->sleeping_clients.erase(
+			new_end, client->stream->sleeping_clients.end());
+	}
 
 	// Bye-bye!
 	int ret;
@@ -564,23 +606,26 @@
 	clients.erase(client->sock);
 }
 
-void Server::put_client_to_sleep(Client *client)
+Stream *Server::find_stream(const string &stream_id)
 {
-	sleeping_clients.push_back(client);
+	map<string, Stream *>::iterator it = streams.find(stream_id);
+	assert(it != streams.end());
+	return it->second;
 }
 
-void Server::wake_up_all_clients()
+void Server::process_queued_data()
 {
-	vector<Client *> to_process;
-	swap(sleeping_clients, to_process);
-	for (unsigned i = 0; i < to_process.size(); ++i) {
-		process_client(to_process[i]);
+	MutexLock lock(&queued_data_mutex);
+
+	for (size_t i = 0; i < queued_add_clients.size(); ++i) {
+		add_client(queued_add_clients[i]);
	}
-}
+	queued_add_clients.clear();
 
-Stream *Server::find_stream(const string &stream_id)
-{
-	map<string, Stream *>::iterator it = streams.find(stream_id);
-	assert(it != streams.end());
-	return it->second;
+	for (map<string, string>::iterator queued_it = queued_data.begin();
+	     queued_it != queued_data.end();
+	     ++queued_it) {
+		add_data(queued_it->first, queued_it->second.data(), queued_it->second.size());
+	}
+	queued_data.clear();
 }
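
The heart of this diff is that add_client() and add_data() are no longer called directly from other threads. Producers now go through add_client_deferred()/add_data_deferred(), which only append to queues guarded by the new queued_data_mutex, and the epoll thread drains those queues at the top of its loop in do_work() (and again in serialize(), so nothing queued is lost when state is serialized). Below is a minimal, self-contained sketch of that hand-off pattern; WorkQueue and its push()/drain() are illustrative names, not cubemap's API:

#include <algorithm>
#include <pthread.h>
#include <stdio.h>
#include <string>
#include <vector>

using namespace std;

// Illustrative stand-in for cubemap's queued_add_clients/queued_data pair.
struct WorkQueue {
	pthread_mutex_t mutex;
	vector<string> items;

	WorkQueue() { pthread_mutex_init(&mutex, NULL); }

	// Producer side (any thread): touches only the queue, never epoll state.
	void push(const string &item)
	{
		pthread_mutex_lock(&mutex);
		items.push_back(item);
		pthread_mutex_unlock(&mutex);
	}

	// Consumer side (the event loop): steal the whole queue in O(1) with
	// swap(), the same idiom the new Stream::wake_up_all_clients() uses,
	// then do the real work without holding the lock.
	void drain()
	{
		vector<string> to_process;
		pthread_mutex_lock(&mutex);
		swap(items, to_process);
		pthread_mutex_unlock(&mutex);
		for (size_t i = 0; i < to_process.size(); ++i) {
			printf("processing: %s\n", to_process[i].c_str());
		}
	}
};

int main()
{
	WorkQueue queue;
	queue.push("add_client(fd=5)");       // e.g. from the acceptor thread
	queue.push("add_data(stream, ...)");  // e.g. from an input thread
	queue.drain();  // in cubemap, process_queued_data() plays this role
	return 0;
}

One difference worth noting: the diff's process_queued_data() simply holds queued_data_mutex for the entire drain, which is fine because producers only ever take that one short lock; the swap() variant above additionally keeps the lock out of the processing path.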
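The other structural change is the move to edge-triggered epoll: the registration flags in add_client() gain EPOLLET, and the function compensates by calling process_client() immediately after epoll_ctl(), since with edge triggering an event that fired before registration would never be re-reported. Edge triggering also obliges the reader to drain a socket until read() returns EAGAIN before going back to epoll_wait(), which is what the retained "XXX: Do we need to go another round here to explicitly get the EAGAIN?" comment is asking about. Here is a hedged sketch of such a drain loop, assuming a nonblocking socket; drain_socket() is an illustrative helper, not part of server.cpp:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

// Keep reading until the kernel reports EAGAIN; with EPOLLET, stopping
// earlier can leave buffered bytes that no future event will announce.
// Returns false if the peer closed the connection or a real error occurred.
static bool drain_socket(int fd)
{
	char buf[4096];
	for ( ;; ) {
		ssize_t ret = read(fd, buf, sizeof(buf));
		if (ret > 0) {
			// ... hand (buf, ret) to the request parser here ...
			continue;
		}
		if (ret == 0) {
			return false;  // Orderly shutdown from the peer.
		}
		if (errno == EINTR) {
			continue;  // Interrupted by a signal; just retry.
		}
		if (errno == EAGAIN || errno == EWOULDBLOCK) {
			return true;  // Fully drained; safe to await the next edge.
		}
		perror("read");
		return false;  // Some other error.
	}
}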