Make most operations on Server deferred, so that we a) do not get bugs with epoll...
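The pattern this patch introduces is easiest to see with the pieces gathered in one place. The following is a condensed sketch only, assuming a stripped-down Server that keeps nothing but the queueing members (the real class also owns the epoll fd, the client map and the streams), and using raw pthread calls in place of cubemap's MutexLock helper so it stands alone: threads other than the epoll worker (presumably the acceptor and the stream inputs) only append to small queues under queued_data_mutex, and the worker drains those queues at the top of each do_work() iteration, so all epoll_ctl() calls and client-map updates stay on one thread.

	// Condensed sketch of the deferred-operation pattern; not the real class.
	#include <pthread.h>
	#include <stddef.h>
	#include <map>
	#include <string>
	#include <vector>

	class Server {
	public:
		Server() { pthread_mutex_init(&queued_data_mutex, NULL); }

		// Called from threads other than the epoll worker. They only touch
		// the queues, never the epoll set or the client map.
		void add_client_deferred(int sock) {
			pthread_mutex_lock(&queued_data_mutex);
			queued_add_clients.push_back(sock);
			pthread_mutex_unlock(&queued_data_mutex);
		}
		void add_data_deferred(const std::string &stream_id, const char *data, size_t bytes) {
			pthread_mutex_lock(&queued_data_mutex);
			queued_data[stream_id].append(std::string(data, data + bytes));
			pthread_mutex_unlock(&queued_data_mutex);
		}

		// Called only from the epoll worker, once per do_work() iteration,
		// so the non-deferred versions below always run on one thread.
		void process_queued_data() {
			pthread_mutex_lock(&queued_data_mutex);
			for (size_t i = 0; i < queued_add_clients.size(); ++i) {
				add_client(queued_add_clients[i]);
			}
			queued_add_clients.clear();
			for (std::map<std::string, std::string>::iterator it = queued_data.begin();
			     it != queued_data.end();
			     ++it) {
				add_data(it->first, it->second.data(), it->second.size());
			}
			queued_data.clear();
			pthread_mutex_unlock(&queued_data_mutex);
		}

	private:
		// Stand-ins for the real members; these do epoll_ctl() and backlog writes.
		void add_client(int sock) {}
		void add_data(const std::string &stream_id, const char *data, size_t bytes) {}

		pthread_mutex_t queued_data_mutex;
		std::vector<int> queued_add_clients;
		std::map<std::string, std::string> queued_data;
	};

The point of the split is that the _deferred entry points never block on, or race with, the epoll machinery; before this patch, add_client() and add_data() took the big Server mutex directly from whichever thread called them.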
diff --git a/server.cpp b/server.cpp
index 54e6b5e804d9959632b1ef70df6a53fee7f52fac..60836a74ca52774855f80c2a3ce623545dbc534f 100644
--- a/server.cpp
+++ b/server.cpp
@@ -91,9 +91,25 @@ StreamProto Stream::serialize() const
        return serialized;
 }
 
+void Stream::put_client_to_sleep(Client *client)
+{
+       sleeping_clients.push_back(client);
+}
+
+void Stream::wake_up_all_clients()
+{
+       if (to_process.empty()) {
+               swap(sleeping_clients, to_process);
+       } else {
+               to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
+               sleeping_clients.clear();
+       }
+}
+
 Server::Server()
 {
        pthread_mutex_init(&mutex, NULL);
+       pthread_mutex_init(&queued_data_mutex, NULL);
 
        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
@@ -162,7 +178,9 @@ void Server::do_work()
                if (should_stop) {
                        return;
                }
-       
+
+               process_queued_data();
+
                for (int i = 0; i < nfds; ++i) {
                        int fd = events[i].data.fd;
                        assert(clients.count(fd) != 0);
@@ -175,11 +193,24 @@ void Server::do_work()
 
                        process_client(client);
                }
+
+               for (map<string, Stream *>::iterator stream_it = streams.begin();
+                    stream_it != streams.end();
+                    ++stream_it) {
+                       Stream *stream = stream_it->second;
+                       for (size_t i = 0; i < stream->to_process.size(); ++i) {
+                               process_client(stream->to_process[i]);
+                       }
+                       stream->to_process.clear();
+               }
        }
 }
 
-CubemapStateProto Server::serialize() const
+CubemapStateProto Server::serialize()
 {
+       // We don't serialize anything queued, so empty the queues.
+       process_queued_data();
+
        CubemapStateProto serialized;
        for (map<int, Client>::const_iterator client_it = clients.begin();
             client_it != clients.end();
@@ -194,20 +225,27 @@ CubemapStateProto Server::serialize() const
        return serialized;
 }
 
+void Server::add_client_deferred(int sock)
+{
+       MutexLock lock(&queued_data_mutex);
+       queued_add_clients.push_back(sock);
+}
+
 void Server::add_client(int sock)
 {
-       MutexLock lock(&mutex);
        clients.insert(make_pair(sock, Client(sock)));
 
        // Start listening on data from this socket.
        epoll_event ev;
-       ev.events = EPOLLIN | EPOLLRDHUP;
+       ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = 0;  // Keep Valgrind happy.
        ev.data.fd = sock;
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }
+
+       process_client(&clients[sock]);
 }
 
 void Server::add_client_from_serialized(const ClientProto &client)
@@ -215,6 +253,7 @@ void Server::add_client_from_serialized(const ClientProto &client)
        MutexLock lock(&mutex);
        Stream *stream = find_stream(client.stream_id());
        clients.insert(make_pair(client.sock(), Client(client, stream)));
+       Client *client_ptr = &clients[client.sock()];
 
        // Start listening on data from this socket.
        epoll_event ev;
@@ -232,7 +271,12 @@ void Server::add_client_from_serialized(const ClientProto &client)
                exit(1);
        }
 
-       process_client(&clients[client.sock()]);
+       if (client_ptr->state == Client::SENDING_DATA && 
+           client_ptr->bytes_sent == client_ptr->stream->data_size) {
+               client_ptr->stream->put_client_to_sleep(client_ptr);
+       } else {
+               process_client(client_ptr);
+       }
 }
 
 void Server::add_stream(const string &stream_id)
@@ -264,14 +308,15 @@ void Server::set_header(const string &stream_id, const string &header)
                }
        }
 }
-       
-void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+
+void Server::add_data_deferred(const string &stream_id, const char *data, size_t bytes)
 {
-       if (bytes == 0) {
-               return;
-       }
+       MutexLock lock(&queued_data_mutex);
+       queued_data[stream_id].append(string(data, data + bytes));
+}
 
-       MutexLock lock(&mutex);
+void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+{
        Stream *stream = find_stream(stream_id);
        size_t pos = stream->data_size % BACKLOG_SIZE;
        stream->data_size += bytes;
@@ -285,7 +330,7 @@ void Server::add_data(const string &stream_id, const char *data, size_t bytes)
        }
 
        memcpy(stream->data + pos, data, bytes);
-       wake_up_all_clients();
+       stream->wake_up_all_clients();
 }
 
 // See the .h file for postconditions after this function.     
@@ -404,14 +449,17 @@ sending_header_or_error_again:
                // This is postcondition #3.
                client->state = Client::SENDING_DATA;
                client->bytes_sent = client->stream->data_size;
-               sleeping_clients.push_back(client);
+               client->stream->put_client_to_sleep(client);
                return;
        }
        case Client::SENDING_DATA: {
                // See if there's some data we've lost. Ideally, we should drop to a block boundary,
                // but resync will be the mux's problem.
-               const Stream *stream = client->stream;
+               Stream *stream = client->stream;
                size_t bytes_to_send = stream->data_size - client->bytes_sent;
+               if (bytes_to_send == 0) {
+                       return;
+               }
                if (bytes_to_send > BACKLOG_SIZE) {
                        fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
                                client->sock,
@@ -459,7 +507,7 @@ sending_header_or_error_again:
                if (client->bytes_sent == stream->data_size) {
                        // We don't have any more data for this client, so put it to sleep.
                        // This is postcondition #3.
-                       put_client_to_sleep(client);
+                       stream->put_client_to_sleep(client);
                } else {
                        // XXX: Do we need to go another round here to explicitly
                        // get the EAGAIN?
@@ -544,9 +592,14 @@ void Server::close_client(Client *client)
        }
 
        // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
-       vector<Client *>::iterator new_end =
-               remove(sleeping_clients.begin(), sleeping_clients.end(), client);
-       sleeping_clients.erase(new_end, sleeping_clients.end());
+       if (client->stream != NULL) {
+               vector<Client *>::iterator new_end =
+                       remove(client->stream->sleeping_clients.begin(),
+                              client->stream->sleeping_clients.end(),
+                              client);
+               client->stream->sleeping_clients.erase(
+                       new_end, client->stream->sleeping_clients.end());
+       }
        
        // Bye-bye!
        int ret;
@@ -561,23 +614,26 @@ void Server::close_client(Client *client)
        clients.erase(client->sock);
 }
        
-void Server::put_client_to_sleep(Client *client)
+Stream *Server::find_stream(const string &stream_id)
 {
-       sleeping_clients.push_back(client);
+       map<string, Stream *>::iterator it = streams.find(stream_id);
+       assert(it != streams.end());
+       return it->second;
 }
 
-void Server::wake_up_all_clients()
+void Server::process_queued_data()
 {
-       vector<Client *> to_process;
-       swap(sleeping_clients, to_process);
-       for (unsigned i = 0; i < to_process.size(); ++i) {
-               process_client(to_process[i]);
+       MutexLock lock(&queued_data_mutex);
+
+       for (size_t i = 0; i < queued_add_clients.size(); ++i) {
+               add_client(queued_add_clients[i]);
        }
-}
+       queued_add_clients.clear();     
        
-Stream *Server::find_stream(const string &stream_id)
-{
-       map<string, Stream *>::iterator it = streams.find(stream_id);
-       assert(it != streams.end());
-       return it->second;
+       for (map<string, string>::iterator queued_it = queued_data.begin();
+            queued_it != queued_data.end();
+            ++queued_it) {
+               add_data(queued_it->first, queued_it->second.data(), queued_it->second.size());
+       }
+       queued_data.clear();
 }
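
Under the new scheme, a caller would use the _deferred entry points; the real acceptor and input threads live outside server.cpp and are not part of this diff, so the loop below is purely illustrative (it assumes server.h is available for the Server declaration).

	#include <stdio.h>
	#include <sys/socket.h>

	// Hypothetical acceptor thread: it only enqueues the new socket and never
	// touches epoll_fd or the client map itself.
	void accept_loop(Server *server, int server_sock)
	{
		for ( ;; ) {
			int sock = accept(server_sock, NULL, NULL);
			if (sock == -1) {
				perror("accept");
				continue;
			}
			server->add_client_deferred(sock);
		}
	}

Anything enqueued this way only takes effect at the worker's next process_queued_data() call, which is also why serialize() now flushes the queues before writing out state.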