+ // Set all clients in a consistent state before serializing
+ // (ie., they have no remaining lost data). Otherwise, increasing
+ // the backlog could take clients into a newly valid area of the backlog,
+ // sending a stream of zeros instead of skipping the data as it should.
+ //
+ // TODO: Do this when clients are added back from serialized state instead;
+ // it would probably be less wasteful.
+ for (map<int, Client>::iterator client_it = clients.begin();
+ client_it != clients.end();
+ ++client_it) {
+ skip_lost_data(&client_it->second);
+ }
+
+ CubemapStateProto serialized;
+ for (map<int, Client>::const_iterator client_it = clients.begin();
+ client_it != clients.end();
+ ++client_it) {
+ serialized.add_clients()->MergeFrom(client_it->second.serialize());
+ }
+ for (size_t i = 0; i < streams.size(); ++i) {
+ serialized.add_streams()->MergeFrom(streams[i]->serialize());
+ }
+ return serialized;
+}
+
+ // Enqueue a socket for later addition as a client. Only records the fd
+ // under queued_data_mutex; the actual Client construction/insertion is
+ // presumably performed later by the owning server thread (see
+ // Server::add_client) — deferral keeps this call cheap and safe to make
+ // from outside that thread. TODO(review): confirm which thread drains
+ // queued_add_clients.
+ void Server::add_client_deferred(int sock)
+ {
+ 	MutexLock lock(&queued_data_mutex);  // RAII-style scoped lock (project type)
+ 	queued_add_clients.push_back(sock);
+ }
+
+void Server::add_client(int sock)
+{
+ pair<map<int, Client>::iterator, bool> ret =
+ clients.insert(make_pair(sock, Client(sock)));
+ assert(ret.second == true); // Should not already exist.
+ Client *client_ptr = &ret.first->second;