X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=server.cpp;h=5026deb7ecb6fd911c16888e070fa33e31482a4b;hb=3b73724f08274a2c5a435d6a834bc10fcf3db41b;hp=6bac2c63feefffeaf677fd636e39886f8dc92ebd;hpb=e6cbdf497326c42b375dbb118ca1ec0a59644c32;p=cubemap

diff --git a/server.cpp b/server.cpp
index 6bac2c6..5026deb 100644
--- a/server.cpp
+++ b/server.cpp
@@ -69,8 +69,8 @@ Server::Server()
 
 Server::~Server()
 {
-	for (size_t i = 0; i < streams.size(); ++i) {
-		delete streams[i];
+	for (Stream *stream : streams) {
+		delete stream;
 	}
 
 	safe_close(epoll_fd);
@@ -81,10 +81,8 @@ vector<ClientStats> Server::get_client_stats() const
 	vector<ClientStats> ret;
 
 	MutexLock lock(&mutex);
-	for (map<int, Client>::const_iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		ret.push_back(client_it->second.get_stats());
+	for (const auto &fd_and_client : clients) {
+		ret.push_back(fd_and_client.second.get_stats());
 	}
 	return ret;
 }
@@ -124,11 +122,11 @@ void Server::do_work()
 
 		// Process each client where its stream has new data,
 		// even if there was no socket activity.
-		for (size_t i = 0; i < streams.size(); ++i) {
+		for (Stream *stream : streams) {
 			vector<Client *> to_process;
-			swap(streams[i]->to_process, to_process);
-			for (size_t i = 0; i < to_process.size(); ++i) {
-				process_client(to_process[i]);
+			swap(stream->to_process, to_process);
+			for (Client *client : to_process) {
+				process_client(client);
 			}
 		}
 
@@ -152,7 +150,7 @@
 			// If this client doesn't exist anymore, just ignore it
 			// (it was deleted earlier).
-			map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
+			auto client_it = clients.find(connect_time_and_fd.second);
 			if (client_it == clients.end()) {
 				clients_ordered_by_connect_time.pop();
 				continue;
@@ -189,20 +187,16 @@ CubemapStateProto Server::serialize()
 	//
 	// TODO: Do this when clients are added back from serialized state instead;
 	// it would probably be less wasteful.
-	for (map<int, Client>::iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		skip_lost_data(&client_it->second);
+	for (auto &fd_and_client : clients) {
+		skip_lost_data(&fd_and_client.second);
 	}
 
 	CubemapStateProto serialized;
-	for (map<int, Client>::const_iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		serialized.add_clients()->MergeFrom(client_it->second.serialize());
+	for (const auto &fd_and_client : clients) {
+		serialized.add_clients()->MergeFrom(fd_and_client.second.serialize());
 	}
-	for (size_t i = 0; i < streams.size(); ++i) {
-		serialized.add_streams()->MergeFrom(streams[i]->serialize());
+	for (Stream *stream : streams) {
+		serialized.add_streams()->MergeFrom(stream->serialize());
 	}
 	return serialized;
 }
@@ -216,10 +210,9 @@ void Server::add_client_deferred(int sock, Acceptor *acceptor)
 void Server::add_client(int sock, Acceptor *acceptor)
 {
 	const bool is_tls = acceptor->is_tls();
-	pair<map<int, Client>::iterator, bool> ret =
-		clients.insert(make_pair(sock, Client(sock)));
-	assert(ret.second == true);  // Should not already exist.
-	Client *client_ptr = &ret.first->second;
+	auto inserted = clients.insert(make_pair(sock, Client(sock)));
+	assert(inserted.second == true);  // Should not already exist.
+	Client *client_ptr = &inserted.first->second;
 
 	// Connection timestamps must be nondecreasing. I can't find any guarantee
 	// that even the monotonic clock can't go backwards by a small amount
@@ -274,10 +267,9 @@ void Server::add_client_from_serialized(const ClientProto &client)
 	} else {
 		stream = streams[stream_index];
 	}
-	pair<map<int, Client>::iterator, bool> ret =
-		clients.insert(make_pair(client.sock(), Client(client, stream)));
-	assert(ret.second == true);  // Should not already exist.
-	Client *client_ptr = &ret.first->second;
+	auto inserted = clients.insert(make_pair(client.sock(), Client(client, stream)));
+	assert(inserted.second == true);  // Should not already exist.
+	Client *client_ptr = &inserted.first->second;
 
 	// Connection timestamps must be nondecreasing.
 	assert(clients_ordered_by_connect_time.empty() ||
@@ -992,13 +984,13 @@ void Server::process_queued_data()
 	{
 		MutexLock lock(&queued_clients_mutex);
 
-		for (size_t i = 0; i < queued_add_clients.size(); ++i) {
-			add_client(queued_add_clients[i].first, queued_add_clients[i].second);
+		for (const pair<int, Acceptor *> &id_and_acceptor : queued_add_clients) {
+			add_client(id_and_acceptor.first, id_and_acceptor.second);
 		}
 		queued_add_clients.clear();
 	}
 
-	for (size_t i = 0; i < streams.size(); ++i) {
-		streams[i]->process_queued_data();
+	for (Stream *stream : streams) {
+		stream->process_queued_data();
 	}
 }
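
The whole change follows two C++11 patterns: explicit iterator loops become range-based for loops (with the pair elements given names like fd_and_client), and spelled-out iterator types from insert() and find() become auto. Below is a minimal, self-contained sketch of the same rewrite, with a hypothetical Widget type standing in for cubemap's Client; it is illustrative code only, not part of this repository.

#include <cassert>
#include <cstdio>
#include <map>
#include <string>
#include <utility>

struct Widget {
	std::string name;
};

int main()
{
	std::map<int, Widget> widgets;
	widgets.insert(std::make_pair(3, Widget{"first"}));
	widgets.insert(std::make_pair(7, Widget{"second"}));

	// Before (C++03, like the "-" lines): the iterator type is spelled out.
	for (std::map<int, Widget>::const_iterator it = widgets.begin();
	     it != widgets.end();
	     ++it) {
		std::printf("fd=%d name=%s\n", it->first, it->second.name.c_str());
	}

	// After (C++11, like the "+" lines): each element is a
	// pair<const int, Widget>, bound by reference and named after its
	// contents, as the diff does with fd_and_client.
	for (const auto &fd_and_widget : widgets) {
		std::printf("fd=%d name=%s\n", fd_and_widget.first,
		            fd_and_widget.second.name.c_str());
	}

	// The insert() rewrite: auto replaces the spelled-out
	// pair<map<int, Widget>::iterator, bool> return type.
	auto inserted = widgets.insert(std::make_pair(9, Widget{"third"}));
	assert(inserted.second);  // Key 9 should not already exist.
	Widget *widget_ptr = &inserted.first->second;
	std::printf("inserted %s\n", widget_ptr->name.c_str());
	return 0;
}

Note that range-based for over a map yields pair<const int, Widget>, not pair<int, Widget>; binding const auto & (or auto &, where the diff mutates the client) sidesteps getting that type wrong, which is presumably why the diff leaves the element types to auto in the map loops but spells out Stream * and Client * in the vector loops.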