From 3b73724f08274a2c5a435d6a834bc10fcf3db41b Mon Sep 17 00:00:00 2001
From: "Steinar H. Gunderson" <sgunderson@bigfoot.com>
Date: Thu, 5 Apr 2018 09:40:42 +0200
Subject: [PATCH] Use C++11 range-based for loops and auto wherever it makes
 sense.

---
 config.cpp     |  53 ++++++++++++------------
 httpinput.cpp  |  37 ++++++++---------
 main.cpp       | 108 +++++++++++++++++++------------------------------
 server.cpp     |  58 ++++++++++++--------------
 serverpool.cpp |   8 ++--
 stream.cpp     |  17 ++++----
 udpinput.cpp   |   6 +--
 7 files changed, 124 insertions(+), 163 deletions(-)

diff --git a/config.cpp b/config.cpp
index 47118b4..0573f54 100644
--- a/config.cpp
+++ b/config.cpp
@@ -144,16 +144,16 @@ bool read_config(const string &filename, vector<ConfigLine> *lines)
 
 bool fetch_config_string(const vector<ConfigLine> &config, const string &keyword, string *value)
 {
-	for (unsigned i = 0; i < config.size(); ++i) {
-		if (config[i].keyword != keyword) {
+	for (const ConfigLine &line : config) {
+		if (line.keyword != keyword) {
 			continue;
 		}
-		if (config[i].parameters.size() > 0 ||
-		    config[i].arguments.size() != 1) {
+		if (line.parameters.size() > 0 ||
+		    line.arguments.size() != 1) {
 			log(ERROR, "'%s' takes one argument and no parameters", keyword.c_str());
 			return false;
 		}
-		*value = config[i].arguments[0];
+		*value = line.arguments[0];
 		return true;
 	}
 	return false;
@@ -161,16 +161,16 @@ bool fetch_config_string(const vector<ConfigLine> &config, const string &keyword
 
 bool fetch_config_int(const vector<ConfigLine> &config, const string &keyword, int *value)
 {
-	for (unsigned i = 0; i < config.size(); ++i) {
-		if (config[i].keyword != keyword) {
+	for (const ConfigLine &line : config) {
+		if (line.keyword != keyword) {
 			continue;
 		}
-		if (config[i].parameters.size() > 0 ||
-		    config[i].arguments.size() != 1) {
+		if (line.parameters.size() > 0 ||
+		    line.arguments.size() != 1) {
 			log(ERROR, "'%s' takes one argument and no parameters", keyword.c_str());
 			return false;
 		}
-		*value = atoi(config[i].arguments[0].c_str());  // TODO: verify int validity.
+		*value = atoi(line.arguments[0].c_str());  // TODO: verify int validity.
 		return true;
 	}
 	return false;
@@ -215,7 +215,7 @@ bool parse_tls_parameters(const map<string, string> &parameters, AcceptorConfig
 {
 	bool has_cert = false, has_key = false;
 
-	map<string, string>::const_iterator tls_cert_it = parameters.find("tls_cert");
+	auto tls_cert_it = parameters.find("tls_cert");
 	if (tls_cert_it != parameters.end()) {
 		if (!load_file_to_string(tls_cert_it->second, 1048576, &acceptor->certificate_chain)) {
 			return false;
@@ -240,7 +240,7 @@ bool parse_tls_parameters(const map<string, string> &parameters, AcceptorConfig
 		has_cert = true;
 	}
 
-	map<string, string>::const_iterator tls_key_it = parameters.find("tls_key");
+	auto tls_key_it = parameters.find("tls_key");
 	if (tls_key_it != parameters.end()) {
 		if (!load_file_to_string(tls_key_it->second, 1048576, &acceptor->private_key)) {
 			return false;
@@ -325,7 +325,7 @@ bool parse_stream(const ConfigLine &line, Config *config)
 	StreamConfig stream;
 	stream.url = line.arguments[0];
 
-	map<string, string>::const_iterator src_it = line.parameters.find("src");
+	const auto src_it = line.parameters.find("src");
 	if (src_it == line.parameters.end()) {
 		log(WARNING, "stream '%s' has no src= attribute, clients will not get any data.",
 			stream.url.c_str());
@@ -334,14 +334,14 @@ bool parse_stream(const ConfigLine &line, Config *config)
 		// TODO: Verify that the URL is parseable?
 	}
 
-	map<string, string>::const_iterator backlog_it = line.parameters.find("backlog_size");
+	const auto backlog_it = line.parameters.find("backlog_size");
 	if (backlog_it == line.parameters.end()) {
 		stream.backlog_size = DEFAULT_BACKLOG_SIZE;
 	} else {
 		stream.backlog_size = atoi(backlog_it->second.c_str());
 	}
 
-	map<string, string>::const_iterator prebuffer_it = line.parameters.find("force_prebuffer");
+	const auto prebuffer_it = line.parameters.find("force_prebuffer");
 	if (prebuffer_it == line.parameters.end()) {
 		stream.prebuffering_bytes = 0;
 	} else {
@@ -349,7 +349,7 @@ bool parse_stream(const ConfigLine &line, Config *config)
 	}
 
 	// Parse output encoding.
-	map<string, string>::const_iterator encoding_parm_it = line.parameters.find("encoding");
+	const auto encoding_parm_it = line.parameters.find("encoding");
 	if (encoding_parm_it == line.parameters.end() ||
 	    encoding_parm_it->second == "raw") {
 		stream.encoding = StreamConfig::STREAM_ENCODING_RAW;
@@ -361,7 +361,7 @@ bool parse_stream(const ConfigLine &line, Config *config)
 	}
 
 	// Parse input encoding.
-	map<string, string>::const_iterator src_encoding_parm_it = line.parameters.find("src_encoding");
+	const auto src_encoding_parm_it = line.parameters.find("src_encoding");
 	if (src_encoding_parm_it == line.parameters.end() ||
 	    src_encoding_parm_it->second == "metacube") {
 		stream.src_encoding = StreamConfig::STREAM_ENCODING_METACUBE;
@@ -373,7 +373,7 @@ bool parse_stream(const ConfigLine &line, Config *config)
 	}
 
 	// Parse the pacing rate, converting from kilobits to bytes as needed.
-	map<string, string>::const_iterator pacing_rate_it = line.parameters.find("pacing_rate_kbit");
+	const auto pacing_rate_it = line.parameters.find("pacing_rate_kbit");
 	if (pacing_rate_it == line.parameters.end()) {
 		stream.pacing_rate = ~0U;
 	} else {
@@ -398,7 +398,7 @@ bool parse_udpstream(const ConfigLine &line, Config *config)
 		return false;
 	}
 
-	map<string, string>::const_iterator src_it = line.parameters.find("src");
+	const auto src_it = line.parameters.find("src");
 	if (src_it == line.parameters.end()) {
 		// This is pretty meaningless, but OK, consistency is good.
 		log(WARNING, "udpstream to %s has no src= attribute, clients will not get any data.",
@@ -409,7 +409,7 @@ bool parse_udpstream(const ConfigLine &line, Config *config)
 	}
 
 	// Parse the pacing rate, converting from kilobits to bytes as needed.
-	map<string, string>::const_iterator pacing_rate_it = line.parameters.find("pacing_rate_kbit");
+	const auto pacing_rate_it = line.parameters.find("pacing_rate_kbit");
 	if (pacing_rate_it == line.parameters.end()) {
 		udpstream.pacing_rate = ~0U;
 	} else {
@@ -417,7 +417,7 @@ bool parse_udpstream(const ConfigLine &line, Config *config)
 	}
 
 	// Parse the TTL. The same value is used for unicast and multicast.
-	map<string, string>::const_iterator ttl_it = line.parameters.find("ttl");
+	const auto ttl_it = line.parameters.find("ttl");
 	if (ttl_it == line.parameters.end()) {
 		udpstream.ttl = -1;
 	} else {
@@ -425,7 +425,7 @@ bool parse_udpstream(const ConfigLine &line, Config *config)
 	}
 
 	// Parse the multicast interface index.
-	map<string, string>::const_iterator multicast_iface_it = line.parameters.find("multicast_output_interface");
+	const auto multicast_iface_it = line.parameters.find("multicast_output_interface");
 	if (multicast_iface_it == line.parameters.end()) {
 		udpstream.multicast_iface_index = -1;
 	} else {
@@ -451,7 +451,7 @@ bool parse_gen204(const ConfigLine &line, Config *config)
 	gen204.url = line.arguments[0];
 
 	// Parse the CORS origin, if it exists.
-	map<string, string>::const_iterator allow_origin_it = line.parameters.find("allow_origin");
+	const auto allow_origin_it = line.parameters.find("allow_origin");
 	if (allow_origin_it != line.parameters.end()) {
 		gen204.allow_origin = allow_origin_it->second;
 	}
@@ -468,7 +468,7 @@ bool parse_error_log(const ConfigLine &line, Config *config)
 	}
 
 	LogConfig log_config;
-	map<string, string>::const_iterator type_it = line.parameters.find("type");
+	const auto type_it = line.parameters.find("type");
 	if (type_it == line.parameters.end()) {
 		log(ERROR, "'error_log' has no type= parameter");
 		return false;
@@ -487,7 +487,7 @@ bool parse_error_log(const ConfigLine &line, Config *config)
 	}
 
 	if (log_config.type == LogConfig::LOG_TYPE_FILE) {
-		map<string, string>::const_iterator filename_it = line.parameters.find("filename");
+		const auto filename_it = line.parameters.find("filename");
 		if (filename_it == line.parameters.end()) {
 			log(ERROR, "error_log type 'file' with no filename= parameter");
 			return false;
@@ -536,8 +536,7 @@ bool parse_config(const string &filename, Config *config)
 
 	fetch_config_string(lines, "access_log", &config->access_log_file);
 
-	for (size_t i = 0; i < lines.size(); ++i) {
-		const ConfigLine &line = lines[i];
+	for (const ConfigLine &line : lines) {
 		if (line.keyword == "num_servers" ||
 		    line.keyword == "stats_file" ||
 		    line.keyword == "stats_interval" ||
diff --git a/httpinput.cpp b/httpinput.cpp
index 2ad9746..c0cfc03 100644
--- a/httpinput.cpp
+++ b/httpinput.cpp
@@ -264,8 +264,7 @@ bool HTTPInput::parse_response(const string &request)
 
 	// Remove “Content-encoding: metacube”.
 	// TODO: Make case-insensitive.
-	multimap<string, string>::iterator encoding_it =
-		parameters.find("Content-encoding");
+	const auto encoding_it = parameters.find("Content-encoding");
 	if (encoding_it != parameters.end() && encoding_it->second == "metacube") {
 		parameters.erase(encoding_it);
 	}
@@ -276,13 +275,11 @@ bool HTTPInput::parse_response(const string &request)
 	if (parameters.count("Server") == 0) {
 		parameters.insert(make_pair("Server", SERVER_IDENTIFICATION));
 	} else {
-		for (multimap<string, string>::iterator it = parameters.begin();
-		     it != parameters.end();
-		     ++it) {
-			if (it->first != "Server") {
+		for (auto &key_and_value : parameters) {
+			if (key_and_value.first != "Server") {
 				continue;
 			}
-			it->second = SERVER_IDENTIFICATION " (reflecting: " + it->second + ")";
+			key_and_value.second = SERVER_IDENTIFICATION " (reflecting: " + key_and_value.second + ")";
 		}
 	}
 
@@ -293,14 +290,12 @@ bool HTTPInput::parse_response(const string &request)
 
 	// Construct the new HTTP header.
http_header = "HTTP/1.0 200 OK\r\n"; - for (multimap::iterator it = parameters.begin(); - it != parameters.end(); - ++it) { - http_header.append(it->first + ": " + it->second + "\r\n"); + for (const auto &key_and_value : parameters) { + http_header.append(key_and_value.first + ": " + key_and_value.second + "\r\n"); } - for (size_t i = 0; i < stream_indices.size(); ++i) { - servers->set_header(stream_indices[i], http_header, stream_header); + for (int stream_index : stream_indices) { + servers->set_header(stream_index, http_header, stream_header); } return true; @@ -358,8 +353,8 @@ void HTTPInput::do_work() response.clear(); pending_data.clear(); has_metacube_header = false; - for (size_t i = 0; i < stream_indices.size(); ++i) { - servers->set_header(stream_indices[i], "", ""); + for (int stream_index : stream_indices) { + servers->set_header(stream_index, "", ""); } { @@ -585,8 +580,8 @@ void HTTPInput::process_data(char *ptr, size_t bytes) } if (encoding == Input::INPUT_ENCODING_RAW) { - for (size_t i = 0; i < stream_indices.size(); ++i) { - servers->add_data(stream_indices[i], ptr, bytes, /*metacube_flags=*/0); + for (int stream_index : stream_indices) { + servers->add_data(stream_index, ptr, bytes, /*metacube_flags=*/0); } return; } @@ -672,12 +667,12 @@ void HTTPInput::process_data(char *ptr, size_t bytes) char *inner_data = pending_data.data() + sizeof(metacube2_block_header); if (flags & METACUBE_FLAGS_HEADER) { stream_header = string(inner_data, inner_data + size); - for (size_t i = 0; i < stream_indices.size(); ++i) { - servers->set_header(stream_indices[i], http_header, stream_header); + for (int stream_index : stream_indices) { + servers->set_header(stream_index, http_header, stream_header); } } - for (size_t i = 0; i < stream_indices.size(); ++i) { - servers->add_data(stream_indices[i], inner_data, size, flags); + for (int stream_index : stream_indices) { + servers->add_data(stream_index, inner_data, size, flags); } } diff --git a/main.cpp b/main.cpp index 33ed9df..7022655 100644 --- a/main.cpp +++ b/main.cpp @@ -97,15 +97,13 @@ CubemapStateProto collect_state(const timespec &serialize_start, CubemapStateProto state = servers->serialize(); // Fills streams() and clients(). 
 	state.set_serialize_start_sec(serialize_start.tv_sec);
 	state.set_serialize_start_usec(serialize_start.tv_nsec / 1000);
-	
-	for (size_t i = 0; i < acceptors.size(); ++i) {
-		state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
+
+	for (Acceptor *acceptor : acceptors) {
+		state.add_acceptors()->MergeFrom(acceptor->serialize());
 	}
 
-	for (multimap<InputKey, InputWithRefcount>::const_iterator input_it = inputs.begin();
-	     input_it != inputs.end();
-	     ++input_it) {
-		state.add_inputs()->MergeFrom(input_it->second.input->serialize());
+	for (const auto &key_and_input_with_refcount : inputs) {
+		state.add_inputs()->MergeFrom(key_and_input_with_refcount.second.input->serialize());
 	}
 
 	return state;
@@ -117,11 +115,9 @@ vector<Acceptor *> create_acceptors(
 	map<AcceptorConfig, Acceptor *> *deserialized_acceptors)
 {
 	vector<Acceptor *> acceptors;
-	for (unsigned i = 0; i < config.acceptors.size(); ++i) {
-		const AcceptorConfig &acceptor_config = config.acceptors[i];
+	for (const AcceptorConfig &acceptor_config : config.acceptors) {
 		Acceptor *acceptor = NULL;
-		map<AcceptorConfig, Acceptor *>::iterator deserialized_acceptor_it =
-			deserialized_acceptors->find(acceptor_config);
+		const auto deserialized_acceptor_it = deserialized_acceptors->find(acceptor_config);
 		if (deserialized_acceptor_it != deserialized_acceptors->end()) {
 			acceptor = deserialized_acceptor_it->second;
 			deserialized_acceptors->erase(deserialized_acceptor_it);
@@ -136,12 +132,9 @@ vector<Acceptor *> create_acceptors(
 	}
 
 	// Close all acceptors that are no longer in the configuration file.
-	for (map<AcceptorConfig, Acceptor *>::iterator
-	         acceptor_it = deserialized_acceptors->begin();
-	     acceptor_it != deserialized_acceptors->end();
-	     ++acceptor_it) {
-		acceptor_it->second->close_socket();
-		delete acceptor_it->second;
+	for (auto &config_and_acceptor : *deserialized_acceptors) {
+		config_and_acceptor.second->close_socket();
+		delete config_and_acceptor.second;
 	}
 
 	return acceptors;
@@ -171,14 +164,12 @@ void create_config_input(const string &src, Input::Encoding encoding,
 void create_config_inputs(const Config &config,
                           multimap<InputKey, InputWithRefcount> *inputs)
 {
-	for (unsigned i = 0; i < config.streams.size(); ++i) {
-		const StreamConfig &stream_config = config.streams[i];
+	for (const StreamConfig &stream_config : config.streams) {
 		if (stream_config.src != "delete") {
 			create_config_input(stream_config.src, Input::Encoding(stream_config.src_encoding), inputs);
 		}
 	}
-	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
-		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
+	for (const UDPStreamConfig &udpstream_config : config.udpstreams) {
 		create_config_input(udpstream_config.src, Input::INPUT_ENCODING_RAW, inputs);
 	}
 }
@@ -189,8 +180,7 @@ void create_streams(const Config &config,
 {
 	// HTTP streams.
 	set<string> expecting_urls = deserialized_urls;
-	for (unsigned i = 0; i < config.streams.size(); ++i) {
-		const StreamConfig &stream_config = config.streams[i];
+	for (const StreamConfig &stream_config : config.streams) {
 		int stream_index;
 
 		expecting_urls.erase(stream_config.url);
@@ -223,7 +213,7 @@ void create_streams(const Config &config,
 		string src = stream_config.src;
 		Input::Encoding src_encoding = Input::Encoding(stream_config.src_encoding);
 		if (!src.empty()) {
-			multimap<InputKey, InputWithRefcount>::iterator input_it = inputs->find(make_pair(src, src_encoding));
+			const auto input_it = inputs->find(make_pair(src, src_encoding));
 			if (input_it != inputs->end()) {
 				input_it->second.input->add_destination(stream_index);
 				++input_it->second.refcount;
@@ -232,10 +222,7 @@ void create_streams(const Config &config,
 	}
 
 	// Warn about any streams servers we've lost.
-	for (set<string>::const_iterator stream_it = expecting_urls.begin();
-	     stream_it != expecting_urls.end();
-	     ++stream_it) {
-		string url = *stream_it;
+	for (const string &url : expecting_urls) {
 		log(WARNING, "stream '%s' disappeared from the configuration file. "
 		             "It will not be deleted, but clients will not get any new inputs. "
 		             "If you really meant to delete it, set src=delete and reload.",
@@ -243,8 +230,7 @@ void create_streams(const Config &config,
 	}
 
 	// UDP streams.
-	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
-		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
+	for (const UDPStreamConfig &udpstream_config : config.udpstreams) {
 		int stream_index = servers->add_udpstream(
 			udpstream_config.dst,
 			udpstream_config.pacing_rate,
@@ -253,7 +239,7 @@ void create_streams(const Config &config,
 
 		string src = udpstream_config.src;
 		if (!src.empty()) {
-			multimap<InputKey, InputWithRefcount>::iterator input_it = inputs->find(make_pair(src, Input::INPUT_ENCODING_RAW));
+			auto input_it = inputs->find(make_pair(src, Input::INPUT_ENCODING_RAW));
 			assert(input_it != inputs->end());
 			input_it->second.input->add_destination(stream_index);
 			++input_it->second.refcount;
@@ -261,20 +247,19 @@ void create_streams(const Config &config,
 	}
 
 	// HTTP gen204 endpoints.
-	for (unsigned i = 0; i < config.pings.size(); ++i) {
-		const Gen204Config &ping_config = config.pings[i];
+	for (const Gen204Config &ping_config : config.pings) {
 		servers->add_gen204(ping_config.url, ping_config.allow_origin);
 	}
 }
 
 void open_logs(const vector<LogConfig> &log_destinations)
 {
-	for (size_t i = 0; i < log_destinations.size(); ++i) {
-		if (log_destinations[i].type == LogConfig::LOG_TYPE_FILE) {
-			add_log_destination_file(log_destinations[i].filename);
-		} else if (log_destinations[i].type == LogConfig::LOG_TYPE_CONSOLE) {
+	for (const LogConfig &log_destination : log_destinations) {
+		if (log_destination.type == LogConfig::LOG_TYPE_FILE) {
+			add_log_destination_file(log_destination.filename);
+		} else if (log_destination.type == LogConfig::LOG_TYPE_CONSOLE) {
 			add_log_destination_console();
-		} else if (log_destinations[i].type == LogConfig::LOG_TYPE_SYSLOG) {
+		} else if (log_destination.type == LogConfig::LOG_TYPE_SYSLOG) {
 			add_log_destination_syslog();
 		} else {
 			assert(false);
@@ -324,8 +309,7 @@ bool dry_run_config(const string &argv0, const string &config_filename)
 
 void find_deleted_streams(const Config &config, set<string> *deleted_urls)
 {
-	for (unsigned i = 0; i < config.streams.size(); ++i) {
-		const StreamConfig &stream_config = config.streams[i];
+	for (const StreamConfig &stream_config : config.streams) {
 		if (stream_config.src == "delete") {
 			log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
 			deleted_urls->insert(stream_config.url);
@@ -447,18 +431,16 @@ start:
 
 	// Deserialize the streams.
 	map<string, string> stream_headers_for_url;  // See below.
-	for (int i = 0; i < loaded_state.streams_size(); ++i) {
-		const StreamProto &stream = loaded_state.streams(i);
-
+	for (const StreamProto &stream : loaded_state.streams()) {
 		if (deleted_urls.count(stream.url()) != 0) {
 			// Delete the stream backlogs.
-			for (int j = 0; j < stream.data_fds_size(); ++j) {
-				safe_close(stream.data_fds(j));
+			for (const int fd : stream.data_fds()) {
+				safe_close(fd);
 			}
 		} else {
 			vector<int> data_fds;
-			for (int j = 0; j < stream.data_fds_size(); ++j) {
-				data_fds.push_back(stream.data_fds(j));
+			for (const int fd : stream.data_fds()) {
+				data_fds.push_back(fd);
 			}
 
 			servers->add_stream_from_serialized(stream, data_fds);
@@ -469,9 +451,7 @@ start:
 	}
 
 	// Deserialize the inputs. Note that we don't actually add them to any stream yet.
-	for (int i = 0; i < loaded_state.inputs_size(); ++i) {
-		InputProto serialized_input = loaded_state.inputs(i);
-
+	for (const InputProto &serialized_input : loaded_state.inputs()) {
 		InputWithRefcount iwr;
 		iwr.input = create_input(serialized_input);
 		iwr.refcount = 0;
@@ -484,14 +464,14 @@ start:
 	}
 
 	// Deserialize the acceptors.
-	for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
+	for (const AcceptorProto &serialized_acceptor : loaded_state.acceptors()) {
 		AcceptorConfig config;
-		config.addr = extract_address_from_acceptor_proto(loaded_state.acceptors(i));
-		config.certificate_chain = loaded_state.acceptors(i).certificate_chain();
-		config.private_key = loaded_state.acceptors(i).private_key();
+		config.addr = extract_address_from_acceptor_proto(serialized_acceptor);
+		config.certificate_chain = serialized_acceptor.certificate_chain();
+		config.private_key = serialized_acceptor.private_key();
 		deserialized_acceptors.insert(make_pair(
 			config,
-			new Acceptor(loaded_state.acceptors(i))));
+			new Acceptor(serialized_acceptor)));
 	}
 
 	log(INFO, "Deserialization done.");
@@ -528,8 +508,7 @@ start:
 	servers->run();
 
 	// Now delete all inputs that are no longer in use, and start the others.
-	for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
-	     input_it != inputs.end(); ) {
+	for (auto input_it = inputs.begin(); input_it != inputs.end(); ) {
 		if (input_it->second.refcount == 0) {
 			if (input_it->first.second == Input::INPUT_ENCODING_RAW) {
 				log(WARNING, "Raw input '%s' no longer in use, closing.",
@@ -558,9 +537,8 @@ start:
 	InputStatsThread *input_stats_thread = NULL;
 	if (!config.input_stats_file.empty()) {
 		vector<Input *> inputs_no_refcount;
-		for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
-		     input_it != inputs.end(); ++input_it) {
-			inputs_no_refcount.push_back(input_it->second.input);
+		for (const auto &key_and_input_with_refcount : inputs) {
+			inputs_no_refcount.push_back(key_and_input_with_refcount.second.input);
 		}
 
 		input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
@@ -603,13 +581,11 @@ start:
 		stats_thread->stop();
 		delete stats_thread;
 	}
-	for (size_t i = 0; i < acceptors.size(); ++i) {
-		acceptors[i]->stop();
+	for (Acceptor *acceptor : acceptors) {
+		acceptor->stop();
 	}
-	for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
-	     input_it != inputs.end();
-	     ++input_it) {
-		input_it->second.input->stop();
+	for (const auto &key_and_input_with_refcount : inputs) {
+		key_and_input_with_refcount.second.input->stop();
 	}
 	servers->stop();
 
diff --git a/server.cpp b/server.cpp
index 6bac2c6..5026deb 100644
--- a/server.cpp
+++ b/server.cpp
@@ -69,8 +69,8 @@ Server::Server()
 
 Server::~Server()
 {
-	for (size_t i = 0; i < streams.size(); ++i) {
-		delete streams[i];
+	for (Stream *stream : streams) {
+		delete stream;
 	}
 
 	safe_close(epoll_fd);
@@ -81,10 +81,8 @@ vector<ClientStats> Server::get_client_stats() const
 	vector<ClientStats> ret;
 
 	MutexLock lock(&mutex);
-	for (map<int, Client>::const_iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		ret.push_back(client_it->second.get_stats());
+	for (const auto &fd_and_client : clients) {
+		ret.push_back(fd_and_client.second.get_stats());
 	}
 	return ret;
 }
@@ -124,11 +122,11 @@ void Server::do_work()
 
 		// Process each client where its stream has new data,
 		// even if there was no socket activity.
-		for (size_t i = 0; i < streams.size(); ++i) {
+		for (Stream *stream : streams) {
 			vector<Client *> to_process;
-			swap(streams[i]->to_process, to_process);
-			for (size_t i = 0; i < to_process.size(); ++i) {
-				process_client(to_process[i]);
+			swap(stream->to_process, to_process);
+			for (Client *client : to_process) {
+				process_client(client);
 			}
 		}
 
@@ -152,7 +150,7 @@ void Server::do_work()
 
 			// If this client doesn't exist anymore, just ignore it
 			// (it was deleted earlier).
-			map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
+			auto client_it = clients.find(connect_time_and_fd.second);
 			if (client_it == clients.end()) {
 				clients_ordered_by_connect_time.pop();
 				continue;
@@ -189,20 +187,16 @@ CubemapStateProto Server::serialize()
 	//
 	// TODO: Do this when clients are added back from serialized state instead;
 	// it would probably be less wasteful.
-	for (map<int, Client>::iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		skip_lost_data(&client_it->second);
+	for (auto &fd_and_client : clients) {
+		skip_lost_data(&fd_and_client.second);
 	}
 
 	CubemapStateProto serialized;
-	for (map<int, Client>::const_iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		serialized.add_clients()->MergeFrom(client_it->second.serialize());
+	for (const auto &fd_and_client : clients) {
+		serialized.add_clients()->MergeFrom(fd_and_client.second.serialize());
 	}
-	for (size_t i = 0; i < streams.size(); ++i) {
-		serialized.add_streams()->MergeFrom(streams[i]->serialize());
+	for (Stream *stream : streams) {
+		serialized.add_streams()->MergeFrom(stream->serialize());
 	}
 	return serialized;
 }
@@ -216,10 +210,9 @@ void Server::add_client_deferred(int sock, Acceptor *acceptor)
 void Server::add_client(int sock, Acceptor *acceptor)
 {
 	const bool is_tls = acceptor->is_tls();
-	pair<map<int, Client>::iterator, bool> ret =
-		clients.insert(make_pair(sock, Client(sock)));
-	assert(ret.second == true);  // Should not already exist.
-	Client *client_ptr = &ret.first->second;
+	auto inserted = clients.insert(make_pair(sock, Client(sock)));
+	assert(inserted.second == true);  // Should not already exist.
+	Client *client_ptr = &inserted.first->second;
 
 	// Connection timestamps must be nondecreasing. I can't find any guarantee
 	// that even the monotonic clock can't go backwards by a small amount
@@ -274,10 +267,9 @@ void Server::add_client_from_serialized(const ClientProto &client)
 	} else {
 		stream = streams[stream_index];
 	}
-	pair<map<int, Client>::iterator, bool> ret =
-		clients.insert(make_pair(client.sock(), Client(client, stream)));
-	assert(ret.second == true);  // Should not already exist.
-	Client *client_ptr = &ret.first->second;
+	auto inserted = clients.insert(make_pair(client.sock(), Client(client, stream)));
+	assert(inserted.second == true);  // Should not already exist.
+	Client *client_ptr = &inserted.first->second;
 
 	// Connection timestamps must be nondecreasing.
 	assert(clients_ordered_by_connect_time.empty() ||
@@ -992,13 +984,13 @@ void Server::process_queued_data()
 	{
 		MutexLock lock(&queued_clients_mutex);
 
-		for (size_t i = 0; i < queued_add_clients.size(); ++i) {
-			add_client(queued_add_clients[i].first, queued_add_clients[i].second);
+		for (const pair<int, Acceptor *> &id_and_acceptor : queued_add_clients) {
+			add_client(id_and_acceptor.first, id_and_acceptor.second);
 		}
 		queued_add_clients.clear();
 	}
 
-	for (size_t i = 0; i < streams.size(); ++i) {
-		streams[i]->process_queued_data();
+	for (Stream *stream : streams) {
+		stream->process_queued_data();
 	}
 }
diff --git a/serverpool.cpp b/serverpool.cpp
index ce1bcef..8556127 100644
--- a/serverpool.cpp
+++ b/serverpool.cpp
@@ -26,8 +26,8 @@ ServerPool::~ServerPool()
 {
 	delete[] servers;
 
-	for (size_t i = 0; i < udp_streams.size(); ++i) {
-		delete udp_streams[i];
+	for (UDPStream *udp_stream : udp_streams) {
+		delete udp_stream;
 	}
 }
 
@@ -49,8 +49,8 @@ CubemapStateProto ServerPool::serialize()
 			state.mutable_streams(j)->add_data_fds(local_state.streams(j).data_fds(0));
 		}
 	}
-	for (int j = 0; j < local_state.clients_size(); ++j) {
-		state.add_clients()->MergeFrom(local_state.clients(j));
+	for (const ClientProto &client : local_state.clients()) {
+		state.add_clients()->MergeFrom(client);
 	}
 }
 
diff --git a/stream.cpp b/stream.cpp
index 7a2c099..734110c 100644
--- a/stream.cpp
+++ b/stream.cpp
@@ -59,8 +59,7 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
 		exit(1);
 	}
 
-	for (int i = 0; i < serialized.suitable_starting_point_size(); ++i) {
-		ssize_t point = serialized.suitable_starting_point(i);
+	for (ssize_t point : serialized.suitable_starting_point()) {
 		if (point == -1) {
 			// Can happen when upgrading from before 1.1.3,
 			// where this was an optional field with -1 signifying
@@ -82,8 +81,8 @@ StreamProto Stream::serialize()
 	serialized.set_backlog_size(backlog_size);
 	serialized.set_prebuffering_bytes(prebuffering_bytes);
 	serialized.set_bytes_received(bytes_received);
-	for (size_t i = 0; i < suitable_starting_points.size(); ++i) {
-		serialized.add_suitable_starting_point(suitable_starting_points[i]);
+	for (size_t point : suitable_starting_points) {
+		serialized.add_suitable_starting_point(point);
 	}
 	serialized.set_url(url);
 	data_fd = -1;
@@ -292,8 +291,8 @@ void Stream::process_queued_data()
 	// data, and 10 kB is a very fine granularity in most streams.
 	static const int minimum_start_point_distance = 10240;
 	size_t byte_position = bytes_received;
-	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
-		if ((queued_data_copy[i].metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
+	for (const DataElement &elem : queued_data_copy) {
+		if ((elem.metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
 			size_t num_points = suitable_starting_points.size();
 			if (num_points >= 2 &&
 			    suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
@@ -302,13 +301,13 @@ void Stream::process_queued_data()
 			}
 			suitable_starting_points.push_back(byte_position);
 		}
-		byte_position += queued_data_copy[i].data.iov_len;
+		byte_position += elem.data.iov_len;
 	}
 
 	add_data_raw(queued_data_copy);
 	remove_obsolete_starting_points();
-	for (size_t i = 0; i < queued_data_copy.size(); ++i) {
-		char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
+	for (const DataElement &elem : queued_data_copy) {
+		char *data = reinterpret_cast<char *>(elem.data.iov_base);
 		delete[] data;
 	}
 
diff --git a/udpinput.cpp b/udpinput.cpp
index e58ef91..dada7b0 100644
--- a/udpinput.cpp
+++ b/udpinput.cpp
@@ -228,9 +228,9 @@ void UDPInput::do_work()
 			stats.bytes_received += ret;
 			stats.data_bytes_received += ret;
 		}
-		
-		for (size_t i = 0; i < stream_indices.size(); ++i) {
-			servers->add_data(stream_indices[i], packet_buf, ret, /*metacube_flags=*/0);
+
+		for (size_t stream_index : stream_indices) {
+			servers->add_data(stream_index, packet_buf, ret, /*metacube_flags=*/0);
 		}
 	}
 }
-- 
2.39.2
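
Appendix: the idioms above, in isolation. The following sketches are
standalone and use invented data and names; they are illustrations of the
patterns in this patch, not cubemap code.

First, the pattern behind nearly every config.cpp hunk: a spelled-out map
iterator type becomes auto. Because the map being searched is const, find()
returns a const_iterator, and auto deduces exactly the type the old code
wrote by hand:

	#include <cstdio>
	#include <map>
	#include <string>

	using namespace std;

	int main()
	{
		const map<string, string> parameters = {
			{ "backlog_size", "1048576" },
			{ "encoding", "raw" },
		};

		// Old style: the iterator type written out in full.
		map<string, string>::const_iterator backlog_it = parameters.find("backlog_size");

		// New style: the exact same type, deduced.
		const auto encoding_it = parameters.find("encoding");

		if (backlog_it != parameters.end()) {
			printf("backlog_size = %s\n", backlog_it->second.c_str());
		}
		if (encoding_it != parameters.end()) {
			printf("encoding = %s\n", encoding_it->second.c_str());
		}
	}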
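Second, element references in range-based for. The httpinput.cpp loop that
rewrites the Server header iterates with auto & rather than a plain copy;
the reference is what makes the assignment to key_and_value.second stick.
A sketch of the difference (the header values here are made up):

	#include <cstdio>
	#include <map>
	#include <string>

	using namespace std;

	int main()
	{
		multimap<string, string> parameters = {
			{ "Content-type", "video/mp4" },
			{ "Server", "upstream/1.0" },
		};

		// auto & binds pair<const string, string> &, so the assignment
		// below modifies the multimap itself. With plain auto, each
		// element would be copied and the change silently discarded.
		for (auto &key_and_value : parameters) {
			if (key_and_value.first != "Server") {
				continue;
			}
			key_and_value.second = "reflector (reflecting: " + key_and_value.second + ")";
		}

		for (const auto &key_and_value : parameters) {  // read-only pass
			printf("%s: %s\n", key_and_value.first.c_str(), key_and_value.second.c_str());
		}
	}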
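Third, the one loop that deliberately stays iterator-based. The
input-deletion loop in main.cpp becomes "for (auto input_it = inputs.begin();
input_it != inputs.end(); )" (note the empty increment) instead of a
range-based for, presumably because it erases elements while walking the
container: erasing invalidates the current iterator, so the loop must advance
via the iterator that erase() returns, which a range-based for cannot express.
The shape of that pattern, on a toy refcount map:

	#include <cstdio>
	#include <map>
	#include <string>

	using namespace std;

	int main()
	{
		multimap<string, int> refcounts = {
			{ "http://example.com/a.ts", 2 },
			{ "http://example.com/b.ts", 0 },
			{ "http://example.com/c.ts", 1 },
		};

		for (auto it = refcounts.begin(); it != refcounts.end(); ) {
			if (it->second == 0) {
				it = refcounts.erase(it);  // erase() returns the next valid iterator (C++11)
			} else {
				++it;
			}
		}

		for (const auto &key_and_count : refcounts) {
			printf("%s: %d\n", key_and_count.first.c_str(), key_and_count.second);
		}
	}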
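Finally, the swap-to-local pattern in Server::do_work(). The old indexed
version nested two loops that both used i (the inner shadowing the outer),
a hazard the range-based rewrite removes by naming the elements. The swap
itself stays: moving the pending queue into a local vector empties the
member first, so code running inside the loop could append new work for a
later round without mutating the vector currently being iterated. A sketch,
with ints standing in for the Client pointers:

	#include <cstdio>
	#include <utility>
	#include <vector>

	using namespace std;

	struct Stream {
		vector<int> to_process;  // stand-in for vector<Client *>
	};

	int main()
	{
		Stream stream;
		stream.to_process = { 1, 2, 3 };

		vector<int> to_process;
		swap(stream.to_process, to_process);  // stream.to_process is now empty

		for (int client : to_process) {
			printf("processing client %d\n", client);
			if (client == 2) {
				stream.to_process.push_back(client);  // requeue without disturbing this loop
			}
		}
		printf("%zu client(s) requeued\n", stream.to_process.size());
	}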