X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=main.cpp;h=98d0948de80f9acd835ee5dbea56e8290b15acda;hp=c4c5bd4187d3a74850a721fb2d8b90f77835405d;hb=b757a4a2ce9d24835b52a185134835762af2f50c;hpb=cbdce14899459aca2e5331b6e1a969c359d28880

diff --git a/main.cpp b/main.cpp
index c4c5bd4..98d0948 100644
--- a/main.cpp
+++ b/main.cpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -22,7 +23,7 @@
 #include "input.h"
 #include "input_stats.h"
 #include "log.h"
-#include "markpool.h"
+#include "sa_compare.h"
 #include "serverpool.h"
 #include "state.pb.h"
 #include "stats.h"
@@ -34,10 +35,21 @@ using namespace std;
 
 AccessLogThread *access_log = NULL;
 ServerPool *servers = NULL;
-vector mark_pools;
 volatile bool hupped = false;
 volatile bool stopped = false;
 
+namespace {
+
+struct OrderByConnectionTime {
+	bool operator() (const ClientProto &a, const ClientProto &b) const {
+		if (a.connect_time_sec() != b.connect_time_sec())
+			return a.connect_time_sec() < b.connect_time_sec();
+		return a.connect_time_nsec() < b.connect_time_nsec();
+	}
+};
+
+} // namespace
+
 struct InputWithRefcount {
 	Input *input;
 	int refcount;
@@ -55,14 +67,14 @@ void do_nothing(int signum)
 {
 }
 
-CubemapStateProto collect_state(const timeval &serialize_start,
+CubemapStateProto collect_state(const timespec &serialize_start,
                                 const vector acceptors,
                                 const multimap inputs,
                                 ServerPool *servers)
 {
 	CubemapStateProto state = servers->serialize();  // Fills streams() and clients().
 	state.set_serialize_start_sec(serialize_start.tv_sec);
-	state.set_serialize_start_usec(serialize_start.tv_usec);
+	state.set_serialize_start_usec(serialize_start.tv_nsec / 1000);
 
 	for (size_t i = 0; i < acceptors.size(); ++i) {
 		state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
@@ -80,27 +92,28 @@ CubemapStateProto collect_state(const timeval &serialize_start,
 // Find all port statements in the configuration file, and create acceptors for htem.
 vector create_acceptors(
 	const Config &config,
-	map *deserialized_acceptors)
+	map *deserialized_acceptors)
 {
 	vector acceptors;
 	for (unsigned i = 0; i < config.acceptors.size(); ++i) {
 		const AcceptorConfig &acceptor_config = config.acceptors[i];
 		Acceptor *acceptor = NULL;
-		map::iterator deserialized_acceptor_it =
-			deserialized_acceptors->find(acceptor_config.port);
+		map::iterator deserialized_acceptor_it =
+			deserialized_acceptors->find(acceptor_config.addr);
 		if (deserialized_acceptor_it != deserialized_acceptors->end()) {
 			acceptor = deserialized_acceptor_it->second;
 			deserialized_acceptors->erase(deserialized_acceptor_it);
 		} else {
-			int server_sock = create_server_socket(acceptor_config.port, TCP_SOCKET);
-			acceptor = new Acceptor(server_sock, acceptor_config.port);
+			int server_sock = create_server_socket(acceptor_config.addr, TCP_SOCKET);
+			acceptor = new Acceptor(server_sock, acceptor_config.addr);
 		}
 		acceptor->run();
 		acceptors.push_back(acceptor);
 	}
 
 	// Close all acceptors that are no longer in the configuration file.
-	for (map::iterator acceptor_it = deserialized_acceptors->begin();
+	for (map::iterator
+	     acceptor_it = deserialized_acceptors->begin();
 	     acceptor_it != deserialized_acceptors->end();
 	     ++acceptor_it) {
 		acceptor_it->second->close_socket();
@@ -149,11 +162,6 @@ void create_streams(const Config &config,
                     const set &deserialized_urls,
                     multimap *inputs)
 {
-	for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
-		const MarkPoolConfig &mp_config = config.mark_pools[i];
-		mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));
-	}
-
 	// HTTP streams.
 	set expecting_urls = deserialized_urls;
 	for (unsigned i = 0; i < config.streams.size(); ++i) {
@@ -171,18 +179,18 @@ void create_streams(const Config &config,
 		if (deserialized_urls.count(stream_config.url) == 0) {
 			stream_index = servers->add_stream(stream_config.url,
 			                                   stream_config.backlog_size,
+			                                   stream_config.prebuffering_bytes,
 			                                   Stream::Encoding(stream_config.encoding));
 		} else {
 			stream_index = servers->lookup_stream_by_url(stream_config.url);
 			assert(stream_index != -1);
 			servers->set_backlog_size(stream_index, stream_config.backlog_size);
+			servers->set_prebuffering_bytes(stream_index, stream_config.prebuffering_bytes);
 			servers->set_encoding(stream_index,
 			                      Stream::Encoding(stream_config.encoding));
 		}
 
-		if (stream_config.mark_pool != -1) {
-			servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
-		}
+		servers->set_pacing_rate(stream_index, stream_config.pacing_rate);
 
 		string src = stream_config.src;
 		if (!src.empty()) {
@@ -208,11 +216,11 @@ void create_streams(const Config &config,
 	// UDP streams.
 	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
 		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
-		MarkPool *mark_pool = NULL;
-		if (udpstream_config.mark_pool != -1) {
-			mark_pool = mark_pools[udpstream_config.mark_pool];
-		}
-		int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool);
+		int stream_index = servers->add_udpstream(
+			udpstream_config.dst,
+			udpstream_config.pacing_rate,
+			udpstream_config.ttl,
+			udpstream_config.multicast_iface_index);
 
 		string src = udpstream_config.src;
 		if (!src.empty()) {
@@ -240,7 +248,7 @@ void open_logs(const vector &log_destinations)
 	start_logging();
 }
 
-bool dry_run_config(const std::string &argv0, const std::string &config_filename)
+bool dry_run_config(const string &argv0, const string &config_filename)
 {
 	char *argv0_copy = strdup(argv0.c_str());
 	char *config_filename_copy = strdup(config_filename.c_str());
@@ -334,7 +342,7 @@ int main(int argc, char **argv)
 	char argv0_canon[PATH_MAX];
 	char config_filename_canon[PATH_MAX];
 
-	if (realpath(argv[0], argv0_canon) == NULL) {
+	if (realpath("/proc/self/exe", argv0_canon) == NULL) {
 		log_perror(argv[0]);
 		exit(1);
 	}
@@ -382,14 +390,14 @@ start:
 	find_deleted_streams(config, &deleted_urls);
 
 	CubemapStateProto loaded_state;
-	struct timeval serialize_start;
+	timespec serialize_start;
 	set deserialized_urls;
-	map deserialized_acceptors;
+	map deserialized_acceptors;
 	multimap inputs;  // multimap due to older versions without deduplication.
 	if (state_fd != -1) {
 		log(INFO, "Deserializing state from previous process...");
 		string serialized;
-		if (!read_tempfile(state_fd, &serialized)) {
+		if (!read_tempfile_and_close(state_fd, &serialized)) {
 			exit(1);
 		}
 		if (!loaded_state.ParseFromString(serialized)) {
@@ -398,7 +406,7 @@ start:
 		}
 
 		serialize_start.tv_sec = loaded_state.serialize_start_sec();
-		serialize_start.tv_usec = loaded_state.serialize_start_usec();
+		serialize_start.tv_nsec = loaded_state.serialize_start_usec() * 1000ull;
 
 		// Deserialize the streams.
 		map stream_headers_for_url;  // See below.
@@ -416,12 +424,6 @@ start:
 				data_fds.push_back(stream.data_fds(j));
 			}
 
-			// Older versions stored the data once in the protobuf instead of
-			// sending around file descriptors.
-			if (data_fds.empty() && stream.has_data()) {
-				data_fds.push_back(make_tempfile(stream.data()));
-			}
-
 			servers->add_stream_from_serialized(stream, data_fds);
 			deserialized_urls.insert(stream.url());
 
@@ -433,15 +435,6 @@ start:
 		for (int i = 0; i < loaded_state.inputs_size(); ++i) {
 			InputProto serialized_input = loaded_state.inputs(i);
 
-			// Older versions did not store the stream header in the input,
-			// only in each stream. We need to have the stream header in the
-			// input as well, in case we create a new stream reusing the same input.
-			// Thus, we put it into place here if it's missing.
-			if (!serialized_input.has_stream_header() &&
-			    stream_headers_for_url.count(serialized_input.url()) != 0) {
-				serialized_input.set_stream_header(stream_headers_for_url[serialized_input.url()]);
-			}
-
 			InputWithRefcount iwr;
 			iwr.input = create_input(serialized_input);
 			iwr.refcount = 0;
@@ -450,8 +443,9 @@ start:
 
 		// Deserialize the acceptors.
 		for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
+			sockaddr_in6 sin6 = extract_address_from_acceptor_proto(loaded_state.acceptors(i));
 			deserialized_acceptors.insert(make_pair(
-				loaded_state.acceptors(i).port(),
+				sin6,
 				new Acceptor(loaded_state.acceptors(i))));
 		}
 
@@ -464,10 +458,33 @@ start:
 	// Find all streams in the configuration file, create them, and connect to the inputs.
 	create_streams(config, deserialized_urls, &inputs);
 	vector acceptors = create_acceptors(config, &deserialized_acceptors);
+
+	// Convert old-style timestamps to new-style timestamps for all clients;
+	// this simplifies the sort below.
+	{
+		timespec now_monotonic;
+		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &now_monotonic) == -1) {
+			log(ERROR, "clock_gettime(CLOCK_MONOTONIC_COARSE) failed.");
+			exit(1);
+		}
+		long delta_sec = now_monotonic.tv_sec - time(NULL);
+
+		for (int i = 0; i < loaded_state.clients_size(); ++i) {
+			ClientProto* client = loaded_state.mutable_clients(i);
+			if (client->has_connect_time_old()) {
+				client->set_connect_time_sec(client->connect_time_old() + delta_sec);
+				client->set_connect_time_nsec(now_monotonic.tv_nsec);
+				client->clear_connect_time_old();
+			}
+		}
+	}
 
 	// Put back the existing clients. It doesn't matter which server we
-	// allocate them to, so just do round-robin. However, we need to add
-	// them after the mark pools have been set up.
+	// allocate them to, so just do round-robin. However, we need to sort them
+	// by connection time first, since add_client_serialized() expects that.
+	sort(loaded_state.mutable_clients()->begin(),
+	     loaded_state.mutable_clients()->end(),
+	     OrderByConnectionTime());
 	for (int i = 0; i < loaded_state.clients_size(); ++i) {
 		if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
 			safe_close(loaded_state.clients(i).sock());
@@ -512,14 +529,15 @@ start:
 		input_stats_thread->run();
 	}
 
-	struct timeval server_start;
-	gettimeofday(&server_start, NULL);
+	timespec server_start;
+	int err = clock_gettime(CLOCK_MONOTONIC, &server_start);
+	assert(err != -1);
 	if (state_fd != -1) {
 		// Measure time from we started deserializing (below) to now, when basically everything
 		// is up and running. This is, in other words, a conservative estimate of how long our
 		// “glitch” period was, not counting of course reconnects if the configuration changed.
 		double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
-			1e-6 * (server_start.tv_usec - serialize_start.tv_usec);
+			1e-9 * (server_start.tv_nsec - serialize_start.tv_nsec);
 		log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);
 	}
 
@@ -528,7 +546,8 @@ start:
 	}
 
 	// OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
-	gettimeofday(&serialize_start, NULL);
+	err = clock_gettime(CLOCK_MONOTONIC, &serialize_start);
+	assert(err != -1);
 
 	if (input_stats_thread != NULL) {
 		input_stats_thread->stop();
@@ -564,11 +583,6 @@ start:
 	}
 	delete servers;
 
-	for (unsigned i = 0; i < mark_pools.size(); ++i) {
-		delete mark_pools[i];
-	}
-	mark_pools.clear();
-
 	access_log->stop();
 	delete access_log;
 	shut_down_logging();