12 #include <systemd/sd-daemon.h>
22 #include "accesslog.h"
25 #include "input_stats.h"
27 #include "sa_compare.h"
28 #include "serverpool.h"
// Global server state, shared across the whole process.
37 AccessLogThread *access_log = NULL;
38 ServerPool *servers = NULL;
// Flags set from signal handlers, hence volatile.
// NOTE(review): volatile alone is not a thread-synchronization primitive;
// presumably these are only polled from the main thread — confirm.
39 volatile bool hupped = false;
40 volatile bool stopped = false;
// Strict-weak-ordering comparator: orders clients by connection time,
// seconds first, then nanoseconds. Used to sort deserialized clients
// before re-adding them (add_client_serialized() expects sorted input;
// see main()).
44 struct OrderByConnectionTime {
45 bool operator() (const ClientProto &a, const ClientProto &b) const {
46 if (a.connect_time_sec() != b.connect_time_sec())
47 return a.connect_time_sec() < b.connect_time_sec();
48 return a.connect_time_nsec() < b.connect_time_nsec();
// Pairs an Input* with a refcount of how many streams use it, so that
// inputs no longer referenced after a config reload can be closed
// and deleted (see the cleanup loop in main()).
54 struct InputWithRefcount {
// NOTE(review): fragment of a signal handler — SIGINT takes a separate
// branch from the other handled signals; full body not visible here.
62 if (signum == SIGINT) {
// Deliberately empty handler, installed for SIGUSR1 (see main());
// presumably installed so the signal merely interrupts blocking
// syscalls (EINTR) for internal signalling — confirm.
67 void do_nothing(int signum)
// Collects the full process state (streams, clients, acceptors, inputs)
// into a CubemapStateProto, for hand-off to the next process across a
// SIGHUP-triggered re-exec (see main()).
71 CubemapStateProto collect_state(const timespec &serialize_start,
72 const vector<Acceptor *> acceptors,
73 const multimap<string, InputWithRefcount> inputs,
76 CubemapStateProto state = servers->serialize(); // Fills streams() and clients().
// The proto stores the start time with microsecond granularity,
// so tv_nsec is truncated here (restored as *1000 on deserialization).
77 state.set_serialize_start_sec(serialize_start.tv_sec);
78 state.set_serialize_start_usec(serialize_start.tv_nsec / 1000);
80 for (size_t i = 0; i < acceptors.size(); ++i) {
81 state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
84 for (multimap<string, InputWithRefcount>::const_iterator input_it = inputs.begin();
85 input_it != inputs.end();
87 state.add_inputs()->MergeFrom(input_it->second.input->serialize());
93 // Find all port statements in the configuration file, and create acceptors for them.
// Creates one Acceptor per configured listening port. Acceptors that
// survived a re-exec are reused (and removed from *deserialized_acceptors);
// any deserialized acceptor not matched by the new config is closed below.
94 vector<Acceptor *> create_acceptors(
96 map<sockaddr_in6, Acceptor *, Sockaddr6Compare> *deserialized_acceptors)
98 vector<Acceptor *> acceptors;
99 for (unsigned i = 0; i < config.acceptors.size(); ++i) {
100 const AcceptorConfig &acceptor_config = config.acceptors[i];
101 Acceptor *acceptor = NULL;
102 map<sockaddr_in6, Acceptor *, Sockaddr6Compare>::iterator deserialized_acceptor_it =
103 deserialized_acceptors->find(acceptor_config.addr);
104 if (deserialized_acceptor_it != deserialized_acceptors->end()) {
// Reuse the already-open listening socket from the previous process.
105 acceptor = deserialized_acceptor_it->second;
106 deserialized_acceptors->erase(deserialized_acceptor_it);
// Otherwise, open a fresh listening socket for this address.
108 int server_sock = create_server_socket(acceptor_config.addr, TCP_SOCKET);
109 acceptor = new Acceptor(server_sock, acceptor_config.addr);
112 acceptors.push_back(acceptor);
115 // Close all acceptors that are no longer in the configuration file.
116 for (map<sockaddr_in6, Acceptor *, Sockaddr6Compare>::iterator
117 acceptor_it = deserialized_acceptors->begin();
118 acceptor_it != deserialized_acceptors->end();
120 acceptor_it->second->close_socket();
121 delete acceptor_it->second;
// Creates an Input for the given source URL and registers it in *inputs,
// unless one already exists for that URL (deduplication). A URL that
// create_input() does not understand is logged as an error; the stream
// will then simply receive no data.
127 void create_config_input(const string &src, multimap<string, InputWithRefcount> *inputs)
132 if (inputs->count(src) != 0) {
136 InputWithRefcount iwr;
137 iwr.input = create_input(src);
138 if (iwr.input == NULL) {
139 log(ERROR, "did not understand URL '%s', clients will not get any data.",
144 inputs->insert(make_pair(src, iwr));
147 // Find all streams in the configuration file, and create inputs for them.
148 void create_config_inputs(const Config &config, multimap<string, InputWithRefcount> *inputs)
150 for (unsigned i = 0; i < config.streams.size(); ++i) {
151 const StreamConfig &stream_config = config.streams[i];
// Streams marked src=delete are being removed; they get no input.
152 if (stream_config.src != "delete") {
153 create_config_input(stream_config.src, inputs);
// UDP output streams also need an input for their source.
156 for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
157 const UDPStreamConfig &udpstream_config = config.udpstreams[i];
158 create_config_input(udpstream_config.src, inputs);
// Creates (or updates, for streams that survived deserialization) every
// stream in the config, hooks each up to its input (bumping the input's
// refcount), and warns about streams that vanished from the config.
162 void create_streams(const Config &config,
163 const set<string> &deserialized_urls,
164 multimap<string, InputWithRefcount> *inputs)
// Start from the deserialized set; anything left at the end of the loop
// is a stream that disappeared from the configuration file.
167 set<string> expecting_urls = deserialized_urls;
168 for (unsigned i = 0; i < config.streams.size(); ++i) {
169 const StreamConfig &stream_config = config.streams[i];
172 expecting_urls.erase(stream_config.url);
174 // Special-case deleted streams; they were never deserialized in the first place,
175 // so just ignore them.
176 if (stream_config.src == "delete") {
// New stream: create it. Existing stream: look it up and push the
// (possibly changed) parameters from the new config onto it.
180 if (deserialized_urls.count(stream_config.url) == 0) {
181 stream_index = servers->add_stream(stream_config.url,
182 stream_config.backlog_size,
183 stream_config.prebuffering_bytes,
184 Stream::Encoding(stream_config.encoding));
186 stream_index = servers->lookup_stream_by_url(stream_config.url);
187 assert(stream_index != -1);
188 servers->set_backlog_size(stream_index, stream_config.backlog_size);
189 servers->set_prebuffering_bytes(stream_index, stream_config.prebuffering_bytes);
190 servers->set_encoding(stream_index,
191 Stream::Encoding(stream_config.encoding));
194 servers->set_pacing_rate(stream_index, stream_config.pacing_rate);
196 string src = stream_config.src;
198 multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
199 if (input_it != inputs->end()) {
200 input_it->second.input->add_destination(stream_index);
201 ++input_it->second.refcount;
206 // Warn about any streams we've lost.
207 for (set<string>::const_iterator stream_it = expecting_urls.begin();
208 stream_it != expecting_urls.end();
210 string url = *stream_it;
211 log(WARNING, "stream '%s' disappeared from the configuration file. "
212 "It will not be deleted, but clients will not get any new inputs. "
213 "If you really meant to delete it, set src=delete and reload.",
// UDP streams are never deserialized; create them from scratch every time.
218 for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
219 const UDPStreamConfig &udpstream_config = config.udpstreams[i];
220 int stream_index = servers->add_udpstream(
221 udpstream_config.dst,
222 udpstream_config.pacing_rate,
223 udpstream_config.ttl,
224 udpstream_config.multicast_iface_index);
226 string src = udpstream_config.src;
// UDP stream inputs are created unconditionally (see
// create_config_inputs()), so the lookup must succeed.
228 multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
229 assert(input_it != inputs->end());
230 input_it->second.input->add_destination(stream_index);
231 ++input_it->second.refcount;
// Registers every configured log destination (file, console or syslog)
// with the logging subsystem.
236 void open_logs(const vector<LogConfig> &log_destinations)
238 for (size_t i = 0; i < log_destinations.size(); ++i) {
239 if (log_destinations[i].type == LogConfig::LOG_TYPE_FILE) {
240 add_log_destination_file(log_destinations[i].filename);
241 } else if (log_destinations[i].type == LogConfig::LOG_TYPE_CONSOLE) {
242 add_log_destination_console();
243 } else if (log_destinations[i].type == LogConfig::LOG_TYPE_SYSLOG) {
244 add_log_destination_syslog();
// Forks and re-executes ourselves with --test-config to validate the new
// configuration file before re-exec on SIGHUP. Returns true iff the child
// exited normally with status 0 (i.e., the config parsed OK).
252 bool dry_run_config(const string &argv0, const string &config_filename)
// Copies are needed because execlp() takes non-const char*.
254 char *argv0_copy = strdup(argv0.c_str());
255 char *config_filename_copy = strdup(config_filename.c_str());
260 log_perror("fork()");
262 free(config_filename_copy);
266 execlp(argv0_copy, argv0_copy, "--test-config", config_filename_copy, NULL);
// Only reached if execlp() itself failed.
267 log_perror(argv0_copy);
275 free(config_filename_copy);
// Retry waitpid() if interrupted by a signal.
280 err = waitpid(pid, &status, 0);
281 } while (err == -1 && errno == EINTR);
284 log_perror("waitpid()");
288 return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
// Collects the URLs of all streams marked src=delete in the config,
// so their backlogs and clients can be dropped during deserialization.
291 void find_deleted_streams(const Config &config, set<string> *deleted_urls)
293 for (unsigned i = 0; i < config.streams.size(); ++i) {
294 const StreamConfig &stream_config = config.streams[i];
295 if (stream_config.src == "delete") {
296 log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
297 deleted_urls->insert(stream_config.url);
// Entry point: parses options and config, optionally deserializes state
// handed over from a previous incarnation (--state FD), starts all
// threads, and on SIGHUP serializes everything and re-execs itself.
302 int main(int argc, char **argv)
306 signal(SIGUSR1, do_nothing); // Used in internal signalling.
307 signal(SIGPIPE, SIG_IGN);
// Parse command-line flags: --state <fd> and --test-config.
311 bool test_config = false;
313 static const option long_options[] = {
314 { "state", required_argument, 0, 's' },
315 { "test-config", no_argument, 0, 't' },
318 int option_index = 0;
319 int c = getopt_long(argc, argv, "s:t", long_options, &option_index);
326 state_fd = atoi(optarg);
332 fprintf(stderr, "Unknown option '%s'\n", argv[option_index]);
337 string config_filename = "cubemap.config";
339 config_filename = argv[optind++];
342 // Canonicalize argv[0] and config_filename.
343 char argv0_canon[PATH_MAX];
344 char config_filename_canon[PATH_MAX];
// /proc/self/exe gives the absolute path of the running binary,
// which is what we later re-exec on SIGHUP.
346 if (realpath("/proc/self/exe", argv0_canon) == NULL) {
350 if (realpath(config_filename.c_str(), config_filename_canon) == NULL) {
351 log_perror(config_filename.c_str());
355 // Now parse the configuration file.
357 if (!parse_config(config_filename_canon, &config)) {
364 // Ideally we'd like to daemonize only when we've started up all threads etc.,
365 // but daemon() forks, which is not good in multithreaded software, so we'll
366 // have to do it here.
367 if (config.daemonize) {
368 if (daemon(0, 0) == -1) {
369 log_perror("daemon");
375 // Open logs as soon as possible.
376 open_logs(config.log_destinations);
378 log(INFO, "Cubemap " SERVER_VERSION " starting.");
379 if (config.access_log_file.empty()) {
380 // Create a dummy logger.
381 access_log = new AccessLogThread();
383 access_log = new AccessLogThread(config.access_log_file);
387 servers = new ServerPool(config.num_servers);
389 // Find all the streams that are to be deleted.
390 set<string> deleted_urls;
391 find_deleted_streams(config, &deleted_urls);
393 CubemapStateProto loaded_state;
394 timespec serialize_start;
395 set<string> deserialized_urls;
396 map<sockaddr_in6, Acceptor *, Sockaddr6Compare> deserialized_acceptors;
397 multimap<string, InputWithRefcount> inputs; // multimap due to older versions without deduplication.
398 if (state_fd != -1) {
399 log(INFO, "Deserializing state from previous process...");
401 if (!read_tempfile_and_close(state_fd, &serialized)) {
404 if (!loaded_state.ParseFromString(serialized)) {
405 log(ERROR, "Failed deserialization of state.");
// The proto stores the start time in microseconds; expand back to nsec.
409 serialize_start.tv_sec = loaded_state.serialize_start_sec();
410 serialize_start.tv_nsec = loaded_state.serialize_start_usec() * 1000ull;
412 // Deserialize the streams.
413 map<string, string> stream_headers_for_url; // See below.
414 for (int i = 0; i < loaded_state.streams_size(); ++i) {
415 const StreamProto &stream = loaded_state.streams(i);
417 if (deleted_urls.count(stream.url()) != 0) {
418 // Delete the stream backlogs.
419 for (int j = 0; j < stream.data_fds_size(); ++j) {
420 safe_close(stream.data_fds(j));
423 vector<int> data_fds;
424 for (int j = 0; j < stream.data_fds_size(); ++j) {
425 data_fds.push_back(stream.data_fds(j));
428 servers->add_stream_from_serialized(stream, data_fds);
429 deserialized_urls.insert(stream.url());
431 stream_headers_for_url.insert(make_pair(stream.url(), stream.stream_header()));
435 // Deserialize the inputs. Note that we don't actually add them to any stream yet.
436 for (int i = 0; i < loaded_state.inputs_size(); ++i) {
437 InputProto serialized_input = loaded_state.inputs(i);
439 InputWithRefcount iwr;
440 iwr.input = create_input(serialized_input);
442 inputs.insert(make_pair(serialized_input.url(), iwr));
445 // Deserialize the acceptors.
446 for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
447 sockaddr_in6 sin6 = extract_address_from_acceptor_proto(loaded_state.acceptors(i));
448 deserialized_acceptors.insert(make_pair(
450 new Acceptor(loaded_state.acceptors(i))));
453 log(INFO, "Deserialization done.");
456 // Add any new inputs coming from the config.
457 create_config_inputs(config, &inputs);
459 // Find all streams in the configuration file, create them, and connect to the inputs.
460 create_streams(config, deserialized_urls, &inputs);
461 vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);
463 // Convert old-style timestamps to new-style timestamps for all clients;
464 // this simplifies the sort below.
466 timespec now_monotonic;
467 if (clock_gettime(CLOCK_MONOTONIC_COARSE, &now_monotonic) == -1) {
468 log(ERROR, "clock_gettime(CLOCK_MONOTONIC_COARSE) failed.");
// Old-style timestamps were wall-clock (time()); shift them onto the
// monotonic clock by the current offset between the two clocks.
471 long delta_sec = now_monotonic.tv_sec - time(NULL);
473 for (int i = 0; i < loaded_state.clients_size(); ++i) {
474 ClientProto* client = loaded_state.mutable_clients(i);
475 if (client->has_connect_time_old()) {
476 client->set_connect_time_sec(client->connect_time_old() + delta_sec);
477 client->set_connect_time_nsec(now_monotonic.tv_nsec);
478 client->clear_connect_time_old();
483 // Put back the existing clients. It doesn't matter which server we
484 // allocate them to, so just do round-robin. However, we need to sort them
485 // by connection time first, since add_client_serialized() expects that.
486 sort(loaded_state.mutable_clients()->begin(),
487 loaded_state.mutable_clients()->end(),
488 OrderByConnectionTime());
489 for (int i = 0; i < loaded_state.clients_size(); ++i) {
// Clients of deleted streams are simply disconnected.
490 if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
491 safe_close(loaded_state.clients(i).sock());
493 servers->add_client_from_serialized(loaded_state.clients(i));
499 // Now delete all inputs that are no longer in use, and start the others.
500 for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
501 input_it != inputs.end(); ) {
502 if (input_it->second.refcount == 0) {
503 log(WARNING, "Input '%s' no longer in use, closing.",
504 input_it->first.c_str());
505 input_it->second.input->close_socket();
506 delete input_it->second.input;
// Post-increment inside erase() keeps the iterator valid.
507 inputs.erase(input_it++);
509 input_it->second.input->run();
514 // Start writing statistics.
515 StatsThread *stats_thread = NULL;
516 if (!config.stats_file.empty()) {
517 stats_thread = new StatsThread(config.stats_file, config.stats_interval);
521 InputStatsThread *input_stats_thread = NULL;
522 if (!config.input_stats_file.empty()) {
// The stats thread only needs the Input pointers, not the refcounts.
523 vector<Input*> inputs_no_refcount;
524 for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
525 input_it != inputs.end(); ++input_it) {
526 inputs_no_refcount.push_back(input_it->second.input);
529 input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
530 input_stats_thread->run();
533 timespec server_start;
534 int err = clock_gettime(CLOCK_MONOTONIC, &server_start);
536 if (state_fd != -1) {
537 // Measure time from we started deserializing (below) to now, when basically everything
538 // is up and running. This is, in other words, a conservative estimate of how long our
539 // “glitch” period was, not counting of course reconnects if the configuration changed.
540 double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
541 1e-9 * (server_start.tv_nsec - serialize_start.tv_nsec);
542 log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);
// Tell systemd (if present) that startup is complete.
545 sd_notify(0, "READY=1");
552 sd_notify(0, "STOPPING=1");
554 sd_notify(0, "RELOADING=1");
557 // OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
558 err = clock_gettime(CLOCK_MONOTONIC, &serialize_start);
// Tear down all threads in reverse order of startup.
561 if (input_stats_thread != NULL) {
562 input_stats_thread->stop();
563 delete input_stats_thread;
565 if (stats_thread != NULL) {
566 stats_thread->stop();
569 for (size_t i = 0; i < acceptors.size(); ++i) {
570 acceptors[i]->stop();
572 for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
573 input_it != inputs.end();
575 input_it->second.input->stop();
579 CubemapStateProto state;
581 log(INFO, "Shutting down.");
583 log(INFO, "Serializing state and re-execing...");
584 state = collect_state(
585 serialize_start, acceptors, inputs, servers);
587 state.SerializeToString(&serialized);
// The serialized state is passed to the new process via a tempfile fd.
588 state_fd = make_tempfile(serialized);
589 if (state_fd == -1) {
603 // OK, so the signal was SIGHUP. Check that the new config is okay, then exec the new binary.
604 if (!dry_run_config(argv0_canon, config_filename_canon)) {
// Logs were closed for the re-exec attempt; reopen before logging.
605 open_logs(config.log_destinations);
606 log(ERROR, "%s --test-config failed. Restarting old version instead of new.", argv[0]);
613 sprintf(buf, "%d", state_fd);
616 execlp(argv0_canon, argv0_canon, config_filename_canon, "--state", buf, NULL);
// Only reached if execlp() failed; log and (per the message) retry.
617 open_logs(config.log_destinations);
618 log_perror("execlp");
619 log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv0_canon);