20 #include "accesslog.h"
23 #include "input_stats.h"
26 #include "serverpool.h"
// Global process-wide state.
// NOTE(review): most users of these flags are elided from this view; the
// flag semantics below are inferred from main() and should be confirmed.
AccessLogThread *access_log = NULL;   // Access-log writer thread; a dummy instance is created if no log file is configured (see main()).
ServerPool *servers = NULL;           // Pool of worker servers owning all streams and clients.
vector<MarkPool *> mark_pools;        // fwmark pools, indexed by the config's mark_pool number (see create_streams()).
volatile bool hupped = false;         // presumably set from a signal handler on SIGHUP/SIGINT to trigger serialize + re-exec — confirm.
volatile bool stopped = false;        // presumably set on SIGINT to request a full shutdown instead of a re-exec — confirm.
// An Input paired with a count of how many streams currently use it;
// inputs whose refcount stays 0 after configuration are closed (see main()).
// NOTE(review): the struct body is elided from this view.
struct InputWithRefcount {
// NOTE(review): the enclosing signal handler's definition line is elided
// here; this branch appears to distinguish SIGINT from other signals
// (presumably SIGHUP) — confirm against the full source.
if (signum == SIGINT) {
// Handler that (per its name) intentionally does nothing; installed for
// SIGUSR1 in main(), which is "Used in internal signalling" — i.e. so
// blocking syscalls in other threads return with EINTR.
void do_nothing(int signum)
// Collect a complete snapshot of process state into a CubemapStateProto so
// it can be handed over to the re-exec'ed process across a SIGHUP.
// serialize_start is recorded in the proto so the new process can measure
// the handover ("glitch") time; see main().
// NOTE(review): parts of this function (remaining parameters, opening brace,
// loop-closing braces, return statement) are elided from this view.
CubemapStateProto collect_state(const timeval &serialize_start,
	const vector<Acceptor *> acceptors,
	const multimap<string, InputWithRefcount> inputs,
	CubemapStateProto state = servers->serialize(); // Fills streams() and clients().
	state.set_serialize_start_sec(serialize_start.tv_sec);
	state.set_serialize_start_usec(serialize_start.tv_usec);

	// Serialize every listening acceptor.
	for (size_t i = 0; i < acceptors.size(); ++i) {
		state.add_acceptors()->MergeFrom(acceptors[i]->serialize());

	// Serialize every input. Note that only the Input itself is persisted;
	// the refcount is recomputed from the config on the other side.
	for (multimap<string, InputWithRefcount>::const_iterator input_it = inputs.begin();
	     input_it != inputs.end();
		state.add_inputs()->MergeFrom(input_it->second.input->serialize());
// Find all port statements in the configuration file, and create acceptors for them.
// Create one Acceptor per port statement in the configuration, reusing any
// acceptor deserialized from the previous process when the port matches (so
// the listening socket survives the re-exec). Acceptors left over in
// *deserialized_acceptors afterwards are closed and deleted.
// NOTE(review): several lines (a leading parameter, braces, the else-branch
// opener, loop increments, the return) are elided from this view.
vector<Acceptor *> create_acceptors(
	map<int, Acceptor *> *deserialized_acceptors)
	vector<Acceptor *> acceptors;
	for (unsigned i = 0; i < config.acceptors.size(); ++i) {
		const AcceptorConfig &acceptor_config = config.acceptors[i];
		Acceptor *acceptor = NULL;
		map<int, Acceptor *>::iterator deserialized_acceptor_it =
			deserialized_acceptors->find(acceptor_config.port);
		if (deserialized_acceptor_it != deserialized_acceptors->end()) {
			// Reuse the already-listening acceptor handed over from the
			// previous process, and remove it from the leftovers map.
			acceptor = deserialized_acceptor_it->second;
			deserialized_acceptors->erase(deserialized_acceptor_it);
			// (else branch elided) No existing acceptor for this port;
			// open a fresh listening TCP socket.
			int server_sock = create_server_socket(acceptor_config.port, TCP_SOCKET);
			acceptor = new Acceptor(server_sock, acceptor_config.port);
		acceptors.push_back(acceptor);

	// Close all acceptors that are no longer in the configuration file.
	for (map<int, Acceptor *>::iterator acceptor_it = deserialized_acceptors->begin();
	     acceptor_it != deserialized_acceptors->end();
		acceptor_it->second->close_socket();
		delete acceptor_it->second;
// Ensure there is an Input for the given source URL in *inputs, creating one
// if absent (refcount initialization is elided from this view — confirm it
// starts at 0). If the URL cannot be parsed, an error is logged.
// NOTE(review): the early-return body, error-path statements and several
// braces are elided here.
void create_config_input(const string &src, multimap<string, InputWithRefcount> *inputs)
	// Already have an input for this URL; nothing to do.
	if (inputs->count(src) != 0) {

	InputWithRefcount iwr;
	iwr.input = create_input(src);
	if (iwr.input == NULL) {
		// Unparsable source URL: log it; presumably this input is then
		// skipped rather than inserted — confirm (return elided).
		log(ERROR, "did not understand URL '%s', clients will not get any data.",
	inputs->insert(make_pair(src, iwr));
// Find all streams in the configuration file, and create inputs for them.
// Streams marked src=delete are skipped (they are being removed, not fed);
// UDP streams have no such special case.
void create_config_inputs(const Config &config, multimap<string, InputWithRefcount> *inputs)
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		if (stream_config.src != "delete") {
			create_config_input(stream_config.src, inputs);
	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
		create_config_input(udpstream_config.src, inputs);
// Create (or update) all streams named in the configuration file, set up the
// mark pools, and connect each stream to its input (bumping the input's
// refcount). Streams present in deserialized_urls but absent from the config
// are warned about; streams with src=delete are skipped entirely.
// NOTE(review): many lines (braces, else-branch openers, `continue`s, the
// declaration of stream_index, loop increments) are elided from this view.
void create_streams(const Config &config,
	const set<string> &deserialized_urls,
	multimap<string, InputWithRefcount> *inputs)
	// Instantiate the fwmark pools; create_streams() indexes into the global
	// mark_pools vector by the config's mark_pool number below.
	for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
		const MarkPoolConfig &mp_config = config.mark_pools[i];
		mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));

	// Track which deserialized streams the new config still mentions.
	set<string> expecting_urls = deserialized_urls;
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		expecting_urls.erase(stream_config.url);

		// Special-case deleted streams; they were never deserialized in the first place,
		// so just ignore them.
		if (stream_config.src == "delete") {

		if (deserialized_urls.count(stream_config.url) == 0) {
			// Brand-new stream: create it from scratch.
			stream_index = servers->add_stream(stream_config.url,
			                                   stream_config.backlog_size,
			                                   Stream::Encoding(stream_config.encoding));
			// (else branch elided) Existing stream carried over from the old
			// process: look it up and apply the possibly-changed settings.
			stream_index = servers->lookup_stream_by_url(stream_config.url);
			assert(stream_index != -1);
			servers->set_backlog_size(stream_index, stream_config.backlog_size);
			servers->set_encoding(stream_index,
			                      Stream::Encoding(stream_config.encoding));
		if (stream_config.mark_pool != -1) {
			servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
		servers->set_pacing_rate(stream_index, stream_config.pacing_rate);

		// Hook the stream up to its input and count the reference.
		string src = stream_config.src;
		multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
		if (input_it != inputs->end()) {
			input_it->second.input->add_destination(stream_index);
			++input_it->second.refcount;

	// Warn about any streams we've lost (present in the serialized state but
	// no longer mentioned in the configuration file).
	for (set<string>::const_iterator stream_it = expecting_urls.begin();
	     stream_it != expecting_urls.end();
		string url = *stream_it;
		log(WARNING, "stream '%s' disappeared from the configuration file. "
		             "It will not be deleted, but clients will not get any new inputs. "
		             "If you really meant to delete it, set src=delete and reload.",

	// Create the UDP streams. Unlike HTTP streams these always have an input
	// (create_config_inputs() registered it unconditionally), hence the assert.
	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
		MarkPool *mark_pool = NULL;
		if (udpstream_config.mark_pool != -1) {
			mark_pool = mark_pools[udpstream_config.mark_pool];
		int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool, udpstream_config.pacing_rate);

		string src = udpstream_config.src;
		multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
		assert(input_it != inputs->end());
		input_it->second.input->add_destination(stream_index);
		++input_it->second.refcount;
// Register every configured log destination (file, console or syslog) with
// the logging subsystem. NOTE(review): the function braces and the trailing
// start-of-logging call (if any) are elided from this view.
void open_logs(const vector<LogConfig> &log_destinations)
	for (size_t i = 0; i < log_destinations.size(); ++i) {
		if (log_destinations[i].type == LogConfig::LOG_TYPE_FILE) {
			add_log_destination_file(log_destinations[i].filename);
		} else if (log_destinations[i].type == LogConfig::LOG_TYPE_CONSOLE) {
			add_log_destination_console();
		} else if (log_destinations[i].type == LogConfig::LOG_TYPE_SYSLOG) {
			add_log_destination_syslog();
// Fork and exec ourselves with --test-config to verify that the (possibly
// changed) configuration file parses, before committing to a re-exec.
// Returns true iff the child exited with status 0.
// C-string copies are made because execlp() wants mutable char*s and the
// child survives past the fork. NOTE(review): many lines (the fork() call,
// branch structure, frees and returns) are elided from this view.
bool dry_run_config(const std::string &argv0, const std::string &config_filename)
	char *argv0_copy = strdup(argv0.c_str());
	char *config_filename_copy = strdup(config_filename.c_str());
		log_perror("fork()");
		free(config_filename_copy);
		// Child: replace ourselves with the config test run.
		execlp(argv0_copy, argv0_copy, "--test-config", config_filename_copy, NULL);
		log_perror(argv0_copy);  // execlp() only returns on failure.
	free(config_filename_copy);
		// Parent: reap the child, retrying if interrupted by a signal.
		err = waitpid(pid, &status, 0);
	} while (err == -1 && errno == EINTR);
		log_perror("waitpid()");
	// Config is acceptable only on a clean zero exit.
	return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
// Collect the URLs of all streams marked src=delete in the configuration
// into *deleted_urls, so deserialization can drop their backlogs and clients.
void find_deleted_streams(const Config &config, set<string> *deleted_urls)
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		if (stream_config.src == "delete") {
			log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
			deleted_urls->insert(stream_config.url);
// Entry point. Parses options, reads the config, optionally deserializes
// state handed over from a previous instance (--state fd), starts all
// threads, and on SIGHUP serializes everything and re-execs itself.
// NOTE(review): this function is heavily elided in this view (braces,
// switch/case labels, early exits, the main wait loop, variable
// declarations such as `pid`, `serialized`, `buf`, `config`); comments on
// missing control flow are hedged accordingly.
int main(int argc, char **argv)
	signal(SIGUSR1, do_nothing); // Used in internal signalling.
	signal(SIGPIPE, SIG_IGN);  // Dead clients must not kill the process on write().

	bool test_config = false;
	static const option long_options[] = {
		{ "state", required_argument, 0, 's' },
		{ "test-config", no_argument, 0, 't' },
	int option_index = 0;
	int c = getopt_long(argc, argv, "s:t", long_options, &option_index);
	// --state: fd of a tempfile containing serialized state from the old process.
	state_fd = atoi(optarg);
	// NOTE(review): BUG — option_index indexes long_options, not argv, so this
	// likely prints the wrong string for an unknown option; argv[optind - 1]
	// is presumably what was intended. Surrounding switch is elided, so it is
	// left unchanged here.
	fprintf(stderr, "Unknown option '%s'\n", argv[option_index]);

	string config_filename = "cubemap.config";
	config_filename = argv[optind++];  // Optional positional config-file argument.

	// Canonicalize argv[0] and config_filename.
	char argv0_canon[PATH_MAX];
	char config_filename_canon[PATH_MAX];
	if (realpath(argv[0], argv0_canon) == NULL) {
	if (realpath(config_filename.c_str(), config_filename_canon) == NULL) {
		log_perror(config_filename.c_str());

	// Now parse the configuration file.
	if (!parse_config(config_filename_canon, &config)) {

	// Ideally we'd like to daemonize only when we've started up all threads etc.,
	// but daemon() forks, which is not good in multithreaded software, so we'll
	// have to do it here.
	if (config.daemonize) {
		if (daemon(0, 0) == -1) {
			log_perror("daemon");

	// Open logs as soon as possible.
	open_logs(config.log_destinations);

	log(INFO, "Cubemap " SERVER_VERSION " starting.");
	if (config.access_log_file.empty()) {
		// Create a dummy logger.
		access_log = new AccessLogThread();
		// (else branch elided) Real access logger writing to the configured file.
		access_log = new AccessLogThread(config.access_log_file);

	servers = new ServerPool(config.num_servers);

	// Find all the streams that are to be deleted.
	set<string> deleted_urls;
	find_deleted_streams(config, &deleted_urls);

	CubemapStateProto loaded_state;
	struct timeval serialize_start;
	set<string> deserialized_urls;
	map<int, Acceptor *> deserialized_acceptors;
	multimap<string, InputWithRefcount> inputs; // multimap due to older versions without deduplication.
	if (state_fd != -1) {
		// We were re-exec'ed: recover streams/inputs/acceptors/clients from
		// the tempfile the old process left behind on state_fd.
		log(INFO, "Deserializing state from previous process...");
		if (!read_tempfile(state_fd, &serialized)) {
		if (!loaded_state.ParseFromString(serialized)) {
			log(ERROR, "Failed deserialization of state.");

		serialize_start.tv_sec = loaded_state.serialize_start_sec();
		serialize_start.tv_usec = loaded_state.serialize_start_usec();

		// Deserialize the streams.
		map<string, string> stream_headers_for_url; // See below.
		for (int i = 0; i < loaded_state.streams_size(); ++i) {
			const StreamProto &stream = loaded_state.streams(i);
			if (deleted_urls.count(stream.url()) != 0) {
				// Delete the stream backlogs.
				for (int j = 0; j < stream.data_fds_size(); ++j) {
					safe_close(stream.data_fds(j));
				// (else branch elided) Keep the stream: adopt its backlog fds.
				vector<int> data_fds;
				for (int j = 0; j < stream.data_fds_size(); ++j) {
					data_fds.push_back(stream.data_fds(j));

				// Older versions stored the data once in the protobuf instead of
				// sending around file descriptors.
				if (data_fds.empty() && stream.has_data()) {
					data_fds.push_back(make_tempfile(stream.data()));

				servers->add_stream_from_serialized(stream, data_fds);
				deserialized_urls.insert(stream.url());

				stream_headers_for_url.insert(make_pair(stream.url(), stream.stream_header()));

		// Deserialize the inputs. Note that we don't actually add them to any stream yet.
		for (int i = 0; i < loaded_state.inputs_size(); ++i) {
			InputProto serialized_input = loaded_state.inputs(i);

			// Older versions did not store the stream header in the input,
			// only in each stream. We need to have the stream header in the
			// input as well, in case we create a new stream reusing the same input.
			// Thus, we put it into place here if it's missing.
			if (!serialized_input.has_stream_header() &&
			    stream_headers_for_url.count(serialized_input.url()) != 0) {
				serialized_input.set_stream_header(stream_headers_for_url[serialized_input.url()]);

			InputWithRefcount iwr;
			iwr.input = create_input(serialized_input);
			inputs.insert(make_pair(serialized_input.url(), iwr));

		// Deserialize the acceptors.
		for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
			deserialized_acceptors.insert(make_pair(
				loaded_state.acceptors(i).port(),
				new Acceptor(loaded_state.acceptors(i))));

		log(INFO, "Deserialization done.");

	// Add any new inputs coming from the config.
	create_config_inputs(config, &inputs);

	// Find all streams in the configuration file, create them, and connect to the inputs.
	create_streams(config, deserialized_urls, &inputs);
	vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);

	// Put back the existing clients. It doesn't matter which server we
	// allocate them to, so just do round-robin. However, we need to add
	// them after the mark pools have been set up.
	for (int i = 0; i < loaded_state.clients_size(); ++i) {
		if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
			safe_close(loaded_state.clients(i).sock());
			// (else branch elided) Re-adopt the client connection.
			servers->add_client_from_serialized(loaded_state.clients(i));

	// Now delete all inputs that are no longer in use, and start the others.
	for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
	     input_it != inputs.end(); ) {
		if (input_it->second.refcount == 0) {
			log(WARNING, "Input '%s' no longer in use, closing.",
			    input_it->first.c_str());
			input_it->second.input->close_socket();
			delete input_it->second.input;
			// Post-increment erase keeps the iterator valid across erase().
			inputs.erase(input_it++);
			// (else branch elided) Input is in use; start its thread.
			input_it->second.input->run();

	// Start writing statistics.
	StatsThread *stats_thread = NULL;
	if (!config.stats_file.empty()) {
		stats_thread = new StatsThread(config.stats_file, config.stats_interval);

	InputStatsThread *input_stats_thread = NULL;
	if (!config.input_stats_file.empty()) {
		// The stats thread only needs the Inputs, not the refcounts.
		vector<Input*> inputs_no_refcount;
		for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
		     input_it != inputs.end(); ++input_it) {
			inputs_no_refcount.push_back(input_it->second.input);

		input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
		input_stats_thread->run();

	struct timeval server_start;
	gettimeofday(&server_start, NULL);
	if (state_fd != -1) {
		// Measure time from we started deserializing (below) to now, when basically everything
		// is up and running. This is, in other words, a conservative estimate of how long our
		// “glitch” period was, not counting of course reconnects if the configuration changed.
		double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
			1e-6 * (server_start.tv_usec - serialize_start.tv_usec);
		log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);

	// OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
	gettimeofday(&serialize_start, NULL);

	// Stop all threads in dependency-safe order before serializing.
	if (input_stats_thread != NULL) {
		input_stats_thread->stop();
		delete input_stats_thread;
	if (stats_thread != NULL) {
		stats_thread->stop();
	for (size_t i = 0; i < acceptors.size(); ++i) {
		acceptors[i]->stop();
	for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
	     input_it != inputs.end();
		input_it->second.input->stop();

	CubemapStateProto state;
	// (branch structure elided) Either a full shutdown (SIGINT) ...
	log(INFO, "Shutting down.");
	// ... or a serialize + re-exec (SIGHUP).
	log(INFO, "Serializing state and re-execing...");
	state = collect_state(
		serialize_start, acceptors, inputs, servers);
	state.SerializeToString(&serialized);
	// Stash the serialized state in a tempfile whose fd survives the exec.
	state_fd = make_tempfile(serialized);
	if (state_fd == -1) {

	for (unsigned i = 0; i < mark_pools.size(); ++i) {
		delete mark_pools[i];

	// OK, so the signal was SIGHUP. Check that the new config is okay, then exec the new binary.
	if (!dry_run_config(argv0_canon, config_filename_canon)) {
		// Logs were closed for the exec attempt; reopen before complaining.
		open_logs(config.log_destinations);
		log(ERROR, "%s --test-config failed. Restarting old version instead of new.", argv[0]);

	// Pass the state tempfile's fd to our replacement via --state.
	sprintf(buf, "%d", state_fd);
	execlp(argv0_canon, argv0_canon, config_filename_canon, "--state", buf, NULL);
	// execlp() only returns on failure; reopen logs and report.
	open_logs(config.log_destinations);
	log_perror("execlp");
	log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv0_canon);