[cubemap] / main.cpp
#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "acceptor.h"
#include "accesslog.h"
#include "config.h"
#include "input.h"
#include "input_stats.h"
#include "log.h"
#include "markpool.h"
#include "serverpool.h"
#include "state.pb.h"
#include "stats.h"
#include "stream.h"
#include "util.h"
#include "version.h"

using namespace std;

AccessLogThread *access_log = NULL;
ServerPool *servers = NULL;
vector<MarkPool *> mark_pools;
volatile bool hupped = false;
volatile bool stopped = false;

struct InputWithRefcount {
	Input *input;
	int refcount;
};

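// Signal handler for SIGHUP and SIGINT. SIGHUP asks the main loop to serialize
// state and re-exec itself; SIGINT additionally sets <stopped>, which turns the
// same code path into a clean shutdown instead of a re-exec.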
void hup(int signum)
{
	hupped = true;
	if (signum == SIGINT) {
		stopped = true;
	}
}

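// Empty handler for SIGUSR1, which is used for internal signalling (see the
// signal setup in main()). Installing a real handler, as opposed to SIG_IGN,
// makes the signal interrupt blocking system calls so that threads can be
// woken up, without the handler itself having to do anything.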
void do_nothing(int signum)
{
}

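// Collect the complete process state (streams, clients, acceptors and inputs)
// into a single protobuf, so that it can be handed over to the new process
// across the re-exec.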
CubemapStateProto collect_state(const timeval &serialize_start,
                                const vector<Acceptor *> acceptors,
                                const multimap<string, InputWithRefcount> inputs,
                                ServerPool *servers)
{
	CubemapStateProto state = servers->serialize();  // Fills streams() and clients().
	state.set_serialize_start_sec(serialize_start.tv_sec);
	state.set_serialize_start_usec(serialize_start.tv_usec);

	for (size_t i = 0; i < acceptors.size(); ++i) {
		state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
	}

	for (multimap<string, InputWithRefcount>::const_iterator input_it = inputs.begin();
	     input_it != inputs.end();
	     ++input_it) {
		state.add_inputs()->MergeFrom(input_it->second.input->serialize());
	}

	return state;
}

// Find all port statements in the configuration file, and create acceptors for them.
vector<Acceptor *> create_acceptors(
	const Config &config,
	map<int, Acceptor *> *deserialized_acceptors)
{
	vector<Acceptor *> acceptors;
	for (unsigned i = 0; i < config.acceptors.size(); ++i) {
		const AcceptorConfig &acceptor_config = config.acceptors[i];
		Acceptor *acceptor = NULL;
		map<int, Acceptor *>::iterator deserialized_acceptor_it =
			deserialized_acceptors->find(acceptor_config.port);
		if (deserialized_acceptor_it != deserialized_acceptors->end()) {
			acceptor = deserialized_acceptor_it->second;
			deserialized_acceptors->erase(deserialized_acceptor_it);
		} else {
			int server_sock = create_server_socket(acceptor_config.port, TCP_SOCKET);
			acceptor = new Acceptor(server_sock, acceptor_config.port);
		}
		acceptor->run();
		acceptors.push_back(acceptor);
	}

	// Close all acceptors that are no longer in the configuration file.
	for (map<int, Acceptor *>::iterator acceptor_it = deserialized_acceptors->begin();
	     acceptor_it != deserialized_acceptors->end();
	     ++acceptor_it) {
		acceptor_it->second->close_socket();
		delete acceptor_it->second;
	}

	return acceptors;
}

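// Create an Input for the given source URL and insert it into <inputs>,
// unless the URL is empty or an input for it already exists.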
void create_config_input(const string &src, multimap<string, InputWithRefcount> *inputs)
{
	if (src.empty()) {
		return;
	}
	if (inputs->count(src) != 0) {
		return;
	}

	InputWithRefcount iwr;
	iwr.input = create_input(src);
	if (iwr.input == NULL) {
		log(ERROR, "did not understand URL '%s', clients will not get any data.",
			src.c_str());
		return;
	}
	iwr.refcount = 0;
	inputs->insert(make_pair(src, iwr));
}

// Find all streams in the configuration file, and create inputs for them.
void create_config_inputs(const Config &config, multimap<string, InputWithRefcount> *inputs)
{
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		if (stream_config.src != "delete") {
			create_config_input(stream_config.src, inputs);
		}
	}
	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
		create_config_input(udpstream_config.src, inputs);
	}
}

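// Create HTTP and UDP streams for everything in the configuration file,
// reusing deserialized streams where the URL matches, and hook each stream
// up to its input (bumping the input's refcount so that unused inputs can
// be cleaned up later).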
void create_streams(const Config &config,
                    const set<string> &deserialized_urls,
                    multimap<string, InputWithRefcount> *inputs)
{
	for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
		const MarkPoolConfig &mp_config = config.mark_pools[i];
		mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));
	}

	// HTTP streams.
	set<string> expecting_urls = deserialized_urls;
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		int stream_index;

		expecting_urls.erase(stream_config.url);

		// Special-case deleted streams; they were never deserialized in the first place,
		// so just ignore them.
		if (stream_config.src == "delete") {
			continue;
		}

		if (deserialized_urls.count(stream_config.url) == 0) {
			stream_index = servers->add_stream(stream_config.url,
			                                   stream_config.backlog_size,
			                                   Stream::Encoding(stream_config.encoding));
		} else {
			stream_index = servers->lookup_stream_by_url(stream_config.url);
			assert(stream_index != -1);
			servers->set_backlog_size(stream_index, stream_config.backlog_size);
			servers->set_encoding(stream_index,
			                      Stream::Encoding(stream_config.encoding));
		}

		if (stream_config.mark_pool != -1) {
			servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
		}

		string src = stream_config.src;
		if (!src.empty()) {
			multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
			if (input_it != inputs->end()) {
				input_it->second.input->add_destination(stream_index);
				++input_it->second.refcount;
			}
		}
	}

	// Warn about any streams we've lost.
	for (set<string>::const_iterator stream_it = expecting_urls.begin();
	     stream_it != expecting_urls.end();
	     ++stream_it) {
		string url = *stream_it;
		log(WARNING, "stream '%s' disappeared from the configuration file. "
		             "It will not be deleted, but clients will not get any new inputs. "
		             "If you really meant to delete it, set src=delete and reload.",
		             url.c_str());
	}

	// UDP streams.
	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
		MarkPool *mark_pool = NULL;
		if (udpstream_config.mark_pool != -1) {
			mark_pool = mark_pools[udpstream_config.mark_pool];
		}
		int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool);

		string src = udpstream_config.src;
		if (!src.empty()) {
			multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
			assert(input_it != inputs->end());
			input_it->second.input->add_destination(stream_index);
			++input_it->second.refcount;
		}
	}
}

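// Set up the log destinations (file, console and/or syslog) given in the
// configuration, and start the logging thread.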
void open_logs(const vector<LogConfig> &log_destinations)
{
	for (size_t i = 0; i < log_destinations.size(); ++i) {
		if (log_destinations[i].type == LogConfig::LOG_TYPE_FILE) {
			add_log_destination_file(log_destinations[i].filename);
		} else if (log_destinations[i].type == LogConfig::LOG_TYPE_CONSOLE) {
			add_log_destination_console();
		} else if (log_destinations[i].type == LogConfig::LOG_TYPE_SYSLOG) {
			add_log_destination_syslog();
		} else {
			assert(false);
		}
	}
	start_logging();
}

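// Fork and exec ourselves (the given argv0) with --test-config to check that
// the configuration file parses cleanly before we commit to re-execing the
// new binary. Returns true if the child exited with status 0.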
bool dry_run_config(const std::string &argv0, const std::string &config_filename)
{
	char *argv0_copy = strdup(argv0.c_str());
	char *config_filename_copy = strdup(config_filename.c_str());

	pid_t pid = fork();
	switch (pid) {
	case -1:
		log_perror("fork()");
		free(argv0_copy);
		free(config_filename_copy);
		return false;
	case 0:
		// Child.
		execlp(argv0_copy, argv0_copy, "--test-config", config_filename_copy, NULL);
		log_perror(argv0_copy);
		_exit(1);
	default:
		// Parent.
		break;
	}

	free(argv0_copy);
	free(config_filename_copy);

	int status;
	pid_t err;
	do {
		err = waitpid(pid, &status, 0);
	} while (err == -1 && errno == EINTR);

	if (err == -1) {
		log_perror("waitpid()");
		return false;
	}

	return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
}

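// Collect the URLs of all streams marked with src=delete in the configuration,
// so that their backlogs and clients can be dropped instead of being carried
// over from the previous process.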
void find_deleted_streams(const Config &config, set<string> *deleted_urls)
{
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		if (stream_config.src == "delete") {
			log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
			deleted_urls->insert(stream_config.url);
		}
	}
}

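// Main entry point. Parses options and the configuration file, optionally
// deserializes state handed over from a previous instance (--state), starts
// all acceptors, inputs and servers, and then waits until SIGHUP or SIGINT,
// at which point it serializes everything again and either re-execs itself
// or shuts down.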
int main(int argc, char **argv)
{
	signal(SIGHUP, hup);
	signal(SIGINT, hup);
	signal(SIGUSR1, do_nothing);  // Used in internal signalling.
	signal(SIGPIPE, SIG_IGN);

	// Parse options.
	int state_fd = -1;
	bool test_config = false;
	for ( ;; ) {
		static const option long_options[] = {
			{ "state", required_argument, 0, 's' },
			{ "test-config", no_argument, 0, 't' },
			{ 0, 0, 0, 0 }
		};
		int option_index = 0;
		int c = getopt_long(argc, argv, "s:t", long_options, &option_index);

		if (c == -1) {
			break;
		}
		switch (c) {
		case 's':
			state_fd = atoi(optarg);
			break;
		case 't':
			test_config = true;
			break;
		default:
			fprintf(stderr, "Unknown option '%s'\n", argv[option_index]);
			exit(1);
		}
	}

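	// Typical invocations, given the options handled above (a sketch, not an
	// exhaustive list): "cubemap" or "cubemap some.config" for normal startup,
	// "cubemap --test-config some.config" for a parse-only check (as used by
	// dry_run_config()), and "cubemap --state <fd> some.config", which is what
	// the re-exec at the bottom of main() passes to the new binary.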
	string config_filename = "cubemap.config";
	if (optind < argc) {
		config_filename = argv[optind++];
	}

	// Canonicalize argv[0] and config_filename.
	char argv0_canon[PATH_MAX];
	char config_filename_canon[PATH_MAX];

	if (realpath(argv[0], argv0_canon) == NULL) {
		log_perror(argv[0]);
		exit(1);
	}
	if (realpath(config_filename.c_str(), config_filename_canon) == NULL) {
		log_perror(config_filename.c_str());
		exit(1);
	}

	// Now parse the configuration file.
	Config config;
	if (!parse_config(config_filename_canon, &config)) {
		exit(1);
	}
	if (test_config) {
		exit(0);
	}

	// Ideally we'd like to daemonize only when we've started up all threads etc.,
	// but daemon() forks, which is not good in multithreaded software, so we'll
	// have to do it here.
	if (config.daemonize) {
		if (daemon(0, 0) == -1) {
			log_perror("daemon");
			exit(1);
		}
	}

start:
	// Open logs as soon as possible.
	open_logs(config.log_destinations);

	log(INFO, "Cubemap " SERVER_VERSION " starting.");
	if (config.access_log_file.empty()) {
		// Create a dummy logger.
		access_log = new AccessLogThread();
	} else {
		access_log = new AccessLogThread(config.access_log_file);
	}
	access_log->run();

	servers = new ServerPool(config.num_servers);

	// Find all the streams that are to be deleted.
	set<string> deleted_urls;
	find_deleted_streams(config, &deleted_urls);

	CubemapStateProto loaded_state;
	struct timeval serialize_start;
	set<string> deserialized_urls;
	map<int, Acceptor *> deserialized_acceptors;
	multimap<string, InputWithRefcount> inputs;  // multimap due to older versions without deduplication.
	if (state_fd != -1) {
		log(INFO, "Deserializing state from previous process...");
		string serialized;
		if (!read_tempfile(state_fd, &serialized)) {
			exit(1);
		}
		if (!loaded_state.ParseFromString(serialized)) {
			log(ERROR, "Failed deserialization of state.");
			exit(1);
		}

		serialize_start.tv_sec = loaded_state.serialize_start_sec();
		serialize_start.tv_usec = loaded_state.serialize_start_usec();

		// Deserialize the streams.
		map<string, string> stream_headers_for_url;  // See below.
		for (int i = 0; i < loaded_state.streams_size(); ++i) {
			const StreamProto &stream = loaded_state.streams(i);

			if (deleted_urls.count(stream.url()) != 0) {
				// Delete the stream backlogs.
				for (int j = 0; j < stream.data_fds_size(); ++j) {
					safe_close(stream.data_fds(j));
				}
			} else {
				vector<int> data_fds;
				for (int j = 0; j < stream.data_fds_size(); ++j) {
					data_fds.push_back(stream.data_fds(j));
				}

				// Older versions stored the data once in the protobuf instead of
				// sending around file descriptors.
				if (data_fds.empty() && stream.has_data()) {
					data_fds.push_back(make_tempfile(stream.data()));
				}

				servers->add_stream_from_serialized(stream, data_fds);
				deserialized_urls.insert(stream.url());

				stream_headers_for_url.insert(make_pair(stream.url(), stream.stream_header()));
			}
		}

		// Deserialize the inputs. Note that we don't actually add them to any stream yet.
		for (int i = 0; i < loaded_state.inputs_size(); ++i) {
			InputProto serialized_input = loaded_state.inputs(i);

			// Older versions did not store the stream header in the input,
			// only in each stream. We need to have the stream header in the
			// input as well, in case we create a new stream reusing the same input.
			// Thus, we put it into place here if it's missing.
			if (!serialized_input.has_stream_header() &&
			    stream_headers_for_url.count(serialized_input.url()) != 0) {
				serialized_input.set_stream_header(stream_headers_for_url[serialized_input.url()]);
			}

			InputWithRefcount iwr;
			iwr.input = create_input(serialized_input);
			iwr.refcount = 0;
			inputs.insert(make_pair(serialized_input.url(), iwr));
		}

		// Deserialize the acceptors.
		for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
			deserialized_acceptors.insert(make_pair(
				loaded_state.acceptors(i).port(),
				new Acceptor(loaded_state.acceptors(i))));
		}

		log(INFO, "Deserialization done.");
	}

	// Add any new inputs coming from the config.
	create_config_inputs(config, &inputs);

	// Find all streams in the configuration file, create them, and connect to the inputs.
	create_streams(config, deserialized_urls, &inputs);
	vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);

	// Put back the existing clients. It doesn't matter which server we
	// allocate them to, so just do round-robin. However, we need to add
	// them after the mark pools have been set up.
	for (int i = 0; i < loaded_state.clients_size(); ++i) {
		if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
			safe_close(loaded_state.clients(i).sock());
		} else {
			servers->add_client_from_serialized(loaded_state.clients(i));
		}
	}

	servers->run();

	// Now delete all inputs that are no longer in use, and start the others.
	for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
	     input_it != inputs.end(); ) {
		if (input_it->second.refcount == 0) {
			log(WARNING, "Input '%s' no longer in use, closing.",
			    input_it->first.c_str());
			input_it->second.input->close_socket();
			delete input_it->second.input;
			inputs.erase(input_it++);
		} else {
			input_it->second.input->run();
			++input_it;
		}
	}

	// Start writing statistics.
	StatsThread *stats_thread = NULL;
	if (!config.stats_file.empty()) {
		stats_thread = new StatsThread(config.stats_file, config.stats_interval);
		stats_thread->run();
	}

	InputStatsThread *input_stats_thread = NULL;
	if (!config.input_stats_file.empty()) {
		vector<Input*> inputs_no_refcount;
		for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
		     input_it != inputs.end(); ++input_it) {
			inputs_no_refcount.push_back(input_it->second.input);
		}

		input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
		input_stats_thread->run();
	}

	struct timeval server_start;
	gettimeofday(&server_start, NULL);
	if (state_fd != -1) {
		// Measure the time from when the previous process started serializing
		// (the timestamp we deserialized above) to now, when basically everything
		// is up and running. This is, in other words, a conservative estimate of
		// how long our “glitch” period was, not counting of course reconnects if
		// the configuration changed.
		double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
			1e-6 * (server_start.tv_usec - serialize_start.tv_usec);
		log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);
	}

	while (!hupped) {
		usleep(100000);
	}

	// OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
	gettimeofday(&serialize_start, NULL);

	if (input_stats_thread != NULL) {
		input_stats_thread->stop();
		delete input_stats_thread;
	}
	if (stats_thread != NULL) {
		stats_thread->stop();
		delete stats_thread;
	}
	for (size_t i = 0; i < acceptors.size(); ++i) {
		acceptors[i]->stop();
	}
	for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
	     input_it != inputs.end();
	     ++input_it) {
		input_it->second.input->stop();
	}
	servers->stop();

	CubemapStateProto state;
	if (stopped) {
		log(INFO, "Shutting down.");
	} else {
		log(INFO, "Serializing state and re-execing...");
		state = collect_state(
			serialize_start, acceptors, inputs, servers);
		string serialized;
		state.SerializeToString(&serialized);
		state_fd = make_tempfile(serialized);
		if (state_fd == -1) {
			exit(1);
		}
	}
	delete servers;

	for (unsigned i = 0; i < mark_pools.size(); ++i) {
		delete mark_pools[i];
	}
	mark_pools.clear();

	access_log->stop();
	delete access_log;
	shut_down_logging();

	if (stopped) {
		exit(0);
	}

	// OK, so the signal was SIGHUP. Check that the new config is okay, then exec the new binary.
	if (!dry_run_config(argv0_canon, config_filename_canon)) {
		open_logs(config.log_destinations);
		log(ERROR, "%s --test-config failed. Restarting old version instead of new.", argv[0]);
		hupped = false;
		shut_down_logging();
		goto start;
	}

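	// Hand the state over to the new binary: the serialized state already sits
	// in a temporary file, so we only pass its file descriptor number via
	// --state (see the option parsing at the top of main()).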
	char buf[16];
	sprintf(buf, "%d", state_fd);

	for ( ;; ) {
		execlp(argv0_canon, argv0_canon, config_filename_canon, "--state", buf, NULL);
		open_logs(config.log_destinations);
		log_perror("execlp");
		log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv0_canon);
		shut_down_logging();
		usleep(200000);
	}
}