#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <systemd/sd-daemon.h>
#include <unistd.h>
#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "tlse.h"

#include "acceptor.h"
#include "accesslog.h"
#include "config.h"
#include "input.h"
#include "input_stats.h"
#include "log.h"
#include "sa_compare.h"
#include "serverpool.h"
#include "state.pb.h"
#include "stats.h"
#include "stream.h"
#include "util.h"
#include "version.h"

using namespace std;

AccessLogThread *access_log = NULL;
ServerPool *servers = NULL;
volatile bool hupped = false;
volatile bool stopped = false;

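// An input is identified by its source URL together with the encoding it is
// expected to deliver (raw or Metacube).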
typedef pair<string, Input::Encoding> InputKey;

namespace {

struct OrderByConnectionTime {
        bool operator() (const ClientProto &a, const ClientProto &b) const {
                if (a.connect_time_sec() != b.connect_time_sec())
                        return a.connect_time_sec() < b.connect_time_sec();
                return a.connect_time_nsec() < b.connect_time_nsec();
        }
};

// An arbitrary ordering.
struct AcceptorConfigCompare {
        bool operator() (const AcceptorConfig &a, const AcceptorConfig &b) const {
                int cmp = a.certificate_chain.compare(b.certificate_chain);
                if (cmp != 0) {
                        return cmp < 0;
                }

                cmp = a.private_key.compare(b.private_key);
                if (cmp != 0) {
                        return cmp < 0;
                }

                return Sockaddr6Compare()(a.addr, b.addr);
        }
};

}  // namespace

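// An Input together with the number of streams it currently feeds; inputs
// whose refcount is still zero after the configuration has been applied
// are closed.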
struct InputWithRefcount {
        Input *input;
        int refcount;
};

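// Signal handler. SIGHUP asks for a reload/re-exec; SIGINT additionally asks
// for a full shutdown.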
void hup(int signum)
{
        hupped = true;
        if (signum == SIGINT) {
                stopped = true;
        }
}

void do_nothing(int signum)
{
}

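// Collect the complete process state (servers, acceptors and inputs) into a
// protobuf, so that it can be handed over to the next process across a re-exec.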
CubemapStateProto collect_state(const timespec &serialize_start,
                                const vector<Acceptor *> acceptors,
                                const multimap<InputKey, InputWithRefcount> inputs,
                                ServerPool *servers)
{
        CubemapStateProto state = servers->serialize();  // Fills streams() and clients().
        state.set_serialize_start_sec(serialize_start.tv_sec);
        state.set_serialize_start_usec(serialize_start.tv_nsec / 1000);

        for (size_t i = 0; i < acceptors.size(); ++i) {
                state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
        }

        for (multimap<InputKey, InputWithRefcount>::const_iterator input_it = inputs.begin();
             input_it != inputs.end();
             ++input_it) {
                state.add_inputs()->MergeFrom(input_it->second.input->serialize());
        }

        return state;
}

// Find all port statements in the configuration file, and create acceptors for them.
vector<Acceptor *> create_acceptors(
        const Config &config,
        map<AcceptorConfig, Acceptor *, AcceptorConfigCompare> *deserialized_acceptors)
{
        vector<Acceptor *> acceptors;
        for (unsigned i = 0; i < config.acceptors.size(); ++i) {
                const AcceptorConfig &acceptor_config = config.acceptors[i];
                Acceptor *acceptor = NULL;
                map<AcceptorConfig, Acceptor *, AcceptorConfigCompare>::iterator deserialized_acceptor_it =
                        deserialized_acceptors->find(acceptor_config);
                if (deserialized_acceptor_it != deserialized_acceptors->end()) {
                        acceptor = deserialized_acceptor_it->second;
                        deserialized_acceptors->erase(deserialized_acceptor_it);
                } else {
                        int server_sock = create_server_socket(acceptor_config.addr, TCP_SOCKET);
                        acceptor = new Acceptor(server_sock, acceptor_config.addr,
                                                acceptor_config.certificate_chain,
                                                acceptor_config.private_key);
                }
                acceptor->run();
                acceptors.push_back(acceptor);
        }

        // Close all acceptors that are no longer in the configuration file.
        for (map<AcceptorConfig, Acceptor *, AcceptorConfigCompare>::iterator
                 acceptor_it = deserialized_acceptors->begin();
             acceptor_it != deserialized_acceptors->end();
             ++acceptor_it) {
                acceptor_it->second->close_socket();
                delete acceptor_it->second;
        }

        return acceptors;
}

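// Create an Input for the given source URL and encoding, unless the source is
// empty or an identical input already exists.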
void create_config_input(const string &src, Input::Encoding encoding, multimap<InputKey, InputWithRefcount> *inputs)
{
        if (src.empty()) {
                return;
        }
        InputKey key(src, encoding);
        if (inputs->count(key) != 0) {
                return;
        }

        InputWithRefcount iwr;
        iwr.input = create_input(src, encoding);
        if (iwr.input == NULL) {
                log(ERROR, "did not understand URL '%s' or source encoding was invalid, clients will not get any data.",
                        src.c_str());
                return;
        }
        iwr.refcount = 0;
        inputs->insert(make_pair(key, iwr));
}

// Find all streams in the configuration file, and create inputs for them.
void create_config_inputs(const Config &config, multimap<InputKey, InputWithRefcount> *inputs)
{
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
                if (stream_config.src != "delete") {
                        create_config_input(stream_config.src, Input::Encoding(stream_config.src_encoding), inputs);
                }
        }
        for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
                const UDPStreamConfig &udpstream_config = config.udpstreams[i];
                create_config_input(udpstream_config.src, Input::INPUT_ENCODING_RAW, inputs);
        }
}

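// Create or update all HTTP streams, UDP streams and gen204 (ping) endpoints
// from the configuration file, and hook them up to their inputs.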
void create_streams(const Config &config,
                    const set<string> &deserialized_urls,
                    multimap<InputKey, InputWithRefcount> *inputs)
{
        // HTTP streams.
        set<string> expecting_urls = deserialized_urls;
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
                int stream_index;

                expecting_urls.erase(stream_config.url);

                // Special-case deleted streams; they were never deserialized in the first place,
                // so just ignore them.
                if (stream_config.src == "delete") {
                        continue;
                }

                if (deserialized_urls.count(stream_config.url) == 0) {
                        stream_index = servers->add_stream(stream_config.url,
                                                           stream_config.backlog_size,
                                                           stream_config.prebuffering_bytes,
                                                           Stream::Encoding(stream_config.encoding),
                                                           Stream::Encoding(stream_config.src_encoding));
                } else {
                        stream_index = servers->lookup_stream_by_url(stream_config.url);
                        assert(stream_index != -1);
                        servers->set_backlog_size(stream_index, stream_config.backlog_size);
                        servers->set_prebuffering_bytes(stream_index, stream_config.prebuffering_bytes);
                        servers->set_encoding(stream_index,
                                              Stream::Encoding(stream_config.encoding));
                        servers->set_src_encoding(stream_index,
                                                  Stream::Encoding(stream_config.src_encoding));
                }

                servers->set_pacing_rate(stream_index, stream_config.pacing_rate);

                string src = stream_config.src;
                Input::Encoding src_encoding = Input::Encoding(stream_config.src_encoding);
                if (!src.empty()) {
                        multimap<InputKey, InputWithRefcount>::iterator input_it = inputs->find(make_pair(src, src_encoding));
                        if (input_it != inputs->end()) {
                                input_it->second.input->add_destination(stream_index);
                                ++input_it->second.refcount;
                        }
                }
        }

        // Warn about any streams we've lost.
        for (set<string>::const_iterator stream_it = expecting_urls.begin();
             stream_it != expecting_urls.end();
             ++stream_it) {
                string url = *stream_it;
                log(WARNING, "stream '%s' disappeared from the configuration file. "
                             "It will not be deleted, but clients will not get any new inputs. "
                             "If you really meant to delete it, set src=delete and reload.",
                             url.c_str());
        }

        // UDP streams.
        for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
                const UDPStreamConfig &udpstream_config = config.udpstreams[i];
                int stream_index = servers->add_udpstream(
                        udpstream_config.dst,
                        udpstream_config.pacing_rate,
                        udpstream_config.ttl,
                        udpstream_config.multicast_iface_index);

                string src = udpstream_config.src;
                if (!src.empty()) {
                        multimap<InputKey, InputWithRefcount>::iterator input_it = inputs->find(make_pair(src, Input::INPUT_ENCODING_RAW));
                        assert(input_it != inputs->end());
                        input_it->second.input->add_destination(stream_index);
                        ++input_it->second.refcount;
                }
        }

        // HTTP gen204 endpoints.
        for (unsigned i = 0; i < config.pings.size(); ++i) {
                const Gen204Config &ping_config = config.pings[i];
                servers->add_gen204(ping_config.url, ping_config.allow_origin);
        }
}

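// Set up all log destinations given in the configuration (file, console
// and/or syslog), and start logging.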
void open_logs(const vector<LogConfig> &log_destinations)
{
        for (size_t i = 0; i < log_destinations.size(); ++i) {
                if (log_destinations[i].type == LogConfig::LOG_TYPE_FILE) {
                        add_log_destination_file(log_destinations[i].filename);
                } else if (log_destinations[i].type == LogConfig::LOG_TYPE_CONSOLE) {
                        add_log_destination_console();
                } else if (log_destinations[i].type == LogConfig::LOG_TYPE_SYSLOG) {
                        add_log_destination_syslog();
                } else {
                        assert(false);
                }
        }
        start_logging();
}

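// Fork and run ourselves with --test-config to check that the new configuration
// parses cleanly before we commit to re-execing into it. Returns true if the
// child exited successfully.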
bool dry_run_config(const string &argv0, const string &config_filename)
{
        char *argv0_copy = strdup(argv0.c_str());
        char *config_filename_copy = strdup(config_filename.c_str());

        pid_t pid = fork();
        switch (pid) {
        case -1:
                log_perror("fork()");
                free(argv0_copy);
                free(config_filename_copy);
                return false;
        case 0:
                // Child.
                execlp(argv0_copy, argv0_copy, "--test-config", config_filename_copy, NULL);
                log_perror(argv0_copy);
                _exit(1);
        default:
                // Parent.
                break;
        }

        free(argv0_copy);
        free(config_filename_copy);

        int status;
        pid_t err;
        do {
                err = waitpid(pid, &status, 0);
        } while (err == -1 && errno == EINTR);

        if (err == -1) {
                log_perror("waitpid()");
                return false;
        }

        return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
}

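// Collect the URLs of all streams that are marked with src=delete in the
// configuration file.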
void find_deleted_streams(const Config &config, set<string> *deleted_urls)
{
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
                if (stream_config.src == "delete") {
                        log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
                        deleted_urls->insert(stream_config.url);
                }
        }
}

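// Overall lifecycle: parse options and the configuration file, optionally pick up
// serialized state from a previous process, bring up servers, inputs and acceptors,
// then wait for SIGHUP/SIGINT. On SIGHUP, serialize everything and re-exec ourselves;
// on SIGINT, shut down.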
int main(int argc, char **argv)
{
        signal(SIGHUP, hup);
        signal(SIGINT, hup);
        signal(SIGUSR1, do_nothing);  // Used in internal signalling.
        signal(SIGPIPE, SIG_IGN);

        tls_init();

        // Parse options.
        int state_fd = -1;
        bool test_config = false;
        for ( ;; ) {
                static const option long_options[] = {
                        { "state", required_argument, 0, 's' },
                        { "test-config", no_argument, 0, 't' },
                        { 0, 0, 0, 0 }
                };
                int option_index = 0;
                int c = getopt_long(argc, argv, "s:t", long_options, &option_index);

                if (c == -1) {
                        break;
                }
                switch (c) {
                case 's':
                        state_fd = atoi(optarg);
                        break;
                case 't':
                        test_config = true;
                        break;
                default:
                        fprintf(stderr, "Unknown option '%s'\n", argv[optind - 1]);
                        exit(1);
                }
        }

        string config_filename = "cubemap.config";
        if (optind < argc) {
                config_filename = argv[optind++];
        }

        // Canonicalize argv[0] and config_filename.
        char argv0_canon[PATH_MAX];
        char config_filename_canon[PATH_MAX];

        if (realpath("/proc/self/exe", argv0_canon) == NULL) {
                log_perror(argv[0]);
                exit(1);
        }
        if (realpath(config_filename.c_str(), config_filename_canon) == NULL) {
                log_perror(config_filename.c_str());
                exit(1);
        }

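        // The configuration is a plain-text file of directives. A minimal example,
        // assuming the directive syntax of Cubemap's sample configuration (not
        // defined in this file), might look like:
        //
        //   num_servers 2
        //   port 9094
        //   error_log type=console
        //   stream /test.flv src=http://example.com:4013/test.flv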
        // Now parse the configuration file.
        Config config;
        if (!parse_config(config_filename_canon, &config)) {
                exit(1);
        }
        if (test_config) {
                exit(0);
        }

        // Ideally we'd like to daemonize only when we've started up all threads etc.,
        // but daemon() forks, which is not good in multithreaded software, so we'll
        // have to do it here.
        if (config.daemonize) {
                if (daemon(0, 0) == -1) {
                        log_perror("daemon");
                        exit(1);
                }
        }

start:
        // Open logs as soon as possible.
        open_logs(config.log_destinations);

        log(INFO, "Cubemap " SERVER_VERSION " starting.");
        if (config.access_log_file.empty()) {
                // Create a dummy logger.
                access_log = new AccessLogThread();
        } else {
                access_log = new AccessLogThread(config.access_log_file);
        }
        access_log->run();

        servers = new ServerPool(config.num_servers);

        // Find all the streams that are to be deleted.
        set<string> deleted_urls;
        find_deleted_streams(config, &deleted_urls);

        CubemapStateProto loaded_state;
        timespec serialize_start;
        set<string> deserialized_urls;
        map<AcceptorConfig, Acceptor *, AcceptorConfigCompare> deserialized_acceptors;
        multimap<InputKey, InputWithRefcount> inputs;  // multimap due to older versions without deduplication.
        if (state_fd != -1) {
                log(INFO, "Deserializing state from previous process...");
                string serialized;
                if (!read_tempfile_and_close(state_fd, &serialized)) {
                        exit(1);
                }
                if (!loaded_state.ParseFromString(serialized)) {
                        log(ERROR, "Failed deserialization of state.");
                        exit(1);
                }

                serialize_start.tv_sec = loaded_state.serialize_start_sec();
                serialize_start.tv_nsec = loaded_state.serialize_start_usec() * 1000ull;

                // Deserialize the streams.
                map<string, string> stream_headers_for_url;  // See below.
                for (int i = 0; i < loaded_state.streams_size(); ++i) {
                        const StreamProto &stream = loaded_state.streams(i);

                        if (deleted_urls.count(stream.url()) != 0) {
                                // Delete the stream backlogs.
                                for (int j = 0; j < stream.data_fds_size(); ++j) {
                                        safe_close(stream.data_fds(j));
                                }
                        } else {
                                vector<int> data_fds;
                                for (int j = 0; j < stream.data_fds_size(); ++j) {
                                        data_fds.push_back(stream.data_fds(j));
                                }

                                servers->add_stream_from_serialized(stream, data_fds);
                                deserialized_urls.insert(stream.url());

                                stream_headers_for_url.insert(make_pair(stream.url(), stream.stream_header()));
                        }
                }

                // Deserialize the inputs. Note that we don't actually add them to any stream yet.
                for (int i = 0; i < loaded_state.inputs_size(); ++i) {
                        InputProto serialized_input = loaded_state.inputs(i);

                        InputWithRefcount iwr;
                        iwr.input = create_input(serialized_input);
                        iwr.refcount = 0;

                        Input::Encoding src_encoding = serialized_input.is_metacube_encoded() ?
                                Input::INPUT_ENCODING_METACUBE :
                                Input::INPUT_ENCODING_RAW;
                        InputKey key(serialized_input.url(), src_encoding);
                        inputs.insert(make_pair(key, iwr));
                }

                // Deserialize the acceptors.
                for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
                        AcceptorConfig config;
                        config.addr = extract_address_from_acceptor_proto(loaded_state.acceptors(i));
                        config.certificate_chain = loaded_state.acceptors(i).certificate_chain();
                        config.private_key = loaded_state.acceptors(i).private_key();
                        deserialized_acceptors.insert(make_pair(
                                config,
                                new Acceptor(loaded_state.acceptors(i))));
                }

                log(INFO, "Deserialization done.");
        }

        // Add any new inputs coming from the config.
        create_config_inputs(config, &inputs);

        // Find all streams in the configuration file, create them, and connect to the inputs.
        create_streams(config, deserialized_urls, &inputs);
        vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);

        // Make all the servers create TLS contexts for every TLS keypair we have.
        for (Acceptor *acceptor : acceptors) {
                if (acceptor->is_tls()) {
                        servers->create_tls_context_for_acceptor(acceptor);
                }
        }

        // Put back the existing clients. It doesn't matter which server we
        // allocate them to, so just do round-robin. However, we need to sort them
        // by connection time first, since add_client_serialized() expects that.
        sort(loaded_state.mutable_clients()->begin(),
             loaded_state.mutable_clients()->end(),
             OrderByConnectionTime());
        for (int i = 0; i < loaded_state.clients_size(); ++i) {
                if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
                        safe_close(loaded_state.clients(i).sock());
                } else {
                        servers->add_client_from_serialized(loaded_state.clients(i));
                }
        }

        servers->run();

        // Now delete all inputs that are no longer in use, and start the others.
        for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
             input_it != inputs.end(); ) {
                if (input_it->second.refcount == 0) {
                        if (input_it->first.second == Input::INPUT_ENCODING_RAW) {
                                log(WARNING, "Raw input '%s' no longer in use, closing.",
                                    input_it->first.first.c_str());
                        } else {
                                assert(input_it->first.second == Input::INPUT_ENCODING_METACUBE);
                                log(WARNING, "Metacube input '%s' no longer in use, closing.",
                                    input_it->first.first.c_str());
                        }
                        input_it->second.input->close_socket();
                        delete input_it->second.input;
                        inputs.erase(input_it++);
                } else {
                        input_it->second.input->run();
                        ++input_it;
                }
        }

        // Start writing statistics.
        StatsThread *stats_thread = NULL;
        if (!config.stats_file.empty()) {
                stats_thread = new StatsThread(config.stats_file, config.stats_interval);
                stats_thread->run();
        }

        InputStatsThread *input_stats_thread = NULL;
        if (!config.input_stats_file.empty()) {
                vector<Input*> inputs_no_refcount;
                for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
                     input_it != inputs.end(); ++input_it) {
                        inputs_no_refcount.push_back(input_it->second.input);
                }

                input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
                input_stats_thread->run();
        }

        timespec server_start;
        int err = clock_gettime(CLOCK_MONOTONIC, &server_start);
        assert(err != -1);
        if (state_fd != -1) {
                // Measure the time from when the previous process started serializing its state
                // (see serialize_start below) to now, when basically everything is up and running.
                // This is, in other words, a conservative estimate of how long our “glitch” period
                // was, not counting of course reconnects if the configuration changed.
                double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
                        1e-9 * (server_start.tv_nsec - serialize_start.tv_nsec);
                log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);
        }

        sd_notify(0, "READY=1");

        while (!hupped) {
                usleep(100000);
        }

        if (stopped) {
                sd_notify(0, "STOPPING=1");
        } else {
                sd_notify(0, "RELOADING=1");
        }

        // OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
        err = clock_gettime(CLOCK_MONOTONIC, &serialize_start);
        assert(err != -1);

        if (input_stats_thread != NULL) {
                input_stats_thread->stop();
                delete input_stats_thread;
        }
        if (stats_thread != NULL) {
                stats_thread->stop();
                delete stats_thread;
        }
        for (size_t i = 0; i < acceptors.size(); ++i) {
                acceptors[i]->stop();
        }
        for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
             input_it != inputs.end();
             ++input_it) {
                input_it->second.input->stop();
        }
        servers->stop();

        CubemapStateProto state;
        if (stopped) {
                log(INFO, "Shutting down.");
        } else {
                log(INFO, "Serializing state and re-execing...");
                state = collect_state(
                        serialize_start, acceptors, inputs, servers);
                string serialized;
                state.SerializeToString(&serialized);
                state_fd = make_tempfile(serialized);
                if (state_fd == -1) {
                        exit(1);
                }
        }
        delete servers;

        access_log->stop();
        delete access_log;
        shut_down_logging();

        if (stopped) {
                exit(0);
        }

        // OK, so the signal was SIGHUP. Check that the new config is okay, then exec the new binary.
        if (!dry_run_config(argv0_canon, config_filename_canon)) {
                open_logs(config.log_destinations);
                log(ERROR, "%s --test-config failed. Restarting old version instead of new.", argv[0]);
                hupped = false;
                shut_down_logging();
                goto start;
        }

        char buf[16];
        sprintf(buf, "%d", state_fd);

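        // Keep trying to re-exec until it succeeds; if it keeps failing,
        // there is nothing better we can do than to retry.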
        for ( ;; ) {
                execlp(argv0_canon, argv0_canon, config_filename_canon, "--state", buf, NULL);
                open_logs(config.log_destinations);
                log_perror("execlp");
                log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv0_canon);
                shut_down_logging();
                usleep(200000);
        }
}