#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <systemd/sd-daemon.h>
#include <unistd.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "tlse.h"

#include "acceptor.h"
#include "accesslog.h"
#include "config.h"
#include "input.h"
#include "input_stats.h"
#include "log.h"
#include "sa_compare.h"
#include "serverpool.h"
#include "state.pb.h"
#include "stats.h"
#include "stream.h"
#include "util.h"
#include "version.h"

using namespace std;

AccessLogThread *access_log = nullptr;
ServerPool *servers = nullptr;
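// Set from the signal handler below; polled by the main loop in main().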
volatile bool hupped = false;
volatile bool stopped = false;

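// Inputs are keyed on (source URL, encoding), so the same URL can exist
// both as a raw and as a Metacube input.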
typedef pair<string, Input::Encoding> InputKey;

namespace {

struct OrderByConnectionTime {
        bool operator() (const ClientProto &a, const ClientProto &b) const {
                if (a.connect_time_sec() != b.connect_time_sec())
                        return a.connect_time_sec() < b.connect_time_sec();
                return a.connect_time_nsec() < b.connect_time_nsec();
        }
};

// An arbitrary ordering.
struct AcceptorConfigCompare {
        bool operator() (const AcceptorConfig &a, const AcceptorConfig &b) const {
                int cmp = a.certificate_chain.compare(b.certificate_chain);
                if (cmp != 0) {
                        return cmp < 0;
                }

                cmp = a.private_key.compare(b.private_key);
                if (cmp != 0) {
                        return cmp < 0;
                }

                return Sockaddr6Compare()(a.addr, b.addr);
        }
};

}  // namespace

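// An input together with the number of streams that currently use it;
// inputs that end up with refcount 0 after a reload are closed.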
struct InputWithRefcount {
        Input *input;
        int refcount;
};

void hup(int signum)
{
        hupped = true;
        if (signum == SIGINT) {
                stopped = true;
        }
}

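// Handler for SIGUSR1, which is used only for internal signalling (see main());
// the delivery of the signal itself is the point, so there is nothing to do here.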
void do_nothing(int signum)
{
}

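// Collect the state of the whole process (streams, clients, acceptors and inputs)
// into a single protobuf, so that it can be handed over to the next process on re-exec.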
CubemapStateProto collect_state(const timespec &serialize_start,
                                const vector<Acceptor *> acceptors,
                                const multimap<InputKey, InputWithRefcount> inputs,
                                ServerPool *servers)
{
        CubemapStateProto state = servers->serialize();  // Fills streams() and clients().
        state.set_serialize_start_sec(serialize_start.tv_sec);
        state.set_serialize_start_usec(serialize_start.tv_nsec / 1000);

        for (Acceptor *acceptor : acceptors) {
                state.add_acceptors()->MergeFrom(acceptor->serialize());
        }

        for (const auto &key_and_input_with_refcount : inputs) {
                state.add_inputs()->MergeFrom(key_and_input_with_refcount.second.input->serialize());
        }

        return state;
}

// Find all port statements in the configuration file, and create acceptors for them.
vector<Acceptor *> create_acceptors(
        const Config &config,
        map<AcceptorConfig, Acceptor *, AcceptorConfigCompare> *deserialized_acceptors)
{
        vector<Acceptor *> acceptors;
        for (const AcceptorConfig &acceptor_config : config.acceptors) {
                Acceptor *acceptor = nullptr;
                const auto deserialized_acceptor_it = deserialized_acceptors->find(acceptor_config);
                if (deserialized_acceptor_it != deserialized_acceptors->end()) {
                        acceptor = deserialized_acceptor_it->second;
                        deserialized_acceptors->erase(deserialized_acceptor_it);
                } else {
                        int server_sock = create_server_socket(acceptor_config.addr, TCP_SOCKET);
                        acceptor = new Acceptor(server_sock, acceptor_config.addr,
                                                acceptor_config.certificate_chain,
                                                acceptor_config.private_key);
                }
                acceptor->run();
                acceptors.push_back(acceptor);
        }

        // Close all acceptors that are no longer in the configuration file.
        for (auto &config_and_acceptor : *deserialized_acceptors) {
                config_and_acceptor.second->close_socket();
                delete config_and_acceptor.second;
        }

        return acceptors;
}

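// Create a single input from a src= statement, unless an identical input
// (same URL and encoding) already exists.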
void create_config_input(const string &src, Input::Encoding encoding, multimap<InputKey, InputWithRefcount> *inputs)
{
        if (src.empty()) {
                return;
        }
        InputKey key(src, encoding);
        if (inputs->count(key) != 0) {
                return;
        }

        InputWithRefcount iwr;
        iwr.input = create_input(src, encoding);
        if (iwr.input == nullptr) {
                log(ERROR, "did not understand URL '%s' or the source encoding was invalid; clients will not get any data.",
                        src.c_str());
                return;
        }
        iwr.refcount = 0;
        inputs->insert(make_pair(key, iwr));
}

// Find all streams in the configuration file, and create inputs for them.
void create_config_inputs(const Config &config, multimap<InputKey, InputWithRefcount> *inputs)
{
        for (const StreamConfig &stream_config : config.streams) {
                if (stream_config.src == "delete") {
                        // Ignored for pre-1.4.0 configuration compatibility.
                        continue;
                }
                create_config_input(stream_config.src, Input::Encoding(stream_config.src_encoding), inputs);
        }
        for (const UDPStreamConfig &udpstream_config : config.udpstreams) {
                create_config_input(udpstream_config.src, Input::INPUT_ENCODING_RAW, inputs);
        }
}

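// Create all streams (HTTP, UDP and gen204) from the configuration file, reusing
// deserialized streams where they exist, and connect each stream to its input.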
void create_streams(const Config &config,
                    const set<string> &deserialized_urls,
                    multimap<InputKey, InputWithRefcount> *inputs)
{
        // HTTP streams.
        set<string> expecting_urls = deserialized_urls;
        for (const StreamConfig &stream_config : config.streams) {
                int stream_index;

                expecting_urls.erase(stream_config.url);

                if (stream_config.src == "delete") {
                        // Ignored for pre-1.4.0 configuration compatibility.
                        continue;
                }

                if (deserialized_urls.count(stream_config.url) == 0) {
                        stream_index = servers->add_stream(stream_config.url,
                                                           stream_config.hls_url,
                                                           stream_config.backlog_size,
                                                           stream_config.prebuffering_bytes,
                                                           Stream::Encoding(stream_config.encoding),
                                                           Stream::Encoding(stream_config.src_encoding),
                                                           stream_config.hls_frag_duration,
                                                           stream_config.hls_backlog_margin,
                                                           stream_config.allow_origin);
                } else {
                        stream_index = servers->lookup_stream_by_url(stream_config.url);
                        assert(stream_index != -1);
                        servers->set_backlog_size(stream_index, stream_config.backlog_size);
                        if (!stream_config.hls_url.empty()) {
                                servers->register_hls_url(stream_index, stream_config.hls_url);
                        }
                        servers->set_prebuffering_bytes(stream_index, stream_config.prebuffering_bytes);
                        servers->set_encoding(stream_index,
                                              Stream::Encoding(stream_config.encoding));
                        servers->set_src_encoding(stream_index,
                                                  Stream::Encoding(stream_config.src_encoding));
                        servers->set_hls_frag_duration(stream_index, stream_config.hls_frag_duration);
                        servers->set_hls_backlog_margin(stream_index, stream_config.hls_backlog_margin);
                        servers->set_allow_origin(stream_index, stream_config.allow_origin);
                }

                servers->set_pacing_rate(stream_index, stream_config.pacing_rate);

                string src = stream_config.src;
                Input::Encoding src_encoding = Input::Encoding(stream_config.src_encoding);
                if (!src.empty()) {
                        const auto input_it = inputs->find(make_pair(src, src_encoding));
                        if (input_it != inputs->end()) {
                                input_it->second.input->add_destination(stream_index);
                                ++input_it->second.refcount;
                        }
                }
        }

        // Warn about any streams we've lost.
        for (const string &url : expecting_urls) {
                log(WARNING, "stream '%s' disappeared from the configuration file. "
                             "It will not be deleted, but clients will not get any new inputs. "
                             "If you really meant to delete it, set src=delete and reload.",
                             url.c_str());
        }

        // UDP streams.
        for (const UDPStreamConfig &udpstream_config : config.udpstreams) {
                int stream_index = servers->add_udpstream(
                        udpstream_config.dst,
                        udpstream_config.pacing_rate,
                        udpstream_config.ttl,
                        udpstream_config.multicast_iface_index);

                string src = udpstream_config.src;
                if (!src.empty()) {
                        auto input_it = inputs->find(make_pair(src, Input::INPUT_ENCODING_RAW));
                        assert(input_it != inputs->end());
                        input_it->second.input->add_destination(stream_index);
                        ++input_it->second.refcount;
                }
        }

        // HTTP gen204 endpoints.
        for (const Gen204Config &ping_config : config.pings) {
                servers->add_gen204(ping_config.url, ping_config.allow_origin);
        }
}

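// Open all configured log destinations and start the logging thread.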
void open_logs(const vector<LogConfig> &log_destinations)
{
        for (const LogConfig &log_destination : log_destinations) {
                if (log_destination.type == LogConfig::LOG_TYPE_FILE) {
                        add_log_destination_file(log_destination.filename);
                } else if (log_destination.type == LogConfig::LOG_TYPE_CONSOLE) {
                        add_log_destination_console();
                } else if (log_destination.type == LogConfig::LOG_TYPE_SYSLOG) {
                        add_log_destination_syslog();
                } else {
                        assert(false);
                }
        }
        start_logging();
}

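// Fork and run ourselves with --test-config on the given configuration file,
// so that we can check that it parses before re-exec'ing into the new binary.
// Returns true if the configuration was accepted.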
bool dry_run_config(const string &argv0, const string &config_filename)
{
        char *argv0_copy = strdup(argv0.c_str());
        char *config_filename_copy = strdup(config_filename.c_str());

        pid_t pid = fork();
        switch (pid) {
        case -1:
                log_perror("fork()");
                free(argv0_copy);
                free(config_filename_copy);
                return false;
        case 0:
                // Child.
                execlp(argv0_copy, argv0_copy, "--test-config", config_filename_copy, nullptr);
                log_perror(argv0_copy);
                _exit(1);
        default:
                // Parent.
                break;
        }

        free(argv0_copy);
        free(config_filename_copy);

        int status;
        pid_t err;
        do {
                err = waitpid(pid, &status, 0);
        } while (err == -1 && errno == EINTR);

        if (err == -1) {
                log_perror("waitpid()");
                return false;
        }

        return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
}

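// Collect the URLs of all streams that should exist according to the
// configuration file, so we know which deserialized streams and clients to keep.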
void find_all_streams(const Config &config, set<string> *all_urls)
{
        for (const StreamConfig &stream_config : config.streams) {
                if (stream_config.src == "delete") {
                        log(WARNING, "stream '%s' has src=delete; ignoring it. Since Cubemap 1.4.0, you do not "
                                     "need to set src=delete to delete streams anymore; just delete them from "
                                     "the configuration file.",
                                     stream_config.url.c_str());
                } else {
                        all_urls->insert(stream_config.url);
                }
        }
}

int main(int argc, char **argv)
{
        signal(SIGHUP, hup);
        signal(SIGINT, hup);
        signal(SIGUSR1, do_nothing);  // Used in internal signalling.
        signal(SIGPIPE, SIG_IGN);

        tls_init();

        // Parse options.
        int state_fd = -1;
        bool test_config = false;
        for ( ;; ) {
                static const option long_options[] = {
                        { "state", required_argument, 0, 's' },
                        { "test-config", no_argument, 0, 't' },
                        { 0, 0, 0, 0 }
                };
                int option_index = 0;
                int c = getopt_long(argc, argv, "s:t", long_options, &option_index);

                if (c == -1) {
                        break;
                }
                switch (c) {
                case 's':
                        state_fd = atoi(optarg);
                        break;
                case 't':
                        test_config = true;
                        break;
                default:
                        fprintf(stderr, "Unknown option '%s'\n", argv[option_index]);
                        exit(1);
                }
        }

        string config_filename = "cubemap.config";
        if (optind < argc) {
                config_filename = argv[optind++];
        }

        // Canonicalize argv[0] and config_filename.
        char argv0_canon[PATH_MAX];
        char config_filename_canon[PATH_MAX];

        if (realpath("/proc/self/exe", argv0_canon) == nullptr) {
                log_perror(argv[0]);
                exit(1);
        }
        if (realpath(config_filename.c_str(), config_filename_canon) == nullptr) {
                log_perror(config_filename.c_str());
                exit(1);
        }

        // Now parse the configuration file.
        Config config;
        if (!parse_config(config_filename_canon, &config)) {
                exit(1);
        }
        if (test_config) {
                exit(0);
        }

        // Ideally we'd like to daemonize only when we've started up all threads etc.,
        // but daemon() forks, which is not good in multithreaded software, so we'll
        // have to do it here.
        if (config.daemonize) {
                if (daemon(0, 0) == -1) {
                        log_perror("daemon");
                        exit(1);
                }
        }

start:
        // Open logs as soon as possible.
        open_logs(config.log_destinations);

        log(INFO, "Cubemap " SERVER_VERSION " starting.");
        if (config.access_log_file.empty()) {
                // Create a dummy logger.
                access_log = new AccessLogThread();
        } else {
                access_log = new AccessLogThread(config.access_log_file);
        }
        access_log->run();

        servers = new ServerPool(config.num_servers);

        // Find all the streams that are to be kept.
        set<string> all_urls;
        find_all_streams(config, &all_urls);

        CubemapStateProto loaded_state;
        timespec serialize_start;
        set<string> deserialized_urls;
        map<AcceptorConfig, Acceptor *, AcceptorConfigCompare> deserialized_acceptors;
        multimap<InputKey, InputWithRefcount> inputs;  // multimap due to older versions without deduplication.
        if (state_fd != -1) {
                log(INFO, "Deserializing state from previous process...");
                string serialized;
                if (!read_tempfile_and_close(state_fd, &serialized)) {
                        exit(1);
                }
                if (!loaded_state.ParseFromString(serialized)) {
                        log(ERROR, "Failed deserialization of state.");
                        exit(1);
                }

                serialize_start.tv_sec = loaded_state.serialize_start_sec();
                serialize_start.tv_nsec = loaded_state.serialize_start_usec() * 1000ull;

                // Deserialize the streams.
                unordered_map<string, string> stream_headers_for_url;  // See below.
                for (const StreamProto &stream : loaded_state.streams()) {
                        if (all_urls.count(stream.url()) == 0) {
                                // Delete the stream backlogs.
                                log(INFO, "Deleting stream '%s'.", stream.url().c_str());
                                for (const int fd : stream.data_fds()) {
                                        safe_close(fd);
                                }
                        } else {
                                vector<int> data_fds;
                                for (const int fd : stream.data_fds()) {
                                        data_fds.push_back(fd);
                                }

                                servers->add_stream_from_serialized(stream, data_fds);
                                deserialized_urls.insert(stream.url());

                                stream_headers_for_url.insert(make_pair(stream.url(), stream.stream_header()));
                        }
                }

                // Deserialize the inputs. Note that we don't actually add them to any stream yet.
                for (const InputProto &serialized_input : loaded_state.inputs()) {
                        InputWithRefcount iwr;
                        iwr.input = create_input(serialized_input);
                        iwr.refcount = 0;

                        Input::Encoding src_encoding = serialized_input.is_metacube_encoded() ?
                                Input::INPUT_ENCODING_METACUBE :
                                Input::INPUT_ENCODING_RAW;
                        InputKey key(serialized_input.url(), src_encoding);
                        inputs.insert(make_pair(key, iwr));
                }

                // Deserialize the acceptors.
                for (const AcceptorProto &serialized_acceptor : loaded_state.acceptors()) {
                        AcceptorConfig config;
                        config.addr = extract_address_from_acceptor_proto(serialized_acceptor);
                        config.certificate_chain = serialized_acceptor.certificate_chain();
                        config.private_key = serialized_acceptor.private_key();
                        deserialized_acceptors.insert(make_pair(
                                config,
                                new Acceptor(serialized_acceptor)));
                }

                log(INFO, "Deserialization done.");
        }

        // Add any new inputs coming from the config.
        create_config_inputs(config, &inputs);

        // Find all streams in the configuration file, create them, and connect to the inputs.
        create_streams(config, deserialized_urls, &inputs);
        vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);

        // Make all the servers create TLS contexts for every TLS keypair we have.
        for (Acceptor *acceptor : acceptors) {
                if (acceptor->is_tls()) {
                        servers->create_tls_context_for_acceptor(acceptor);
                }
        }

        // Allocate strings for the short responses.
        vector<shared_ptr<const string>> short_response_pool;
        for (const ShortResponsePool &str : loaded_state.short_response_pool()) {
                short_response_pool.emplace_back(new string(str.header_or_short_response()));
        }

        // Put back the existing clients. It doesn't matter which server we
        // allocate them to, so just do round-robin. However, we need to sort them
        // by connection time first, since add_client_from_serialized() expects that.
        sort(loaded_state.mutable_clients()->begin(),
             loaded_state.mutable_clients()->end(),
             OrderByConnectionTime());
        for (int i = 0; i < loaded_state.clients_size(); ++i) {
                if (!loaded_state.clients(i).url().empty() &&
                    all_urls.count(loaded_state.clients(i).url()) == 0) {
                        // Belongs to a dead stream (not keepalive), so we just have to close.
                        safe_close(loaded_state.clients(i).sock());
                } else {
                        servers->add_client_from_serialized(loaded_state.clients(i), short_response_pool);
                }
        }

        short_response_pool.clear();  // No longer needed; the clients have their own refcounts now.

        // Put back the HLS zombies. There's no really good allocation here
        // except round-robin; it would be marginally more efficient to match it
        // to the client (since that would have them deleted immediately when
        // the client requests the next fragment, instead of being later weeded
        // out during statistics collection), but it's not a big deal.
        for (const HLSZombieProto &zombie_proto : loaded_state.hls_zombies()) {
                servers->add_hls_zombie_from_serialized(zombie_proto);
        }

        servers->run();

        // Now delete all inputs that are no longer in use, and start the others.
        for (auto input_it = inputs.begin(); input_it != inputs.end(); ) {
                if (input_it->second.refcount == 0) {
                        if (input_it->first.second == Input::INPUT_ENCODING_RAW) {
                                log(WARNING, "Raw input '%s' no longer in use, closing.",
                                    input_it->first.first.c_str());
                        } else {
                                assert(input_it->first.second == Input::INPUT_ENCODING_METACUBE);
                                log(WARNING, "Metacube input '%s' no longer in use, closing.",
                                    input_it->first.first.c_str());
                        }
                        input_it->second.input->close_socket();
                        delete input_it->second.input;
                        inputs.erase(input_it++);
                } else {
                        input_it->second.input->run();
                        ++input_it;
                }
        }

        // Start writing statistics.
        unique_ptr<StatsThread> stats_thread;
        if (!config.stats_file.empty()) {
                stats_thread.reset(new StatsThread(config.stats_file, config.stats_interval));
                stats_thread->run();
        }

        unique_ptr<InputStatsThread> input_stats_thread;
        if (!config.input_stats_file.empty()) {
                vector<Input*> inputs_no_refcount;
                for (const auto &key_and_input_with_refcount : inputs) {
                        inputs_no_refcount.push_back(key_and_input_with_refcount.second.input);
                }

                input_stats_thread.reset(new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount));
                input_stats_thread->run();
        }

        timespec server_start;
        int err = clock_gettime(CLOCK_MONOTONIC, &server_start);
        assert(err != -1);
        if (state_fd != -1) {
                // Measure the time from when we started deserializing (see below) to now,
                // when basically everything is up and running. This is, in other words, a
                // conservative estimate of how long our “glitch” period was, not counting,
                // of course, reconnects if the configuration changed.
                double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
                        1e-9 * (server_start.tv_nsec - serialize_start.tv_nsec);
                log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);
        }

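        // Tell systemd that we are up and running, then wait for a SIGHUP
        // (reload/re-exec) or SIGINT (shutdown).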
        sd_notify(0, "READY=1");

        while (!hupped) {
                usleep(100000);
        }

        if (stopped) {
                sd_notify(0, "STOPPING=1");
        } else {
                sd_notify(0, "RELOADING=1");
        }

        // OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
        err = clock_gettime(CLOCK_MONOTONIC, &serialize_start);
        assert(err != -1);

        if (input_stats_thread != nullptr) {
                input_stats_thread->stop();
                input_stats_thread.reset();
        }
        if (stats_thread != nullptr) {
                stats_thread->stop();
                stats_thread.reset();
        }
        for (Acceptor *acceptor : acceptors) {
                acceptor->stop();
        }
        for (const auto &key_and_input_with_refcount : inputs) {
                key_and_input_with_refcount.second.input->stop();
        }
        servers->stop();

        CubemapStateProto state;
        if (stopped) {
                log(INFO, "Shutting down.");
        } else {
                log(INFO, "Serializing state and re-execing...");
                state = collect_state(
                        serialize_start, acceptors, inputs, servers);
                string serialized;
                state.SerializeToString(&serialized);
                state_fd = make_tempfile(serialized);
                if (state_fd == -1) {
                        exit(1);
                }
        }
        delete servers;

        access_log->stop();
        delete access_log;
        shut_down_logging();

        if (stopped) {
                exit(0);
        }

        // OK, so the signal was SIGHUP. Check that the new config is okay, then exec the new binary.
        if (!dry_run_config(argv0_canon, config_filename_canon)) {
                open_logs(config.log_destinations);
                log(ERROR, "%s --test-config failed. Restarting old version instead of new.", argv[0]);
                hupped = false;
                shut_down_logging();
                goto start;
        }

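        // Hand the serialized state over to the new process by passing the
        // temporary file descriptor number on the command line.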
        char buf[16];
        sprintf(buf, "%d", state_fd);

        for ( ;; ) {
                execlp(argv0_canon, argv0_canon, config_filename_canon, "--state", buf, nullptr);
                open_logs(config.log_destinations);
                log_perror("execlp");
                log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv0_canon);
                shut_down_logging();
                usleep(200000);
        }
}