#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "acceptor.h"
#include "accesslog.h"
#include "config.h"
#include "input.h"
#include "input_stats.h"
#include "log.h"
#include "markpool.h"
#include "sa_compare.h"
#include "serverpool.h"
#include "state.pb.h"
#include "stats.h"
#include "stream.h"
#include "util.h"
#include "version.h"

using namespace std;

AccessLogThread *access_log = NULL;
ServerPool *servers = NULL;
vector<MarkPool *> mark_pools;
volatile bool hupped = false;
volatile bool stopped = false;

struct InputWithRefcount {
	Input *input;
	int refcount;
};

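// Signal handler. SIGHUP asks us to serialize state and re-exec ourselves;
// SIGINT additionally asks us to shut down for good.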
void hup(int signum)
{
	hupped = true;
	if (signum == SIGINT) {
		stopped = true;
	}
}

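// Dummy handler. SIGUSR1 is sent as internal signalling (see main()); installing
// a no-op handler means the signal's only effect is to interrupt blocking system
// calls with EINTR, without terminating the process.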
void do_nothing(int signum)
{
}

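// Gather the complete state of the process (streams, clients, acceptors and inputs)
// into a protobuf, so that it can be handed over to the next process on re-exec.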
CubemapStateProto collect_state(const timeval &serialize_start,
                                const vector<Acceptor *> &acceptors,
                                const multimap<string, InputWithRefcount> &inputs,
                                ServerPool *servers)
{
	CubemapStateProto state = servers->serialize();  // Fills streams() and clients().
	state.set_serialize_start_sec(serialize_start.tv_sec);
	state.set_serialize_start_usec(serialize_start.tv_usec);

	for (size_t i = 0; i < acceptors.size(); ++i) {
		state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
	}

	for (multimap<string, InputWithRefcount>::const_iterator input_it = inputs.begin();
	     input_it != inputs.end();
	     ++input_it) {
		state.add_inputs()->MergeFrom(input_it->second.input->serialize());
	}

	return state;
}

// Find all port statements in the configuration file, and create acceptors for them.
vector<Acceptor *> create_acceptors(
	const Config &config,
	map<sockaddr_in6, Acceptor *, Sockaddr6Compare> *deserialized_acceptors)
{
	vector<Acceptor *> acceptors;
	for (unsigned i = 0; i < config.acceptors.size(); ++i) {
		const AcceptorConfig &acceptor_config = config.acceptors[i];
		Acceptor *acceptor = NULL;
		map<sockaddr_in6, Acceptor *, Sockaddr6Compare>::iterator deserialized_acceptor_it =
			deserialized_acceptors->find(acceptor_config.addr);
		if (deserialized_acceptor_it != deserialized_acceptors->end()) {
			acceptor = deserialized_acceptor_it->second;
			deserialized_acceptors->erase(deserialized_acceptor_it);
		} else {
			int server_sock = create_server_socket(acceptor_config.addr, TCP_SOCKET);
			acceptor = new Acceptor(server_sock, acceptor_config.addr);
		}
		acceptor->run();
		acceptors.push_back(acceptor);
	}

	// Close all acceptors that are no longer in the configuration file.
	for (map<sockaddr_in6, Acceptor *, Sockaddr6Compare>::iterator
	         acceptor_it = deserialized_acceptors->begin();
	     acceptor_it != deserialized_acceptors->end();
	     ++acceptor_it) {
		acceptor_it->second->close_socket();
		delete acceptor_it->second;
	}

	return acceptors;
}

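// Create an input for the given stream source (if any), unless one already exists for it.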
void create_config_input(const string &src, multimap<string, InputWithRefcount> *inputs)
{
	if (src.empty()) {
		return;
	}
	if (inputs->count(src) != 0) {
		return;
	}

	InputWithRefcount iwr;
	iwr.input = create_input(src);
	if (iwr.input == NULL) {
		log(ERROR, "did not understand URL '%s', clients will not get any data.",
			src.c_str());
		return;
	}
	iwr.refcount = 0;
	inputs->insert(make_pair(src, iwr));
}

// Find all streams in the configuration file, and create inputs for them.
void create_config_inputs(const Config &config, multimap<string, InputWithRefcount> *inputs)
{
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		if (stream_config.src != "delete") {
			create_config_input(stream_config.src, inputs);
		}
	}
	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
		create_config_input(udpstream_config.src, inputs);
	}
}

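// Set up the mark pools and all HTTP and UDP streams from the configuration,
// reusing deserialized streams where possible and hooking each stream up to its input.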
void create_streams(const Config &config,
                    const set<string> &deserialized_urls,
                    multimap<string, InputWithRefcount> *inputs)
{
	for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
		const MarkPoolConfig &mp_config = config.mark_pools[i];
		mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));
	}

	// HTTP streams.
	set<string> expecting_urls = deserialized_urls;
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		int stream_index;

		expecting_urls.erase(stream_config.url);

		// Special-case deleted streams; they were never deserialized in the first place,
		// so just ignore them.
		if (stream_config.src == "delete") {
			continue;
		}

		if (deserialized_urls.count(stream_config.url) == 0) {
			stream_index = servers->add_stream(stream_config.url,
			                                   stream_config.backlog_size,
			                                   Stream::Encoding(stream_config.encoding));
		} else {
			stream_index = servers->lookup_stream_by_url(stream_config.url);
			assert(stream_index != -1);
			servers->set_backlog_size(stream_index, stream_config.backlog_size);
			servers->set_encoding(stream_index,
			                      Stream::Encoding(stream_config.encoding));
		}

		if (stream_config.mark_pool != -1) {
			servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
		}

		servers->set_pacing_rate(stream_index, stream_config.pacing_rate);

		string src = stream_config.src;
		if (!src.empty()) {
			multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
			if (input_it != inputs->end()) {
				input_it->second.input->add_destination(stream_index);
				++input_it->second.refcount;
			}
		}
	}

	// Warn about any streams we have lost.
	for (set<string>::const_iterator stream_it = expecting_urls.begin();
	     stream_it != expecting_urls.end();
	     ++stream_it) {
		string url = *stream_it;
		log(WARNING, "stream '%s' disappeared from the configuration file. "
		             "It will not be deleted, but clients will not get any new inputs. "
		             "If you really meant to delete it, set src=delete and reload.",
		             url.c_str());
	}

	// UDP streams.
	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
		MarkPool *mark_pool = NULL;
		if (udpstream_config.mark_pool != -1) {
			mark_pool = mark_pools[udpstream_config.mark_pool];
		}
		int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool, udpstream_config.pacing_rate);

		string src = udpstream_config.src;
		if (!src.empty()) {
			multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
			assert(input_it != inputs->end());
			input_it->second.input->add_destination(stream_index);
			++input_it->second.refcount;
		}
	}
}

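// Open all configured log destinations (file, console and/or syslog) and start logging.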
void open_logs(const vector<LogConfig> &log_destinations)
{
	for (size_t i = 0; i < log_destinations.size(); ++i) {
		if (log_destinations[i].type == LogConfig::LOG_TYPE_FILE) {
			add_log_destination_file(log_destinations[i].filename);
		} else if (log_destinations[i].type == LogConfig::LOG_TYPE_CONSOLE) {
			add_log_destination_console();
		} else if (log_destinations[i].type == LogConfig::LOG_TYPE_SYSLOG) {
			add_log_destination_syslog();
		} else {
			assert(false);
		}
	}
	start_logging();
}

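// Fork and run ourselves with --test-config, to verify that the new configuration
// parses before we dare to re-exec into it. Returns true if the check passed.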
bool dry_run_config(const std::string &argv0, const std::string &config_filename)
{
	char *argv0_copy = strdup(argv0.c_str());
	char *config_filename_copy = strdup(config_filename.c_str());

	pid_t pid = fork();
	switch (pid) {
	case -1:
		log_perror("fork()");
		free(argv0_copy);
		free(config_filename_copy);
		return false;
	case 0:
		// Child.
		execlp(argv0_copy, argv0_copy, "--test-config", config_filename_copy, NULL);
		log_perror(argv0_copy);
		_exit(1);
	default:
		// Parent.
		break;
	}

	free(argv0_copy);
	free(config_filename_copy);

	int status;
	pid_t err;
	do {
		err = waitpid(pid, &status, 0);
	} while (err == -1 && errno == EINTR);

	if (err == -1) {
		log_perror("waitpid()");
		return false;
	}

	return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
}

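// Collect the URLs of all streams marked with src=delete in the configuration.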
void find_deleted_streams(const Config &config, set<string> *deleted_urls)
{
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		if (stream_config.src == "delete") {
			log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
			deleted_urls->insert(stream_config.url);
		}
	}
}

int main(int argc, char **argv)
{
	signal(SIGHUP, hup);
	signal(SIGINT, hup);
	signal(SIGUSR1, do_nothing);  // Used in internal signalling.
	signal(SIGPIPE, SIG_IGN);

	// Parse options.
	int state_fd = -1;
	bool test_config = false;
	for ( ;; ) {
		static const option long_options[] = {
			{ "state", required_argument, 0, 's' },
			{ "test-config", no_argument, 0, 't' },
			{ 0, 0, 0, 0 }
		};
		int option_index = 0;
		int c = getopt_long(argc, argv, "s:t", long_options, &option_index);

		if (c == -1) {
			break;
		}
		switch (c) {
		case 's':
			state_fd = atoi(optarg);
			break;
		case 't':
			test_config = true;
			break;
		default:
			fprintf(stderr, "Unknown option '%s'\n", argv[optind - 1]);
			exit(1);
		}
	}

	string config_filename = "cubemap.config";
	if (optind < argc) {
		config_filename = argv[optind++];
	}

	// Canonicalize argv[0] and config_filename.
	char argv0_canon[PATH_MAX];
	char config_filename_canon[PATH_MAX];

	if (realpath(argv[0], argv0_canon) == NULL) {
		log_perror(argv[0]);
		exit(1);
	}
	if (realpath(config_filename.c_str(), config_filename_canon) == NULL) {
		log_perror(config_filename.c_str());
		exit(1);
	}

	// Now parse the configuration file.
	Config config;
	if (!parse_config(config_filename_canon, &config)) {
		exit(1);
	}
	if (test_config) {
		exit(0);
	}

	// Ideally we'd like to daemonize only when we've started up all threads etc.,
	// but daemon() forks, which is not good in multithreaded software, so we'll
	// have to do it here.
	if (config.daemonize) {
		if (daemon(0, 0) == -1) {
			log_perror("daemon");
			exit(1);
		}
	}

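// Reload point: if a SIGHUP-triggered reload fails config validation, we jump
// back here and keep running with the old configuration.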
start:
	// Open logs as soon as possible.
	open_logs(config.log_destinations);

	log(INFO, "Cubemap " SERVER_VERSION " starting.");
	if (config.access_log_file.empty()) {
		// Create a dummy logger.
		access_log = new AccessLogThread();
	} else {
		access_log = new AccessLogThread(config.access_log_file);
	}
	access_log->run();

	servers = new ServerPool(config.num_servers);

	// Find all the streams that are to be deleted.
	set<string> deleted_urls;
	find_deleted_streams(config, &deleted_urls);

	CubemapStateProto loaded_state;
	struct timeval serialize_start;
	set<string> deserialized_urls;
	map<sockaddr_in6, Acceptor *, Sockaddr6Compare> deserialized_acceptors;
	multimap<string, InputWithRefcount> inputs;  // multimap due to older versions without deduplication.
	if (state_fd != -1) {
		log(INFO, "Deserializing state from previous process...");
		string serialized;
		if (!read_tempfile(state_fd, &serialized)) {
			exit(1);
		}
		if (!loaded_state.ParseFromString(serialized)) {
			log(ERROR, "Failed deserialization of state.");
			exit(1);
		}

		serialize_start.tv_sec = loaded_state.serialize_start_sec();
		serialize_start.tv_usec = loaded_state.serialize_start_usec();

		// Deserialize the streams.
		map<string, string> stream_headers_for_url;  // See below.
		for (int i = 0; i < loaded_state.streams_size(); ++i) {
			const StreamProto &stream = loaded_state.streams(i);

			if (deleted_urls.count(stream.url()) != 0) {
				// Delete the stream backlogs.
				for (int j = 0; j < stream.data_fds_size(); ++j) {
					safe_close(stream.data_fds(j));
				}
			} else {
				vector<int> data_fds;
				for (int j = 0; j < stream.data_fds_size(); ++j) {
					data_fds.push_back(stream.data_fds(j));
				}

				// Older versions stored the data once in the protobuf instead of
				// sending around file descriptors.
				if (data_fds.empty() && stream.has_data()) {
					data_fds.push_back(make_tempfile(stream.data()));
				}

				servers->add_stream_from_serialized(stream, data_fds);
				deserialized_urls.insert(stream.url());

				stream_headers_for_url.insert(make_pair(stream.url(), stream.stream_header()));
			}
		}

		// Deserialize the inputs. Note that we don't actually add them to any stream yet.
		for (int i = 0; i < loaded_state.inputs_size(); ++i) {
			InputProto serialized_input = loaded_state.inputs(i);

			// Older versions did not store the stream header in the input,
			// only in each stream. We need to have the stream header in the
			// input as well, in case we create a new stream reusing the same input.
			// Thus, we put it into place here if it's missing.
			if (!serialized_input.has_stream_header() &&
			    stream_headers_for_url.count(serialized_input.url()) != 0) {
				serialized_input.set_stream_header(stream_headers_for_url[serialized_input.url()]);
			}

			InputWithRefcount iwr;
			iwr.input = create_input(serialized_input);
			iwr.refcount = 0;
			inputs.insert(make_pair(serialized_input.url(), iwr));
		}

		// Deserialize the acceptors.
		for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
			sockaddr_in6 sin6 = ExtractAddressFromAcceptorProto(loaded_state.acceptors(i));
			deserialized_acceptors.insert(make_pair(
				sin6,
				new Acceptor(loaded_state.acceptors(i))));
		}

		log(INFO, "Deserialization done.");
	}

	// Add any new inputs coming from the config.
	create_config_inputs(config, &inputs);

	// Find all streams in the configuration file, create them, and connect to the inputs.
	create_streams(config, deserialized_urls, &inputs);
	vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);

	// Put back the existing clients. It doesn't matter which server we
	// allocate them to, so just do round-robin. However, we need to add
	// them after the mark pools have been set up.
	for (int i = 0; i < loaded_state.clients_size(); ++i) {
		if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
			safe_close(loaded_state.clients(i).sock());
		} else {
			servers->add_client_from_serialized(loaded_state.clients(i));
		}
	}

	servers->run();

	// Now delete all inputs that are no longer in use, and start the others.
	for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
	     input_it != inputs.end(); ) {
		if (input_it->second.refcount == 0) {
			log(WARNING, "Input '%s' no longer in use, closing.",
			    input_it->first.c_str());
			input_it->second.input->close_socket();
			delete input_it->second.input;
			inputs.erase(input_it++);
		} else {
			input_it->second.input->run();
			++input_it;
		}
	}

	// Start writing statistics.
	StatsThread *stats_thread = NULL;
	if (!config.stats_file.empty()) {
		stats_thread = new StatsThread(config.stats_file, config.stats_interval);
		stats_thread->run();
	}

	InputStatsThread *input_stats_thread = NULL;
	if (!config.input_stats_file.empty()) {
		vector<Input*> inputs_no_refcount;
		for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
		     input_it != inputs.end(); ++input_it) {
			inputs_no_refcount.push_back(input_it->second.input);
		}

		input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
		input_stats_thread->run();
	}

	struct timeval server_start;
	gettimeofday(&server_start, NULL);
	if (state_fd != -1) {
		// Measure the time from when the previous incarnation started serializing its
		// state (serialize_start, recorded just before the re-exec below) to now, when
		// basically everything is up and running again. This is, in other words, a
		// conservative estimate of how long our “glitch” period was, not counting of
		// course reconnects if the configuration changed.
		double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
			1e-6 * (server_start.tv_usec - serialize_start.tv_usec);
		log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);
	}

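	// Up and running. Now just wait until we are asked to reload (SIGHUP)
	// or shut down (SIGINT).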
	while (!hupped) {
		usleep(100000);
	}

	// OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
	gettimeofday(&serialize_start, NULL);

	if (input_stats_thread != NULL) {
		input_stats_thread->stop();
		delete input_stats_thread;
	}
	if (stats_thread != NULL) {
		stats_thread->stop();
		delete stats_thread;
	}
	for (size_t i = 0; i < acceptors.size(); ++i) {
		acceptors[i]->stop();
	}
	for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
	     input_it != inputs.end();
	     ++input_it) {
		input_it->second.input->stop();
	}
	servers->stop();

	CubemapStateProto state;
	if (stopped) {
		log(INFO, "Shutting down.");
	} else {
		log(INFO, "Serializing state and re-execing...");
		state = collect_state(
			serialize_start, acceptors, inputs, servers);
		string serialized;
		state.SerializeToString(&serialized);
		state_fd = make_tempfile(serialized);
		if (state_fd == -1) {
			exit(1);
		}
	}
	delete servers;

	for (unsigned i = 0; i < mark_pools.size(); ++i) {
		delete mark_pools[i];
	}
	mark_pools.clear();

	access_log->stop();
	delete access_log;
	shut_down_logging();

	if (stopped) {
		exit(0);
	}

	// OK, so the signal was SIGHUP. Check that the new config is okay, then exec the new binary.
	if (!dry_run_config(argv0_canon, config_filename_canon)) {
		open_logs(config.log_destinations);
		log(ERROR, "%s --test-config failed. Restarting old version instead of new.", argv[0]);
		hupped = false;
		shut_down_logging();
		goto start;
	}

	char buf[16];
	sprintf(buf, "%d", state_fd);

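	// Hand the serialized state over to the new process via the --state file
	// descriptor. If the exec fails, all we can do is retry; giving up would
	// drop every client connection.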
	for ( ;; ) {
		execlp(argv0_canon, argv0_canon, config_filename_canon, "--state", buf, NULL);
		open_logs(config.log_destinations);
		log_perror("execlp");
		log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv0_canon);
		shut_down_logging();
		usleep(200000);
	}
}