]> git.sesse.net Git - cubemap/blob - main.cpp
When adding new streams that are copies of old streams, copy the HTTP header.
[cubemap] / main.cpp
1 #include <assert.h>
2 #include <errno.h>
3 #include <getopt.h>
4 #include <limits.h>
5 #include <signal.h>
6 #include <stddef.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <sys/time.h>
11 #include <sys/wait.h>
12 #include <unistd.h>
13 #include <map>
14 #include <set>
15 #include <string>
16 #include <utility>
17 #include <vector>
18
19 #include "acceptor.h"
20 #include "accesslog.h"
21 #include "config.h"
22 #include "input.h"
23 #include "input_stats.h"
24 #include "log.h"
25 #include "markpool.h"
26 #include "serverpool.h"
27 #include "state.pb.h"
28 #include "stats.h"
29 #include "stream.h"
30 #include "util.h"
31 #include "version.h"
32
33 using namespace std;
34
// Global singletons shared between main() and the helper functions below.
AccessLogThread *access_log = NULL;
ServerPool *servers = NULL;
vector<MarkPool *> mark_pools;
// Set asynchronously from the hup() signal handler; polled by main().
volatile bool hupped = false;
volatile bool stopped = false;

// An Input together with the number of streams currently fed by it;
// inputs whose refcount is still zero after a reload are closed as unused.
struct InputWithRefcount {
	Input *input;
	int refcount;
};
45
// Signal handler for SIGHUP and SIGINT. SIGHUP requests a reload/re-exec;
// SIGINT additionally requests a full shutdown. Only volatile flags are set
// here (polled by main()), keeping the handler async-signal-safe.
void hup(int signum)
{
	hupped = true;
	if (signum == SIGINT) {
		stopped = true;
	}
}
53
// Deliberately empty signal handler; installed for SIGUSR1, which is used
// only to interrupt blocking system calls (internal signalling).
void do_nothing(int signum)
{
	(void)signum;  // Unused; the handler exists purely to interrupt syscalls.
}
57
58 CubemapStateProto collect_state(const timeval &serialize_start,
59                                 const vector<Acceptor *> acceptors,
60                                 const multimap<string, InputWithRefcount> inputs,
61                                 ServerPool *servers)
62 {
63         CubemapStateProto state = servers->serialize();  // Fills streams() and clients().
64         state.set_serialize_start_sec(serialize_start.tv_sec);
65         state.set_serialize_start_usec(serialize_start.tv_usec);
66         
67         for (size_t i = 0; i < acceptors.size(); ++i) {
68                 state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
69         }
70
71         for (multimap<string, InputWithRefcount>::const_iterator input_it = inputs.begin();
72              input_it != inputs.end();
73              ++input_it) {
74                 state.add_inputs()->MergeFrom(input_it->second.input->serialize());
75         }
76
77         return state;
78 }
79
80 // Find all port statements in the configuration file, and create acceptors for htem.
81 vector<Acceptor *> create_acceptors(
82         const Config &config,
83         map<int, Acceptor *> *deserialized_acceptors)
84 {
85         vector<Acceptor *> acceptors;
86         for (unsigned i = 0; i < config.acceptors.size(); ++i) {
87                 const AcceptorConfig &acceptor_config = config.acceptors[i];
88                 Acceptor *acceptor = NULL;
89                 map<int, Acceptor *>::iterator deserialized_acceptor_it =
90                         deserialized_acceptors->find(acceptor_config.port);
91                 if (deserialized_acceptor_it != deserialized_acceptors->end()) {
92                         acceptor = deserialized_acceptor_it->second;
93                         deserialized_acceptors->erase(deserialized_acceptor_it);
94                 } else {
95                         int server_sock = create_server_socket(acceptor_config.port, TCP_SOCKET);
96                         acceptor = new Acceptor(server_sock, acceptor_config.port);
97                 }
98                 acceptor->run();
99                 acceptors.push_back(acceptor);
100         }
101
102         // Close all acceptors that are no longer in the configuration file.
103         for (map<int, Acceptor *>::iterator acceptor_it = deserialized_acceptors->begin();
104              acceptor_it != deserialized_acceptors->end();
105              ++acceptor_it) {
106                 acceptor_it->second->close_socket();
107                 delete acceptor_it->second;
108         }
109
110         return acceptors;
111 }
112
113 void create_config_input(const string &src, multimap<string, InputWithRefcount> *inputs)
114 {
115         if (src.empty()) {
116                 return;
117         }
118         if (inputs->count(src) != 0) {
119                 return;
120         }
121
122         InputWithRefcount iwr;
123         iwr.input = create_input(src);
124         if (iwr.input == NULL) {
125                 log(ERROR, "did not understand URL '%s', clients will not get any data.",
126                         src.c_str());
127                 return;
128         }
129         iwr.refcount = 0;
130         inputs->insert(make_pair(src, iwr));
131 }
132
133 // Find all streams in the configuration file, and create inputs for them.
134 void create_config_inputs(const Config &config, multimap<string, InputWithRefcount> *inputs)
135 {
136         for (unsigned i = 0; i < config.streams.size(); ++i) {
137                 const StreamConfig &stream_config = config.streams[i];
138                 if (stream_config.src != "delete") {
139                         create_config_input(stream_config.src, inputs);
140                 }
141         }
142         for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
143                 const UDPStreamConfig &udpstream_config = config.udpstreams[i];
144                 create_config_input(udpstream_config.src, inputs);
145         }
146 }
147
// Set up all HTTP and UDP streams from the configuration, reusing
// deserialized stream state where present, and hook each stream up to its
// input. Also creates the global mark pools, which must exist before any
// stream can reference them by index.
void create_streams(const Config &config,
                    const set<string> &deserialized_urls,
                    multimap<string, InputWithRefcount> *inputs)
{
	for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
		const MarkPoolConfig &mp_config = config.mark_pools[i];
		mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));
	}

	// HTTP streams.
	set<string> expecting_urls = deserialized_urls;
	for (unsigned i = 0; i < config.streams.size(); ++i) {
		const StreamConfig &stream_config = config.streams[i];
		int stream_index;

		expecting_urls.erase(stream_config.url);

		// Special-case deleted streams; they were never deserialized in the first place,
		// so just ignore them.
		if (stream_config.src == "delete") {
			continue;
		}

		if (deserialized_urls.count(stream_config.url) == 0) {
			// A brand new stream.
			stream_index = servers->add_stream(stream_config.url,
			                                   stream_config.backlog_size,
			                                   Stream::Encoding(stream_config.encoding));
		} else {
			// An existing (deserialized) stream; update the settings
			// that may have changed in the configuration file.
			stream_index = servers->lookup_stream_by_url(stream_config.url);
			assert(stream_index != -1);
			servers->set_backlog_size(stream_index, stream_config.backlog_size);
			servers->set_encoding(stream_index,
			                      Stream::Encoding(stream_config.encoding));
		}

		if (stream_config.mark_pool != -1) {
			servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
		}

		// Connect the stream to its input, if any. The input may be
		// missing (e.g. if its URL was not understood), in which case
		// the stream simply gets no data.
		string src = stream_config.src;
		if (!src.empty()) {
			multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
			if (input_it != inputs->end()) {
				input_it->second.input->add_destination(stream_index);
				++input_it->second.refcount;
			}
		}
	}

	// Warn about any streams servers we've lost.
	for (set<string>::const_iterator stream_it = expecting_urls.begin();
	     stream_it != expecting_urls.end();
	     ++stream_it) {
		string url = *stream_it;
		log(WARNING, "stream '%s' disappeared from the configuration file. "
		             "It will not be deleted, but clients will not get any new inputs. "
		             "If you really meant to delete it, set src=delete and reload.",
		             url.c_str());
	}

	// UDP streams. These are always created from scratch (add_udpstream is
	// called unconditionally), and their input is required to exist.
	for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
		const UDPStreamConfig &udpstream_config = config.udpstreams[i];
		MarkPool *mark_pool = NULL;
		if (udpstream_config.mark_pool != -1) {
			mark_pool = mark_pools[udpstream_config.mark_pool];
		}
		int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool);

		string src = udpstream_config.src;
		if (!src.empty()) {
			multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
			assert(input_it != inputs->end());
			input_it->second.input->add_destination(stream_index);
			++input_it->second.refcount;
		}
	}
}
226         
227 void open_logs(const vector<LogConfig> &log_destinations)
228 {
229         for (size_t i = 0; i < log_destinations.size(); ++i) {
230                 if (log_destinations[i].type == LogConfig::LOG_TYPE_FILE) {
231                         add_log_destination_file(log_destinations[i].filename);
232                 } else if (log_destinations[i].type == LogConfig::LOG_TYPE_CONSOLE) {
233                         add_log_destination_console();
234                 } else if (log_destinations[i].type == LogConfig::LOG_TYPE_SYSLOG) {
235                         add_log_destination_syslog();
236                 } else {
237                         assert(false);
238                 }
239         }
240         start_logging();
241 }
242         
243 bool dry_run_config(const std::string &argv0, const std::string &config_filename)
244 {
245         char *argv0_copy = strdup(argv0.c_str());
246         char *config_filename_copy = strdup(config_filename.c_str());
247
248         pid_t pid = fork();
249         switch (pid) {
250         case -1:
251                 log_perror("fork()");
252                 free(argv0_copy);
253                 free(config_filename_copy);
254                 return false;
255         case 0:
256                 // Child.
257                 execlp(argv0_copy, argv0_copy, "--test-config", config_filename_copy, NULL);
258                 log_perror(argv0_copy);
259                 _exit(1);
260         default:
261                 // Parent.
262                 break;
263         }
264                 
265         free(argv0_copy);
266         free(config_filename_copy);
267
268         int status;
269         pid_t err;
270         do {
271                 err = waitpid(pid, &status, 0);
272         } while (err == -1 && errno == EINTR);
273
274         if (err == -1) {
275                 log_perror("waitpid()");
276                 return false;
277         }       
278
279         return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
280 }
281
282 void find_deleted_streams(const Config &config, set<string> *deleted_urls)
283 {
284         for (unsigned i = 0; i < config.streams.size(); ++i) {
285                 const StreamConfig &stream_config = config.streams[i];
286                 if (stream_config.src == "delete") {
287                         log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
288                         deleted_urls->insert(stream_config.url);
289                 }
290         }
291 }
292
int main(int argc, char **argv)
{
	// Install signal handlers before doing anything else, so no signal
	// can slip through unhandled.
	signal(SIGHUP, hup);
	signal(SIGINT, hup);
	signal(SIGUSR1, do_nothing);  // Used in internal signalling.
	signal(SIGPIPE, SIG_IGN);

	// Parse options.
	int state_fd = -1;  // --state: fd holding serialized state from the process we re-exec'ed from.
	bool test_config = false;  // --test-config: parse the config, then exit with a status code.
	for ( ;; ) {
		static const option long_options[] = {
			{ "state", required_argument, 0, 's' },
			{ "test-config", no_argument, 0, 't' },
			{ 0, 0, 0, 0 }
		};
		int option_index = 0;
		int c = getopt_long(argc, argv, "s:t", long_options, &option_index);

		if (c == -1) {
			break;
		}
		switch (c) {
		case 's':
			state_fd = atoi(optarg);
			break;
		case 't':
			test_config = true;
			break;
		default:
			fprintf(stderr, "Unknown option '%s'\n", argv[option_index]);
			exit(1);
		}
	}

	// Optional positional argument: the configuration file.
	string config_filename = "cubemap.config";
	if (optind < argc) {
		config_filename = argv[optind++];
	}

	// Canonicalize argv[0] and config_filename, so they remain valid
	// across directory changes and for the re-exec below.
	char argv0_canon[PATH_MAX];
	char config_filename_canon[PATH_MAX];

	if (realpath(argv[0], argv0_canon) == NULL) {
		log_perror(argv[0]);
		exit(1);
	}
	if (realpath(config_filename.c_str(), config_filename_canon) == NULL) {
		log_perror(config_filename.c_str());
		exit(1);
	}

	// Now parse the configuration file.
	Config config;
	if (!parse_config(config_filename_canon, &config)) {
		exit(1);
	}
	if (test_config) {
		exit(0);
	}

	// Ideally we'd like to daemonize only when we've started up all threads etc.,
	// but daemon() forks, which is not good in multithreaded software, so we'll
	// have to do it here.
	if (config.daemonize) {
		if (daemon(0, 0) == -1) {
			log_perror("daemon");
			exit(1);
		}
	}

start:
	// Open logs as soon as possible.
	open_logs(config.log_destinations);

	log(INFO, "Cubemap " SERVER_VERSION " starting.");
	if (config.access_log_file.empty()) {
		// Create a dummy logger.
		access_log = new AccessLogThread();
	} else {
		access_log = new AccessLogThread(config.access_log_file);
	}
	access_log->run();

	servers = new ServerPool(config.num_servers);

	// Find all the streams that are to be deleted.
	set<string> deleted_urls;
	find_deleted_streams(config, &deleted_urls);

	CubemapStateProto loaded_state;
	struct timeval serialize_start;
	set<string> deserialized_urls;
	map<int, Acceptor *> deserialized_acceptors;
	multimap<string, InputWithRefcount> inputs;  // multimap due to older versions without deduplication.
	if (state_fd != -1) {
		// We were re-exec'ed from an older incarnation; pick up its state.
		log(INFO, "Deserializing state from previous process...");
		string serialized;
		if (!read_tempfile(state_fd, &serialized)) {
			exit(1);
		}
		if (!loaded_state.ParseFromString(serialized)) {
			log(ERROR, "Failed deserialization of state.");
			exit(1);
		}

		serialize_start.tv_sec = loaded_state.serialize_start_sec();
		serialize_start.tv_usec = loaded_state.serialize_start_usec();

		// Deserialize the streams.
		for (int i = 0; i < loaded_state.streams_size(); ++i) {
			const StreamProto &stream = loaded_state.streams(i);

			if (deleted_urls.count(stream.url()) != 0) {
				// Delete the stream backlogs.
				for (int j = 0; j < stream.data_fds_size(); ++j) {
					safe_close(stream.data_fds(j));
				}
			} else {
				vector<int> data_fds;
				for (int j = 0; j < stream.data_fds_size(); ++j) {
					data_fds.push_back(stream.data_fds(j));
				}

				// Older versions stored the data once in the protobuf instead of
				// sending around file descriptors.
				if (data_fds.empty() && stream.has_data()) {
					data_fds.push_back(make_tempfile(stream.data()));
				}

				servers->add_stream_from_serialized(stream, data_fds);
				deserialized_urls.insert(stream.url());
			}
		}

		// Deserialize the inputs. Note that we don't actually add them to any stream yet.
		for (int i = 0; i < loaded_state.inputs_size(); ++i) {
			InputWithRefcount iwr;
			iwr.input = create_input(loaded_state.inputs(i));
			iwr.refcount = 0;
			inputs.insert(make_pair(loaded_state.inputs(i).url(), iwr));
		}

		// Deserialize the acceptors.
		for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
			deserialized_acceptors.insert(make_pair(
				loaded_state.acceptors(i).port(),
				new Acceptor(loaded_state.acceptors(i))));
		}

		log(INFO, "Deserialization done.");
	}

	// Add any new inputs coming from the config.
	create_config_inputs(config, &inputs);

	// Find all streams in the configuration file, create them, and connect to the inputs.
	create_streams(config, deserialized_urls, &inputs);
	vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);

	// Put back the existing clients. It doesn't matter which server we
	// allocate them to, so just do round-robin. However, we need to add
	// them after the mark pools have been set up.
	for (int i = 0; i < loaded_state.clients_size(); ++i) {
		if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
			// The client belongs to a deleted stream; drop the connection.
			safe_close(loaded_state.clients(i).sock());
		} else {
			servers->add_client_from_serialized(loaded_state.clients(i));
		}
	}

	servers->run();

	// Now delete all inputs that are no longer in use, and start the others.
	for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
	     input_it != inputs.end(); ) {
		if (input_it->second.refcount == 0) {
			log(WARNING, "Input '%s' no longer in use, closing.",
			    input_it->first.c_str());
			input_it->second.input->close_socket();
			delete input_it->second.input;
			// Post-increment: erase() invalidates only the erased iterator.
			inputs.erase(input_it++);
		} else {
			input_it->second.input->run();
			++input_it;
		}
	}

	// Start writing statistics.
	StatsThread *stats_thread = NULL;
	if (!config.stats_file.empty()) {
		stats_thread = new StatsThread(config.stats_file, config.stats_interval);
		stats_thread->run();
	}

	InputStatsThread *input_stats_thread = NULL;
	if (!config.input_stats_file.empty()) {
		// The stats thread only needs the Input pointers, not the refcounts.
		vector<Input*> inputs_no_refcount;
		for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
		     input_it != inputs.end(); ++input_it) {
			inputs_no_refcount.push_back(input_it->second.input);
		}

		input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
		input_stats_thread->run();
	}

	struct timeval server_start;
	gettimeofday(&server_start, NULL);
	if (state_fd != -1) {
		// Measure time from we started deserializing (below) to now, when basically everything
		// is up and running. This is, in other words, a conservative estimate of how long our
		// “glitch” period was, not counting of course reconnects if the configuration changed.
		double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
			1e-6 * (server_start.tv_usec - serialize_start.tv_usec);
		log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);
	}

	// Idle until hup() flags either a reload (SIGHUP) or a shutdown (SIGINT).
	while (!hupped) {
		usleep(100000);
	}

	// OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
	gettimeofday(&serialize_start, NULL);

	if (input_stats_thread != NULL) {
		input_stats_thread->stop();
		delete input_stats_thread;
	}
	if (stats_thread != NULL) {
		stats_thread->stop();
		delete stats_thread;
	}
	for (size_t i = 0; i < acceptors.size(); ++i) {
		acceptors[i]->stop();
	}
	for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
	     input_it != inputs.end();
	     ++input_it) {
		input_it->second.input->stop();
	}
	servers->stop();

	CubemapStateProto state;
	if (stopped) {
		log(INFO, "Shutting down.");
	} else {
		// Not shutting down for good; serialize all state into a tempfile
		// whose fd is handed to the next incarnation via --state.
		log(INFO, "Serializing state and re-execing...");
		state = collect_state(
			serialize_start, acceptors, inputs, servers);
		string serialized;
		state.SerializeToString(&serialized);
		state_fd = make_tempfile(serialized);
		if (state_fd == -1) {
			exit(1);
		}
	}
	delete servers;

	for (unsigned i = 0; i < mark_pools.size(); ++i) {
		delete mark_pools[i];
	}
	mark_pools.clear();

	access_log->stop();
	delete access_log;
	shut_down_logging();

	if (stopped) {
		exit(0);
	}

	// OK, so the signal was SIGHUP. Check that the new config is okay, then exec the new binary.
	if (!dry_run_config(argv0_canon, config_filename_canon)) {
		open_logs(config.log_destinations);
		log(ERROR, "%s --test-config failed. Restarting old version instead of new.", argv[0]);
		hupped = false;
		shut_down_logging();
		goto start;
	}

	char buf[16];
	sprintf(buf, "%d", state_fd);

	for ( ;; ) {
		// NOTE(review): the terminating NULL should portably be written
		// (char *)NULL, since execlp() is variadic.
		execlp(argv0_canon, argv0_canon, config_filename_canon, "--state", buf, NULL);
		open_logs(config.log_destinations);
		log_perror("execlp");
		log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv0_canon);
		shut_down_logging();
		usleep(200000);
	}
}