]> git.sesse.net Git - cubemap/blobdiff - main.cpp
Make an option to properly delete streams.
[cubemap] / main.cpp
index be3e8ebcce8e81ca462c34937b3068cdc12e24b1..3f36f32da35ab493c5ba987dccf18b751dc6b92d 100644 (file)
--- a/main.cpp
+++ b/main.cpp
 #include <utility>
 #include <vector>
 
-#include "accesslog.h"
 #include "acceptor.h"
+#include "accesslog.h"
 #include "config.h"
 #include "input.h"
+#include "input_stats.h"
 #include "log.h"
 #include "markpool.h"
 #include "serverpool.h"
 #include "state.pb.h"
 #include "stats.h"
+#include "stream.h"
 #include "util.h"
 #include "version.h"
 
@@ -32,9 +34,15 @@ using namespace std;
 
 AccessLogThread *access_log = NULL;
 ServerPool *servers = NULL;
+vector<MarkPool *> mark_pools;
 volatile bool hupped = false;
 volatile bool stopped = false;
 
+struct InputWithRefcount {
+       Input *input;
+       int refcount;
+};
+
 void hup(int signum)
 {
        hupped = true;
@@ -43,9 +51,13 @@ void hup(int signum)
        }
 }
 
+void do_nothing(int signum)
+{
+}
+
 CubemapStateProto collect_state(const timeval &serialize_start,
                                 const vector<Acceptor *> acceptors,
-                                const vector<Input *> inputs,
+                                const multimap<string, InputWithRefcount> inputs,
                                 ServerPool *servers)
 {
        CubemapStateProto state = servers->serialize();  // Fills streams() and clients().
@@ -56,8 +68,10 @@ CubemapStateProto collect_state(const timeval &serialize_start,
                state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
        }
 
-       for (size_t i = 0; i < inputs.size(); ++i) {
-               state.add_inputs()->MergeFrom(inputs[i]->serialize());
+       for (multimap<string, InputWithRefcount>::const_iterator input_it = inputs.begin();
+            input_it != inputs.end();
+            ++input_it) {
+               state.add_inputs()->MergeFrom(input_it->second.input->serialize());
        }
 
        return state;
@@ -96,86 +110,116 @@ vector<Acceptor *> create_acceptors(
        return acceptors;
 }
 
+void create_config_input(const string &src, multimap<string, InputWithRefcount> *inputs)
+{
+       if (src.empty()) {
+               return;
+       }
+       if (inputs->count(src) != 0) {
+               return;
+       }
+
+       InputWithRefcount iwr;
+       iwr.input = create_input(src);
+       if (iwr.input == NULL) {
+               log(ERROR, "did not understand URL '%s', clients will not get any data.",
+                       src.c_str());
+               return;
+       }
+       iwr.refcount = 0;
+       inputs->insert(make_pair(src, iwr));
+}
+
 // Find all streams in the configuration file, and create inputs for them.
-vector<Input *> create_inputs(const Config &config,
-                              map<string, Input *> *deserialized_inputs)
+void create_config_inputs(const Config &config, multimap<string, InputWithRefcount> *inputs)
 {
-       vector<Input *> inputs;
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
-               if (stream_config.src.empty()) {
-                       continue;
+               if (stream_config.src != "delete") {
+                       create_config_input(stream_config.src, inputs);
                }
-
-               string stream_id = stream_config.stream_id;
-               string src = stream_config.src;
-
-               Input *input = NULL;
-               map<string, Input *>::iterator deserialized_input_it =
-                       deserialized_inputs->find(stream_id);
-               if (deserialized_input_it != deserialized_inputs->end()) {
-                       input = deserialized_input_it->second;
-                       if (input->get_url() != src) {
-                               log(INFO, "Stream '%s' has changed URL from '%s' to '%s', restarting input.",
-                                       stream_id.c_str(), input->get_url().c_str(), src.c_str());
-                               input->close_socket();
-                               delete input;
-                               input = NULL;
-                       }
-                       deserialized_inputs->erase(deserialized_input_it);
-               }
-               if (input == NULL) {
-                       input = create_input(stream_id, src);
-                       if (input == NULL) {
-                               log(ERROR, "did not understand URL '%s', clients will not get any data.",
-                                       src.c_str());
-                               continue;
-                       }
-               }
-               input->run();
-               inputs.push_back(input);
        }
-       return inputs;
+       for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
+               const UDPStreamConfig &udpstream_config = config.udpstreams[i];
+               create_config_input(udpstream_config.src, inputs);
+       }
 }
 
 void create_streams(const Config &config,
-                    const set<string> &deserialized_stream_ids,
-                   map<string, Input *> *deserialized_inputs)
+                    const set<string> &deserialized_urls,
+                    multimap<string, InputWithRefcount> *inputs)
 {
-       vector<MarkPool *> mark_pools;  // FIXME: leak
        for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
                const MarkPoolConfig &mp_config = config.mark_pools[i];
                mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));
        }
 
-       set<string> expecting_stream_ids = deserialized_stream_ids;
+       // HTTP streams.
+       set<string> expecting_urls = deserialized_urls;
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
-               if (deserialized_stream_ids.count(stream_config.stream_id) == 0) {
-                       servers->add_stream(stream_config.stream_id, stream_config.backlog_size);
+               int stream_index;
+
+               expecting_urls.erase(stream_config.url);
+
+               // Special-case deleted streams; they were never deserialized in the first place,
+               // so just ignore them.
+               if (stream_config.src == "delete") {
+                       continue;
+               }
+
+               if (deserialized_urls.count(stream_config.url) == 0) {
+                       stream_index = servers->add_stream(stream_config.url,
+                                                          stream_config.backlog_size,
+                                                          Stream::Encoding(stream_config.encoding));
                } else {
-                       servers->set_backlog_size(stream_config.stream_id, stream_config.backlog_size);
+                       stream_index = servers->lookup_stream_by_url(stream_config.url);
+                       assert(stream_index != -1);
+                       servers->set_backlog_size(stream_index, stream_config.backlog_size);
+                       servers->set_encoding(stream_index,
+                                             Stream::Encoding(stream_config.encoding));
                }
-               expecting_stream_ids.erase(stream_config.stream_id);
 
                if (stream_config.mark_pool != -1) {
-                       servers->set_mark_pool(stream_config.stream_id,
-                                              mark_pools[stream_config.mark_pool]);
+                       servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
+               }
+
+               string src = stream_config.src;
+               if (!src.empty()) {
+                       multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
+                       if (input_it != inputs->end()) {
+                               input_it->second.input->add_destination(stream_index);
+                               ++input_it->second.refcount;
+                       }
                }
        }
 
-       // Warn about any servers we've lost.
-       // TODO: Make an option (delete=yes?) to actually shut down streams.
-       for (set<string>::const_iterator stream_it = expecting_stream_ids.begin();
-            stream_it != expecting_stream_ids.end();
+       // Warn about any streams we've lost.
+       for (set<string>::const_iterator stream_it = expecting_urls.begin();
+            stream_it != expecting_urls.end();
             ++stream_it) {
-               string stream_id = *stream_it;
+               string url = *stream_it;
                log(WARNING, "stream '%s' disappeared from the configuration file. "
-                            "It will not be deleted, but clients will not get any new inputs.",
-                            stream_id.c_str());
-               if (deserialized_inputs->count(stream_id) != 0) {
-                       delete (*deserialized_inputs)[stream_id];
-                       deserialized_inputs->erase(stream_id);
+                            "It will not be deleted, but clients will not get any new inputs. "
+                            "If you really meant to delete it, set src=delete and reload.",
+                            url.c_str());
+       }
+
+       // UDP streams.
+       for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
+               const UDPStreamConfig &udpstream_config = config.udpstreams[i];
+               MarkPool *mark_pool = NULL;
+               if (udpstream_config.mark_pool != -1) {
+                       mark_pool = mark_pools[udpstream_config.mark_pool];
+               }
+               int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool);
+
+               string src = udpstream_config.src;
+               if (!src.empty()) {
+                       multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
+                       assert(input_it != inputs->end());
+                       input_it->second.input->add_destination(stream_index);
+                       ++input_it->second.refcount;
                }
        }
 }
@@ -235,10 +279,22 @@ bool dry_run_config(const std::string &argv0, const std::string &config_filename
        return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
 }
 
+void find_deleted_streams(const Config &config, set<string> *deleted_urls)
+{
+       for (unsigned i = 0; i < config.streams.size(); ++i) {
+               const StreamConfig &stream_config = config.streams[i];
+               if (stream_config.src == "delete") {
+                       log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
+                       deleted_urls->insert(stream_config.url);
+               }
+       }
+}
+
 int main(int argc, char **argv)
 {
        signal(SIGHUP, hup);
        signal(SIGINT, hup);
+       signal(SIGUSR1, do_nothing);  // Used in internal signalling.
        signal(SIGPIPE, SIG_IGN);
        
        // Parse options.
@@ -248,9 +304,10 @@ int main(int argc, char **argv)
                static const option long_options[] = {
                        { "state", required_argument, 0, 's' },
                        { "test-config", no_argument, 0, 't' },
+                       { 0, 0, 0, 0 }
                };
                int option_index = 0;
-               int c = getopt_long (argc, argv, "s:t", long_options, &option_index);
+               int c = getopt_long(argc, argv, "s:t", long_options, &option_index);
      
                if (c == -1) {
                        break;
@@ -263,7 +320,8 @@ int main(int argc, char **argv)
                        test_config = true;
                        break;
                default:
-                       assert(false);
+                       fprintf(stderr, "Unknown option '%s'\n", argv[option_index]);
+                       exit(1);
                }
        }
 
@@ -319,11 +377,15 @@ start:
 
        servers = new ServerPool(config.num_servers);
 
+       // Find all the streams that are to be deleted.
+       set<string> deleted_urls;
+       find_deleted_streams(config, &deleted_urls);
+
        CubemapStateProto loaded_state;
        struct timeval serialize_start;
-       set<string> deserialized_stream_ids;
-       map<string, Input *> deserialized_inputs;
+       set<string> deserialized_urls;
        map<int, Acceptor *> deserialized_acceptors;
+       multimap<string, InputWithRefcount> inputs;  // multimap due to older versions without deduplication.
        if (state_fd != -1) {
                log(INFO, "Deserializing state from previous process...");
                string serialized;
@@ -340,15 +402,36 @@ start:
 
                // Deserialize the streams.
                for (int i = 0; i < loaded_state.streams_size(); ++i) {
-                       servers->add_stream_from_serialized(loaded_state.streams(i));
-                       deserialized_stream_ids.insert(loaded_state.streams(i).stream_id());
+                       const StreamProto &stream = loaded_state.streams(i);
+
+                       if (deleted_urls.count(stream.url()) != 0) {
+                               // Delete the stream backlogs.
+                               for (int j = 0; j < stream.data_fds_size(); ++j) {
+                                       safe_close(stream.data_fds(j));
+                               }
+                       } else {
+                               vector<int> data_fds;
+                               for (int j = 0; j < stream.data_fds_size(); ++j) {
+                                       data_fds.push_back(stream.data_fds(j));
+                               }
+
+                               // Older versions stored the data once in the protobuf instead of
+                               // sending around file descriptors.
+                               if (data_fds.empty() && stream.has_data()) {
+                                       data_fds.push_back(make_tempfile(stream.data()));
+                               }
+
+                               servers->add_stream_from_serialized(stream, data_fds);
+                               deserialized_urls.insert(stream.url());
+                       }
                }
 
-               // Deserialize the inputs. Note that we don't actually add them to any state yet.
+               // Deserialize the inputs. Note that we don't actually add them to any stream yet.
                for (int i = 0; i < loaded_state.inputs_size(); ++i) {
-                       deserialized_inputs.insert(make_pair(
-                               loaded_state.inputs(i).stream_id(),
-                               create_input(loaded_state.inputs(i))));
+                       InputWithRefcount iwr;
+                       iwr.input = create_input(loaded_state.inputs(i));
+                       iwr.refcount = 0;
+                       inputs.insert(make_pair(loaded_state.inputs(i).url(), iwr));
                } 
 
                // Deserialize the acceptors.
@@ -361,24 +444,41 @@ start:
                log(INFO, "Deserialization done.");
        }
 
-       // Find all streams in the configuration file, and create them.
-       create_streams(config, deserialized_stream_ids, &deserialized_inputs);
-
-       vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);
-       vector<Input *> inputs = create_inputs(config, &deserialized_inputs);
+       // Add any new inputs coming from the config.
+       create_config_inputs(config, &inputs);
        
-       // All deserialized inputs should now have been taken care of, one way or the other.
-       assert(deserialized_inputs.empty());
+       // Find all streams in the configuration file, create them, and connect to the inputs.
+       create_streams(config, deserialized_urls, &inputs);
+       vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);
        
        // Put back the existing clients. It doesn't matter which server we
        // allocate them to, so just do round-robin. However, we need to add
        // them after the mark pools have been set up.
        for (int i = 0; i < loaded_state.clients_size(); ++i) {
-               servers->add_client_from_serialized(loaded_state.clients(i));
+               if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
+                       safe_close(loaded_state.clients(i).sock());
+               } else {
+                       servers->add_client_from_serialized(loaded_state.clients(i));
+               }
        }
        
        servers->run();
 
+       // Now delete all inputs that are no longer in use, and start the others.
+       for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+            input_it != inputs.end(); ) {
+               if (input_it->second.refcount == 0) {
+                       log(WARNING, "Input '%s' no longer in use, closing.",
+                           input_it->first.c_str());
+                       input_it->second.input->close_socket();
+                       delete input_it->second.input;
+                       inputs.erase(input_it++);
+               } else {
+                       input_it->second.input->run();
+                       ++input_it;
+               }
+       }
+
        // Start writing statistics.
        StatsThread *stats_thread = NULL;
        if (!config.stats_file.empty()) {
@@ -386,6 +486,18 @@ start:
                stats_thread->run();
        }
 
+       InputStatsThread *input_stats_thread = NULL;
+       if (!config.input_stats_file.empty()) {
+               vector<Input*> inputs_no_refcount;
+               for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+                    input_it != inputs.end(); ++input_it) {
+                       inputs_no_refcount.push_back(input_it->second.input);
+               }
+
+               input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
+               input_stats_thread->run();
+       }
+
        struct timeval server_start;
        gettimeofday(&server_start, NULL);
        if (state_fd != -1) {
@@ -404,14 +516,21 @@ start:
        // OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
        gettimeofday(&serialize_start, NULL);
 
+       if (input_stats_thread != NULL) {
+               input_stats_thread->stop();
+               delete input_stats_thread;
+       }
        if (stats_thread != NULL) {
                stats_thread->stop();
+               delete stats_thread;
        }
        for (size_t i = 0; i < acceptors.size(); ++i) {
                acceptors[i]->stop();
        }
-       for (size_t i = 0; i < inputs.size(); ++i) {
-               inputs[i]->stop();
+       for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+            input_it != inputs.end();
+            ++input_it) {
+               input_it->second.input->stop();
        }
        servers->stop();
 
@@ -430,6 +549,12 @@ start:
                }
        }
        delete servers;
+
+       for (unsigned i = 0; i < mark_pools.size(); ++i) {
+               delete mark_pools[i];
+       }
+       mark_pools.clear();
+
        access_log->stop();
        delete access_log;
        shut_down_logging();