]> git.sesse.net Git - cubemap/blobdiff - main.cpp
Tweak the MutexLock implementation slightly, so as to confuse Coverity less.
[cubemap] / main.cpp
index c40b78f5ef3ba20bea62cc86589903cd69aa9c0e..c4c5bd4187d3a74850a721fb2d8b90f77835405d 100644 (file)
--- a/main.cpp
+++ b/main.cpp
@@ -135,7 +135,9 @@ void create_config_inputs(const Config &config, multimap<string, InputWithRefcou
 {
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
-               create_config_input(stream_config.src, inputs);
+               if (stream_config.src != "delete") {
+                       create_config_input(stream_config.src, inputs);
+               }
        }
        for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
                const UDPStreamConfig &udpstream_config = config.udpstreams[i];
@@ -157,6 +159,15 @@ void create_streams(const Config &config,
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
                int stream_index;
+
+               expecting_urls.erase(stream_config.url);
+
+               // Special-case deleted streams; they were never deserialized in the first place,
+               // so just ignore them.
+               if (stream_config.src == "delete") {
+                       continue;
+               }
+
                if (deserialized_urls.count(stream_config.url) == 0) {
                        stream_index = servers->add_stream(stream_config.url,
                                                           stream_config.backlog_size,
@@ -168,7 +179,6 @@ void create_streams(const Config &config,
                        servers->set_encoding(stream_index,
                                              Stream::Encoding(stream_config.encoding));
                }
-               expecting_urls.erase(stream_config.url);
 
                if (stream_config.mark_pool != -1) {
                        servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
@@ -177,20 +187,21 @@ void create_streams(const Config &config,
                string src = stream_config.src;
                if (!src.empty()) {
                        multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
-                       assert(input_it != inputs->end());
-                       input_it->second.input->add_destination(stream_index);
-                       ++input_it->second.refcount;
+                       if (input_it != inputs->end()) {
+                               input_it->second.input->add_destination(stream_index);
+                               ++input_it->second.refcount;
+                       }
                }
        }
 
-       // Warn about any HTTP servers we've lost.
-       // TODO: Make an option (delete=yes?) to actually shut down streams.
+       // Warn about any streams we've lost.
        for (set<string>::const_iterator stream_it = expecting_urls.begin();
             stream_it != expecting_urls.end();
             ++stream_it) {
                string url = *stream_it;
                log(WARNING, "stream '%s' disappeared from the configuration file. "
-                            "It will not be deleted, but clients will not get any new inputs.",
+                            "It will not be deleted, but clients will not get any new inputs. "
+                            "If you really meant to delete it, set src=delete and reload.",
                             url.c_str());
        }
 
@@ -268,6 +279,17 @@ bool dry_run_config(const std::string &argv0, const std::string &config_filename
        return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
 }
 
+void find_deleted_streams(const Config &config, set<string> *deleted_urls)
+{
+       for (unsigned i = 0; i < config.streams.size(); ++i) {
+               const StreamConfig &stream_config = config.streams[i];
+               if (stream_config.src == "delete") {
+                       log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
+                       deleted_urls->insert(stream_config.url);
+               }
+       }
+}
+
 int main(int argc, char **argv)
 {
        signal(SIGHUP, hup);
@@ -355,6 +377,10 @@ start:
 
        servers = new ServerPool(config.num_servers);
 
+       // Find all the streams that are to be deleted.
+       set<string> deleted_urls;
+       find_deleted_streams(config, &deleted_urls);
+
        CubemapStateProto loaded_state;
        struct timeval serialize_start;
        set<string> deserialized_urls;
@@ -375,30 +401,51 @@ start:
                serialize_start.tv_usec = loaded_state.serialize_start_usec();
 
                // Deserialize the streams.
+               map<string, string> stream_headers_for_url;  // See below.
                for (int i = 0; i < loaded_state.streams_size(); ++i) {
                        const StreamProto &stream = loaded_state.streams(i);
 
-                       vector<int> data_fds;
-                       for (int j = 0; j < stream.data_fds_size(); ++j) {
-                               data_fds.push_back(stream.data_fds(j));
+                       if (deleted_urls.count(stream.url()) != 0) {
+                               // Delete the stream backlogs.
+                               for (int j = 0; j < stream.data_fds_size(); ++j) {
+                                       safe_close(stream.data_fds(j));
+                               }
+                       } else {
+                               vector<int> data_fds;
+                               for (int j = 0; j < stream.data_fds_size(); ++j) {
+                                       data_fds.push_back(stream.data_fds(j));
+                               }
+
+                               // Older versions stored the data once in the protobuf instead of
+                               // sending around file descriptors.
+                               if (data_fds.empty() && stream.has_data()) {
+                                       data_fds.push_back(make_tempfile(stream.data()));
+                               }
+
+                               servers->add_stream_from_serialized(stream, data_fds);
+                               deserialized_urls.insert(stream.url());
+
+                               stream_headers_for_url.insert(make_pair(stream.url(), stream.stream_header()));
                        }
-
-                       // Older versions stored the data once in the protobuf instead of
-                       // sending around file descriptors.
-                       if (data_fds.empty() && stream.has_data()) {
-                               data_fds.push_back(make_tempfile(stream.data()));
-                       }
-
-                       servers->add_stream_from_serialized(stream, data_fds);
-                       deserialized_urls.insert(stream.url());
                }
 
                // Deserialize the inputs. Note that we don't actually add them to any stream yet.
                for (int i = 0; i < loaded_state.inputs_size(); ++i) {
+                       InputProto serialized_input = loaded_state.inputs(i);
+
+                       // Older versions did not store the stream header in the input,
+                       // only in each stream. We need to have the stream header in the
+                       // input as well, in case we create a new stream reusing the same input.
+                       // Thus, we put it into place here if it's missing.
+                       if (!serialized_input.has_stream_header() &&
+                           stream_headers_for_url.count(serialized_input.url()) != 0) {
+                               serialized_input.set_stream_header(stream_headers_for_url[serialized_input.url()]);
+                       }
+
                        InputWithRefcount iwr;
-                       iwr.input = create_input(loaded_state.inputs(i));
+                       iwr.input = create_input(serialized_input);
                        iwr.refcount = 0;
-                       inputs.insert(make_pair(loaded_state.inputs(i).url(), iwr));
+                       inputs.insert(make_pair(serialized_input.url(), iwr));
                } 
 
                // Deserialize the acceptors.
@@ -422,7 +469,11 @@ start:
        // allocate them to, so just do round-robin. However, we need to add
        // them after the mark pools have been set up.
        for (int i = 0; i < loaded_state.clients_size(); ++i) {
-               servers->add_client_from_serialized(loaded_state.clients(i));
+               if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
+                       safe_close(loaded_state.clients(i).sock());
+               } else {
+                       servers->add_client_from_serialized(loaded_state.clients(i));
+               }
        }
        
        servers->run();