git.sesse.net Git - cubemap/commitdiff
Make an option to properly delete streams.
author Steinar H. Gunderson <sgunderson@bigfoot.com>
Fri, 16 Aug 2013 17:11:03 +0000 (19:11 +0200)
committer Steinar H. Gunderson <sgunderson@bigfoot.com>
Fri, 16 Aug 2013 17:11:03 +0000 (19:11 +0200)
main.cpp
serverpool.h

index cf728cadca6e04a709bc9e49ffa46ac1386ab209..3f36f32da35ab493c5ba987dccf18b751dc6b92d 100644 (file)
--- a/main.cpp
+++ b/main.cpp
@@ -135,7 +135,9 @@ void create_config_inputs(const Config &config, multimap<string, InputWithRefcou
 {
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
-               create_config_input(stream_config.src, inputs);
+               if (stream_config.src != "delete") {
+                       create_config_input(stream_config.src, inputs);
+               }
        }
        for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
                const UDPStreamConfig &udpstream_config = config.udpstreams[i];
@@ -157,6 +159,15 @@ void create_streams(const Config &config,
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
                int stream_index;
+
+               expecting_urls.erase(stream_config.url);
+
+               // Special-case deleted streams; they were never deserialized in the first place,
+               // so just ignore them.
+               if (stream_config.src == "delete") {
+                       continue;
+               }
+
                if (deserialized_urls.count(stream_config.url) == 0) {
                        stream_index = servers->add_stream(stream_config.url,
                                                           stream_config.backlog_size,
@@ -168,7 +179,6 @@ void create_streams(const Config &config,
                        servers->set_encoding(stream_index,
                                              Stream::Encoding(stream_config.encoding));
                }
-               expecting_urls.erase(stream_config.url);
 
                if (stream_config.mark_pool != -1) {
                        servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
@@ -184,14 +194,14 @@ void create_streams(const Config &config,
                }
        }
 
-       // Warn about any HTTP servers we've lost.
-       // TODO: Make an option (delete=yes?) to actually shut down streams.
+       // Warn about any streams we've lost.
        for (set<string>::const_iterator stream_it = expecting_urls.begin();
             stream_it != expecting_urls.end();
             ++stream_it) {
                string url = *stream_it;
                log(WARNING, "stream '%s' disappeared from the configuration file. "
-                            "It will not be deleted, but clients will not get any new inputs.",
+                            "It will not be deleted, but clients will not get any new inputs. "
+                            "If you really meant to delete it, set src=delete and reload.",
                             url.c_str());
        }
 
@@ -269,6 +279,17 @@ bool dry_run_config(const std::string &argv0, const std::string &config_filename
        return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
 }
 
+void find_deleted_streams(const Config &config, set<string> *deleted_urls)
+{
+       for (unsigned i = 0; i < config.streams.size(); ++i) {
+               const StreamConfig &stream_config = config.streams[i];
+               if (stream_config.src == "delete") {
+                       log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
+                       deleted_urls->insert(stream_config.url);
+               }
+       }
+}
+
 int main(int argc, char **argv)
 {
        signal(SIGHUP, hup);
@@ -356,6 +377,10 @@ start:
 
        servers = new ServerPool(config.num_servers);
 
+       // Find all the streams that are to be deleted.
+       set<string> deleted_urls;
+       find_deleted_streams(config, &deleted_urls);
+
        CubemapStateProto loaded_state;
        struct timeval serialize_start;
        set<string> deserialized_urls;
@@ -379,19 +404,26 @@ start:
                for (int i = 0; i < loaded_state.streams_size(); ++i) {
                        const StreamProto &stream = loaded_state.streams(i);
 
-                       vector<int> data_fds;
-                       for (int j = 0; j < stream.data_fds_size(); ++j) {
-                               data_fds.push_back(stream.data_fds(j));
+                       if (deleted_urls.count(stream.url()) != 0) {
+                               // Delete the stream backlogs.
+                               for (int j = 0; j < stream.data_fds_size(); ++j) {
+                                       safe_close(stream.data_fds(j));
+                               }
+                       } else {
+                               vector<int> data_fds;
+                               for (int j = 0; j < stream.data_fds_size(); ++j) {
+                                       data_fds.push_back(stream.data_fds(j));
+                               }
+
+                               // Older versions stored the data once in the protobuf instead of
+                               // sending around file descriptors.
+                               if (data_fds.empty() && stream.has_data()) {
+                                       data_fds.push_back(make_tempfile(stream.data()));
+                               }
+
+                               servers->add_stream_from_serialized(stream, data_fds);
+                               deserialized_urls.insert(stream.url());
                        }
-
-                       // Older versions stored the data once in the protobuf instead of
-                       // sending around file descriptors.
-                       if (data_fds.empty() && stream.has_data()) {
-                               data_fds.push_back(make_tempfile(stream.data()));
-                       }
-
-                       servers->add_stream_from_serialized(stream, data_fds);
-                       deserialized_urls.insert(stream.url());
                }
 
                // Deserialize the inputs. Note that we don't actually add them to any stream yet.
@@ -423,7 +455,11 @@ start:
        // allocate them to, so just do round-robin. However, we need to add
        // them after the mark pools have been set up.
        for (int i = 0; i < loaded_state.clients_size(); ++i) {
-               servers->add_client_from_serialized(loaded_state.clients(i));
+               if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
+                       safe_close(loaded_state.clients(i).sock());
+               } else {
+                       servers->add_client_from_serialized(loaded_state.clients(i));
+               }
        }
        
        servers->run();
index 13211f918731a1084ffea3429ab1deb82e081cca..b67fa48159f8ca7f641097494284fe95e481ccfa 100644 (file)
@@ -32,6 +32,7 @@ public:
        // Adds the given stream to all the servers. Returns the stream index.
        int add_stream(const std::string &url, size_t backlog_size, Stream::Encoding encoding);
        int add_stream_from_serialized(const StreamProto &stream, const std::vector<int> &data_fds);
+       void delete_stream(const std::string &url);
        int add_udpstream(const sockaddr_in6 &dst, MarkPool *mark_pool);
 
        // Returns the stream index for the given URL (e.g. /foo.ts). Returns -1 on failure.