Refer to streams internally mostly by an index, not the stream_id.
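In short: callers used to address a stream by its stream_id string on every call; with this change they resolve a URL to a small integer index once (via lookup_stream_by_url()) and pass that index to set_header(), add_data(), set_mark_pool() and friends. A minimal caller sketch against the new interface (not part of the commit; the helper name, URL and buffer are made up for illustration):

// Hypothetical helper: push one chunk of data to the stream serving `url`,
// using the new index-based ServerPool interface.
#include <stddef.h>
#include <string>
#include "serverpool.h"

void push_chunk(ServerPool *pool, const std::string &url, const char *buf, size_t bytes)
{
	int stream_index = pool->lookup_stream_by_url(url);
	if (stream_index == -1) {
		// Assumed "no such stream" sentinel, matching the -1 used elsewhere in this diff.
		return;
	}
	pool->add_data(stream_index, buf, bytes);  // fans the data out to every Server in the pool
}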
diff --git a/serverpool.cpp b/serverpool.cpp
index cf6933ccda181e23f4f7f7d85e353ec11b7d805a..fb8ca451d38cf1c10f8ab30268ffc71c9ff85a9e 100644
--- a/serverpool.cpp
+++ b/serverpool.cpp
@@ -1,4 +1,15 @@
+#include <assert.h>
+#include <errno.h>
+#include <google/protobuf/repeated_field.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "client.h"
+#include "log.h"
+#include "server.h"
 #include "serverpool.h"
+#include "state.pb.h"
+#include "util.h"
 
 using namespace std;
 
@@ -13,6 +24,32 @@ ServerPool::~ServerPool()
 {
        delete[] servers;
 }
+       
+CubemapStateProto ServerPool::serialize()
+{
+       CubemapStateProto state;
+
+       for (int i = 0; i < num_servers; ++i) {
+                CubemapStateProto local_state = servers[i].serialize();
+
+               // The stream state should be identical between the servers, so we only store it once,
+               // save for the fds, which we keep around to distribute to the servers after re-exec.
+               if (i == 0) {
+                       state.mutable_streams()->MergeFrom(local_state.streams());
+               } else {
+                       assert(state.streams_size() == local_state.streams_size());
+                       for (int j = 0; j < local_state.streams_size(); ++j) {
+                               assert(local_state.streams(j).data_fds_size() == 1);
+                               state.mutable_streams(j)->add_data_fds(local_state.streams(j).data_fds(0));
+                       }
+               }
+               for (int j = 0; j < local_state.clients_size(); ++j) {
+                       state.add_clients()->MergeFrom(local_state.clients(j));
+               }
+        }
+
+       return state;
+}
 
 void ServerPool::add_client(int sock)
 {
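Note on serialize() above: stream metadata is taken from server 0 only, but every server's single data fd is appended onto the same StreamProto, so after re-exec each stream carries one fd per old server. A plausible consumer on the other side of the exec could hand those fds straight back to a new pool; this restore_from_state() helper is not part of the commit, it only illustrates how the accumulated data_fds line up with add_stream_from_serialized() further down:

// Hypothetical re-exec consumer, assuming a deserialized CubemapStateProto `state`
// and a freshly constructed ServerPool `pool`.
#include <vector>
#include "serverpool.h"
#include "state.pb.h"

void restore_from_state(ServerPool *pool, const CubemapStateProto &state)
{
	for (int i = 0; i < state.streams_size(); ++i) {
		// One data fd per old server; the pool reuses or clones them as needed.
		std::vector<int> data_fds(state.streams(i).data_fds().begin(),
		                          state.streams(i).data_fds().end());
		pool->add_stream_from_serialized(state.streams(i), data_fds);
	}
	for (int i = 0; i < state.clients_size(); ++i) {
		pool->add_client_from_serialized(state.clients(i));
	}
}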
@@ -24,31 +61,75 @@ void ServerPool::add_client_from_serialized(const ClientProto &client)
        servers[clients_added++ % num_servers].add_client_from_serialized(client);
 }
 
-void ServerPool::add_stream(const std::string &stream_id)
+int ServerPool::lookup_stream_by_url(const std::string &url) const
+{
+       assert(servers != NULL);
+       return servers[0].lookup_stream_by_url(url);
+}
+
+int ServerPool::add_stream(const string &url, size_t backlog_size, Stream::Encoding encoding)
 {
+       int stream_index = -1;
        for (int i = 0; i < num_servers; ++i) {
-               servers[i].add_stream(stream_id);
+               int stream_index2 = servers[i].add_stream(url, backlog_size, encoding);
+               if (i == 0) {
+                       stream_index = stream_index2;
+               } else {
+                       // Verify that all servers have this under the same stream index.
+                       assert(stream_index == stream_index2);
+               }
        }
+       return stream_index;
 }
 
-void ServerPool::add_stream_from_serialized(const StreamProto &stream)
+int ServerPool::add_stream_from_serialized(const StreamProto &stream, const vector<int> &data_fds)
 {
+       assert(!data_fds.empty());
+       string contents;
+       int stream_index = -1;
        for (int i = 0; i < num_servers; ++i) {
-               servers[i].add_stream_from_serialized(stream);
+               int data_fd;
+               if (i < int(data_fds.size())) {
+                       // Reuse one of the existing file descriptors.
+                       data_fd = data_fds[i];
+               } else {
+                       // Clone the first one.
+                       if (contents.empty()) {
+                               if (!read_tempfile(data_fds[0], &contents)) {
+                                       exit(1);
+                               }
+                       }
+                       data_fd = make_tempfile(contents);
+               }
+
+               int stream_index2 = servers[i].add_stream_from_serialized(stream, data_fd);
+               if (i == 0) {
+                       stream_index = stream_index2;
+               } else {
+                       // Verify that all servers have this under the same stream index.
+                       assert(stream_index == stream_index2);
+               }
        }
+
+       // Close and delete any leftovers, if the number of servers was reduced.
+       for (size_t i = num_servers; i < data_fds.size(); ++i) {
+               safe_close(data_fds[i]);  // Implicitly deletes the file.
+       }
+
+       return stream_index;
 }
 
-void ServerPool::set_header(const std::string &stream_id, const std::string &header)
+void ServerPool::set_header(int stream_index, const string &http_header, const string &stream_header)
 {
        for (int i = 0; i < num_servers; ++i) {
-               servers[i].set_header(stream_id, header);
+               servers[i].set_header(stream_index, http_header, stream_header);
        }
 }
 
-void ServerPool::add_data(const std::string &stream_id, const char *data, size_t bytes)
+void ServerPool::add_data(int stream_index, const char *data, size_t bytes)
 {
        for (int i = 0; i < num_servers; ++i) {
-               servers[i].add_data_deferred(stream_id, data, bytes);
+               servers[i].add_data_deferred(stream_index, data, bytes);
        }
 }
 
@@ -76,9 +157,23 @@ vector<ClientStats> ServerPool::get_client_stats() const
        return ret;
 }
        
-void ServerPool::set_mark_pool(const std::string &stream_id, MarkPool *mark_pool)
+void ServerPool::set_mark_pool(int stream_index, MarkPool *mark_pool)
+{
+       for (int i = 0; i < num_servers; ++i) {
+               servers[i].set_mark_pool(stream_index, mark_pool);
+       }       
+}
+
+void ServerPool::set_backlog_size(int stream_index, size_t new_size)
+{
+       for (int i = 0; i < num_servers; ++i) {
+               servers[i].set_backlog_size(stream_index, new_size);
+       }       
+}
+
+void ServerPool::set_encoding(int stream_index, Stream::Encoding encoding)
 {
        for (int i = 0; i < num_servers; ++i) {
-               servers[i].set_mark_pool(stream_id, mark_pool);
+               servers[i].set_encoding(stream_index, encoding);
        }       
 }