X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=serverpool.cpp;h=8e233dafe8d9fb319bb29e09e6b4974b4ddedaad;hp=1c99d4065ef50a39fe808ebcb72331a81d8e0856;hb=061988af511f42da3cd584b4d983177504ddc177;hpb=ce0d42a14da5686366d0c73fa9b152e90619ff91

diff --git a/serverpool.cpp b/serverpool.cpp
index 1c99d40..8e233da 100644
--- a/serverpool.cpp
+++ b/serverpool.cpp
@@ -16,27 +16,18 @@ using namespace std;
 
 ServerPool::ServerPool(int size)
 	: servers(new Server[size]),
-	  num_servers(size),
-	  clients_added(0),
-	  num_http_streams(0)
+	  num_servers(size)
 {
 }
 
-ServerPool::~ServerPool()
-{
-	delete[] servers;
-
-	for (size_t i = 0; i < udp_streams.size(); ++i) {
-		delete udp_streams[i];
-	}
-}
-
 CubemapStateProto ServerPool::serialize()
 {
 	CubemapStateProto state;
 
+	unordered_map<const string *, size_t> short_response_pool;
+
 	for (int i = 0; i < num_servers; ++i) {
-		CubemapStateProto local_state = servers[i].serialize();
+		CubemapStateProto local_state = servers[i].serialize(&short_response_pool);
 
 		// The stream state should be identical between the servers, so we only store it once,
 		// save for the fds, which we keep around to distribute to the servers after re-exec.
@@ -49,38 +40,45 @@ CubemapStateProto ServerPool::serialize()
 				state.mutable_streams(j)->add_data_fds(local_state.streams(j).data_fds(0));
 			}
 		}
-		for (int j = 0; j < local_state.clients_size(); ++j) {
-			state.add_clients()->MergeFrom(local_state.clients(j));
+		for (const ClientProto &client : local_state.clients()) {
+			state.add_clients()->MergeFrom(client);
 		}
 	}
 
+	for (size_t i = 0; i < short_response_pool.size(); ++i) {
+		state.mutable_short_response_pool()->Add();
+	}
+	for (const auto &string_and_index : short_response_pool) {
+		state.mutable_short_response_pool(string_and_index.second)->set_header_or_short_response(*string_and_index.first);
+	}
+
 	return state;
 }
 
-void ServerPool::add_client(int sock)
+void ServerPool::add_client(int sock, Acceptor *acceptor)
 {
-	servers[clients_added++ % num_servers].add_client_deferred(sock);
+	servers[clients_added++ % num_servers].add_client_deferred(sock, acceptor);
 }
 
-void ServerPool::add_client_from_serialized(const ClientProto &client)
+void ServerPool::add_client_from_serialized(const ClientProto &client, const std::vector<std::shared_ptr<const std::string>> &short_responses)
 {
-	servers[clients_added++ % num_servers].add_client_from_serialized(client);
+	servers[clients_added++ % num_servers].add_client_from_serialized(client, short_responses);
 }
 
-int ServerPool::lookup_stream_by_url(const std::string &url) const
+int ServerPool::lookup_stream_by_url(const string &url) const
 {
-	assert(servers != NULL);
+	assert(servers != nullptr);
 	return servers[0].lookup_stream_by_url(url);
 }
 
-int ServerPool::add_stream(const string &url, size_t backlog_size, Stream::Encoding encoding)
+int ServerPool::add_stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Stream::Encoding encoding, Stream::Encoding src_encoding)
 {
 	// Adding more HTTP streams after UDP streams would cause the UDP stream
 	// indices to move around, which is obviously not good.
 	assert(udp_streams.empty());
 	for (int i = 0; i < num_servers; ++i) {
-		int stream_index = servers[i].add_stream(url, backlog_size, encoding);
+		int stream_index = servers[i].add_stream(url, backlog_size, prebuffering_bytes, encoding, src_encoding);
 		assert(stream_index == num_http_streams);
 	}
 	return num_http_streams++;
 }
@@ -121,9 +119,9 @@ int ServerPool::add_stream_from_serialized(const StreamProto &stream, const vect
 	return num_http_streams++;
 }
 
-int ServerPool::add_udpstream(const sockaddr_in6 &dst, MarkPool *mark_pool, int pacing_rate)
+int ServerPool::add_udpstream(const sockaddr_in6 &dst, int pacing_rate, int ttl, int multicast_iface_index)
 {
-	udp_streams.push_back(new UDPStream(dst, mark_pool, pacing_rate));
+	udp_streams.emplace_back(new UDPStream(dst, pacing_rate, ttl, multicast_iface_index));
 	return num_http_streams + udp_streams.size() - 1;
 }
 
@@ -147,7 +145,7 @@ void ServerPool::set_header(int stream_index, const string &http_header, const s
 	}
 }
 
-void ServerPool::add_data(int stream_index, const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
+void ServerPool::add_data(int stream_index, const char *data, size_t bytes, uint16_t metacube_flags)
 {
 	assert(stream_index >= 0 && stream_index < ssize_t(num_http_streams + udp_streams.size()));
 
@@ -159,7 +157,21 @@ void ServerPool::add_data(int stream_index, const char *data, size_t bytes, Stre
 
 	// HTTP stream.
 	for (int i = 0; i < num_servers; ++i) {
-		servers[i].add_data_deferred(stream_index, data, bytes, suitable_for_stream_start);
+		servers[i].add_data_deferred(stream_index, data, bytes, metacube_flags);
+	}
+}
+
+void ServerPool::add_gen204(const std::string &url, const std::string &allow_origin)
+{
+	for (int i = 0; i < num_servers; ++i) {
+		servers[i].add_gen204(url, allow_origin);
+	}
+}
+
+void ServerPool::create_tls_context_for_acceptor(const Acceptor *acceptor)
+{
+	for (int i = 0; i < num_servers; ++i) {
+		servers[i].create_tls_context_for_acceptor(acceptor);
 	}
 }
 
@@ -187,13 +199,6 @@ vector<ClientStats> ServerPool::get_client_stats() const
 	return ret;
 }
 
-void ServerPool::set_mark_pool(int stream_index, MarkPool *mark_pool)
-{
-	for (int i = 0; i < num_servers; ++i) {
-		servers[i].set_mark_pool(stream_index, mark_pool);
-	}
-}
-
 void ServerPool::set_pacing_rate(int stream_index, uint32_t pacing_rate)
 {
 	for (int i = 0; i < num_servers; ++i) {
@@ -208,9 +213,23 @@ void ServerPool::set_backlog_size(int stream_index, size_t new_size)
 	}
 }
 
+void ServerPool::set_prebuffering_bytes(int stream_index, size_t new_amount)
+{
+	for (int i = 0; i < num_servers; ++i) {
+		servers[i].set_prebuffering_bytes(stream_index, new_amount);
+	}
+}
+
 void ServerPool::set_encoding(int stream_index, Stream::Encoding encoding)
 {
 	for (int i = 0; i < num_servers; ++i) {
 		servers[i].set_encoding(stream_index, encoding);
 	}
 }
+
+void ServerPool::set_src_encoding(int stream_index, Stream::Encoding encoding)
+{
+	for (int i = 0; i < num_servers; ++i) {
+		servers[i].set_src_encoding(stream_index, encoding);
+	}
+}
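
Editor's note (not part of the diff above): the new ServerPool::serialize() deduplicates headers and short responses by assigning each distinct string an index in an unordered_map<const string *, size_t> and storing only the index per client, then writing the pool out in index order. The following standalone sketch illustrates that pooling pattern in isolation; the names (pool_index, clients, etc.) are illustrative and not taken from cubemap.

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

using namespace std;

// Assign "s" an index in the pool if it does not have one yet, and return the index.
size_t pool_index(const string *s, unordered_map<const string *, size_t> *pool)
{
	auto it = pool->find(s);
	if (it != pool->end()) {
		return it->second;
	}
	size_t index = pool->size();
	pool->emplace(s, index);
	return index;
}

int main()
{
	string hdr = "HTTP/1.0 200 OK\r\n\r\n";
	string err = "HTTP/1.0 404 Not Found\r\n\r\n";

	// Three "clients" sharing two distinct responses.
	vector<const string *> clients = { &hdr, &err, &hdr };

	unordered_map<const string *, size_t> short_response_pool;
	for (const string *response : clients) {
		printf("client references pool entry %zu\n", pool_index(response, &short_response_pool));
	}

	// unordered_map iteration order is arbitrary, so emit the pool by index,
	// much as serialize() first Add()s empty entries and then fills slot
	// string_and_index.second with *string_and_index.first.
	vector<const string *> in_order(short_response_pool.size());
	for (const auto &string_and_index : short_response_pool) {
		in_order[string_and_index.second] = string_and_index.first;
	}
	for (size_t i = 0; i < in_order.size(); ++i) {
		printf("pool[%zu] = %s\n", i, in_order[i]->c_str());
	}
}

Deduplicating on the string's address (const string *) rather than its contents keeps the pooling cheap: clients that already share the same string object automatically share one pool entry.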