+#include <assert.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include "client.h"
+#include "log.h"
+#include "server.h"
#include "serverpool.h"
#include "state.pb.h"
+#include "udpstream.h"
+#include "util.h"
+
+struct sockaddr_in6;
using namespace std;
+// Creates a pool of |size| identical Server instances. HTTP streams added
+// later are mirrored onto every server; num_http_streams counts them
+// (UDP streams are kept in udp_streams and indexed after the HTTP ones).
ServerPool::ServerPool(int size)
	: servers(new Server[size]),
	  num_servers(size),
-	  clients_added(0)
+	  clients_added(0),
+	  num_http_streams(0)
{
}
+// Frees the Server array and every UDPStream the pool allocated in
+// add_udpstream().
ServerPool::~ServerPool()
{
	delete[] servers;
+
+	// The pool owns the UDPStream objects; delete them explicitly.
+	for (size_t i = 0; i < udp_streams.size(); ++i) {
+		delete udp_streams[i];
+	}
}
+// Gathers the serialized state of every server into one CubemapStateProto
+// (used to carry streams and clients across re-exec).
+// NOTE(review): the diff elides lines inside this function — 'state' and
+// 'client' are defined in context not visible here; confirm against the
+// full file.
CubemapStateProto ServerPool::serialize()
	for (int i = 0; i < num_servers; ++i) {
		CubemapStateProto local_state = servers[i].serialize();
-		// The stream state should be identical between the servers, so we only store it once.
+		// The stream state should be identical between the servers, so we only store it once,
+		// save for the fds, which we keep around to distribute to the servers after re-exec.
		if (i == 0) {
			state.mutable_streams()->MergeFrom(local_state.streams());
+		} else {
+			// Every server past the first contributes exactly one data fd
+			// per stream; append it to the entry stored from server 0.
+			assert(state.streams_size() == local_state.streams_size());
+			for (int j = 0; j < local_state.streams_size(); ++j) {
+				assert(local_state.streams(j).data_fds_size() == 1);
+				state.mutable_streams(j)->add_data_fds(local_state.streams(j).data_fds(0));
+			}
		}
		for (int j = 0; j < local_state.clients_size(); ++j) {
			state.add_clients()->MergeFrom(local_state.clients(j));
			servers[clients_added++ % num_servers].add_client_from_serialized(client);
		}
+// Looks up an HTTP stream index by URL. All servers hold identical stream
+// sets, so asking servers[0] is sufficient.
-void ServerPool::add_stream(const std::string &stream_id, size_t backlog_size)
+int ServerPool::lookup_stream_by_url(const std::string &url) const
{
+	assert(servers != NULL);
+	return servers[0].lookup_stream_by_url(url);
+}
+
+// Registers a new HTTP stream on every server in the pool and returns its
+// pool-wide stream index.
+int ServerPool::add_stream(const string &url, size_t backlog_size, Stream::Encoding encoding)
+{
+	// Adding more HTTP streams after UDP streams would cause the UDP stream
+	// indices to move around, which is obviously not good.
+	assert(udp_streams.empty());
+
	for (int i = 0; i < num_servers; ++i) {
-		servers[i].add_stream(stream_id, backlog_size);
+		int stream_index = servers[i].add_stream(url, backlog_size, encoding);
+		// All servers must agree on the index assigned to the new stream.
+		assert(stream_index == num_http_streams);
	}
+	return num_http_streams++;
}
+// Recreates a stream (from serialized state after re-exec) on every server.
+// data_fds holds the backlog file descriptors from the previous run, one per
+// old server. If the server count grew, the first fd's contents are cloned
+// into fresh tempfiles; if it shrank, the leftovers are closed below.
+// Returns the pool-wide stream index.
-void ServerPool::add_stream_from_serialized(const StreamProto &stream)
+int ServerPool::add_stream_from_serialized(const StreamProto &stream, const vector<int> &data_fds)
{
+	// Adding more HTTP streams after UDP streams would cause the UDP stream
+	// indices to move around, which is obviously not good.
+	assert(udp_streams.empty());
+
+	assert(!data_fds.empty());
+	string contents;
	for (int i = 0; i < num_servers; ++i) {
-		servers[i].add_stream_from_serialized(stream);
+		int data_fd;
+		if (i < int(data_fds.size())) {
+			// Reuse one of the existing file descriptors.
+			data_fd = data_fds[i];
+		} else {
+			// Clone the first one.
+			if (contents.empty()) {
+				// Read the backlog lazily, and only once, however many
+				// extra servers need a copy.
+				if (!read_tempfile(data_fds[0], &contents)) {
+					exit(1);
+				}
+			}
+			data_fd = make_tempfile(contents);
+		}
+
+		int stream_index = servers[i].add_stream_from_serialized(stream, data_fd);
+		// All servers must agree on the index assigned to the stream.
+		assert(stream_index == num_http_streams);
	}
+
+	// Close and delete any leftovers, if the number of servers was reduced.
+	for (size_t i = num_servers; i < data_fds.size(); ++i) {
+		safe_close(data_fds[i]); // Implicitly deletes the file.
+	}
+
+	return num_http_streams++;
+}
+
+// Creates a new UDP output stream. UDP streams are owned by the pool itself
+// (not by the individual servers) and are indexed after all HTTP streams.
+// Returns the pool-wide stream index.
+int ServerPool::add_udpstream(const sockaddr_in6 &dst, int pacing_rate, int ttl, int multicast_iface_index)
+{
+	udp_streams.push_back(new UDPStream(dst, pacing_rate, ttl, multicast_iface_index));
+	return num_http_streams + udp_streams.size() - 1;
}
+// Sets the HTTP and stream headers for the given stream on every server.
+// UDP destinations have nowhere to put headers, so for UDP streams both are
+// dropped (with a warning if a nonempty stream header would be lost).
-void ServerPool::set_header(const std::string &stream_id, const std::string &header)
+void ServerPool::set_header(int stream_index, const string &http_header, const string &stream_header)
{
+	assert(stream_index >= 0 && stream_index < ssize_t(num_http_streams + udp_streams.size()));
+
+	if (stream_index >= num_http_streams) {
+		// UDP stream. TODO: Log which stream this is.
+		if (!stream_header.empty()) {
+			log(WARNING, "Trying to send stream format with headers to a UDP destination. This is unlikely to work well.");
+		}
+
+		// Ignore the HTTP header.
+		return;
+	}
+
+	// HTTP stream.
	for (int i = 0; i < num_servers; ++i) {
-		servers[i].set_header(stream_id, header);
+		servers[i].set_header(stream_index, http_header, stream_header);
	}
}
+// Distributes new stream data: sent on the wire immediately for UDP
+// streams, or queued (deferred) on every server for HTTP streams.
-void ServerPool::add_data(const std::string &stream_id, const char *data, size_t bytes)
+void ServerPool::add_data(int stream_index, const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
{
+	assert(stream_index >= 0 && stream_index < ssize_t(num_http_streams + udp_streams.size()));
+
+	if (stream_index >= num_http_streams) {
+		// UDP stream. Indices past num_http_streams map into udp_streams.
+		udp_streams[stream_index - num_http_streams]->send(data, bytes);
+		return;
+	}
+
+	// HTTP stream.
	for (int i = 0; i < num_servers; ++i) {
-		servers[i].add_data_deferred(stream_id, data, bytes);
+		servers[i].add_data_deferred(stream_index, data, bytes, suitable_for_stream_start);
	}
}
return ret;
}
+// Sets the socket pacing rate for the given HTTP stream on every server.
-void ServerPool::set_mark_pool(const std::string &stream_id, MarkPool *mark_pool)
+void ServerPool::set_pacing_rate(int stream_index, uint32_t pacing_rate)
+{
+	for (int i = 0; i < num_servers; ++i) {
+		servers[i].set_pacing_rate(stream_index, pacing_rate);
+	}
+}
+
+// Resizes the backlog for the given HTTP stream on every server.
+void ServerPool::set_backlog_size(int stream_index, size_t new_size)
+{
+	for (int i = 0; i < num_servers; ++i) {
+		servers[i].set_backlog_size(stream_index, new_size);
+	}
+}
+
+// Changes the output encoding for the given HTTP stream on every server.
+void ServerPool::set_encoding(int stream_index, Stream::Encoding encoding)
{
	for (int i = 0; i < num_servers; ++i) {
-		servers[i].set_mark_pool(stream_id, mark_pool);
+		servers[i].set_encoding(stream_index, encoding);
	}
}