#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>
+#include <algorithm>
#include <map>
#include <set>
#include <string>
#include "input.h"
#include "input_stats.h"
#include "log.h"
-#include "markpool.h"
+#include "sa_compare.h"
#include "serverpool.h"
#include "state.pb.h"
#include "stats.h"
AccessLogThread *access_log = NULL;
ServerPool *servers = NULL;
-vector<MarkPool *> mark_pools;
volatile bool hupped = false;
volatile bool stopped = false;
+namespace {
+
+// Strict weak ordering on serialized clients: the client that connected
+// earlier sorts first. Whole seconds are compared first; nanoseconds
+// only break ties between equal-second timestamps.
+struct OrderByConnectionTime {
+	bool operator() (const ClientProto &a, const ClientProto &b) const {
+		if (a.connect_time_sec() == b.connect_time_sec())
+			return a.connect_time_nsec() < b.connect_time_nsec();
+		return a.connect_time_sec() < b.connect_time_sec();
+	}
+};
+
+}  // namespace
+
struct InputWithRefcount {
Input *input;
int refcount;
// Find all port statements in the configuration file, and create acceptors for them.
vector<Acceptor *> create_acceptors(
const Config &config,
- map<int, Acceptor *> *deserialized_acceptors)
+ map<sockaddr_in6, Acceptor *, Sockaddr6Compare> *deserialized_acceptors)
{
vector<Acceptor *> acceptors;
for (unsigned i = 0; i < config.acceptors.size(); ++i) {
const AcceptorConfig &acceptor_config = config.acceptors[i];
Acceptor *acceptor = NULL;
- map<int, Acceptor *>::iterator deserialized_acceptor_it =
- deserialized_acceptors->find(acceptor_config.port);
+ map<sockaddr_in6, Acceptor *, Sockaddr6Compare>::iterator deserialized_acceptor_it =
+ deserialized_acceptors->find(acceptor_config.addr);
if (deserialized_acceptor_it != deserialized_acceptors->end()) {
acceptor = deserialized_acceptor_it->second;
deserialized_acceptors->erase(deserialized_acceptor_it);
} else {
- int server_sock = create_server_socket(acceptor_config.port, TCP_SOCKET);
- acceptor = new Acceptor(server_sock, acceptor_config.port);
+ int server_sock = create_server_socket(acceptor_config.addr, TCP_SOCKET);
+ acceptor = new Acceptor(server_sock, acceptor_config.addr);
}
acceptor->run();
acceptors.push_back(acceptor);
}
// Close all acceptors that are no longer in the configuration file.
- for (map<int, Acceptor *>::iterator acceptor_it = deserialized_acceptors->begin();
+ for (map<sockaddr_in6, Acceptor *, Sockaddr6Compare>::iterator
+ acceptor_it = deserialized_acceptors->begin();
acceptor_it != deserialized_acceptors->end();
++acceptor_it) {
acceptor_it->second->close_socket();
const set<string> &deserialized_urls,
multimap<string, InputWithRefcount> *inputs)
{
- for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
- const MarkPoolConfig &mp_config = config.mark_pools[i];
- mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));
- }
-
// HTTP streams.
set<string> expecting_urls = deserialized_urls;
for (unsigned i = 0; i < config.streams.size(); ++i) {
if (deserialized_urls.count(stream_config.url) == 0) {
stream_index = servers->add_stream(stream_config.url,
stream_config.backlog_size,
+ stream_config.prebuffering_bytes,
Stream::Encoding(stream_config.encoding));
} else {
stream_index = servers->lookup_stream_by_url(stream_config.url);
Stream::Encoding(stream_config.encoding));
}
- if (stream_config.mark_pool != -1) {
- servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
- }
+ servers->set_pacing_rate(stream_index, stream_config.pacing_rate);
string src = stream_config.src;
if (!src.empty()) {
// UDP streams.
for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
const UDPStreamConfig &udpstream_config = config.udpstreams[i];
- MarkPool *mark_pool = NULL;
- if (udpstream_config.mark_pool != -1) {
- mark_pool = mark_pools[udpstream_config.mark_pool];
- }
- int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool);
+ int stream_index = servers->add_udpstream(
+ udpstream_config.dst,
+ udpstream_config.pacing_rate,
+ udpstream_config.ttl,
+ udpstream_config.multicast_iface_index);
string src = udpstream_config.src;
if (!src.empty()) {
char argv0_canon[PATH_MAX];
char config_filename_canon[PATH_MAX];
- if (realpath(argv[0], argv0_canon) == NULL) {
+ if (realpath("/proc/self/exe", argv0_canon) == NULL) {
log_perror(argv[0]);
exit(1);
}
CubemapStateProto loaded_state;
struct timeval serialize_start;
set<string> deserialized_urls;
- map<int, Acceptor *> deserialized_acceptors;
+ map<sockaddr_in6, Acceptor *, Sockaddr6Compare> deserialized_acceptors;
multimap<string, InputWithRefcount> inputs; // multimap due to older versions without deduplication.
if (state_fd != -1) {
log(INFO, "Deserializing state from previous process...");
string serialized;
- if (!read_tempfile(state_fd, &serialized)) {
+ if (!read_tempfile_and_close(state_fd, &serialized)) {
exit(1);
}
if (!loaded_state.ParseFromString(serialized)) {
data_fds.push_back(stream.data_fds(j));
}
- // Older versions stored the data once in the protobuf instead of
- // sending around file descriptors.
- if (data_fds.empty() && stream.has_data()) {
- data_fds.push_back(make_tempfile(stream.data()));
- }
-
servers->add_stream_from_serialized(stream, data_fds);
deserialized_urls.insert(stream.url());
for (int i = 0; i < loaded_state.inputs_size(); ++i) {
InputProto serialized_input = loaded_state.inputs(i);
- // Older versions did not store the stream header in the input,
- // only in each stream. We need to have the stream header in the
- // input as well, in case we create a new stream reusing the same input.
- // Thus, we put it into place here if it's missing.
- if (!serialized_input.has_stream_header() &&
- stream_headers_for_url.count(serialized_input.url()) != 0) {
- serialized_input.set_stream_header(stream_headers_for_url[serialized_input.url()]);
- }
-
InputWithRefcount iwr;
iwr.input = create_input(serialized_input);
iwr.refcount = 0;
// Deserialize the acceptors.
for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
+ sockaddr_in6 sin6 = extract_address_from_acceptor_proto(loaded_state.acceptors(i));
deserialized_acceptors.insert(make_pair(
- loaded_state.acceptors(i).port(),
+ sin6,
new Acceptor(loaded_state.acceptors(i))));
}
// Find all streams in the configuration file, create them, and connect to the inputs.
create_streams(config, deserialized_urls, &inputs);
vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);
+
+ // Convert old-style timestamps to new-style timestamps for all clients;
+ // this simplifies the sort below.
+ {
+ timespec now_monotonic;
+ if (clock_gettime(CLOCK_MONOTONIC_COARSE, &now_monotonic) == -1) {
+ log(ERROR, "clock_gettime(CLOCK_MONOTONIC_COARSE) failed.");
+ exit(1);
+ }
+ long delta_sec = now_monotonic.tv_sec - time(NULL);
+
+ for (int i = 0; i < loaded_state.clients_size(); ++i) {
+ ClientProto* client = loaded_state.mutable_clients(i);
+ if (client->has_connect_time_old()) {
+ client->set_connect_time_sec(client->connect_time_old() + delta_sec);
+ client->set_connect_time_nsec(now_monotonic.tv_nsec);
+ client->clear_connect_time_old();
+ }
+ }
+ }
// Put back the existing clients. It doesn't matter which server we
- // allocate them to, so just do round-robin. However, we need to add
- // them after the mark pools have been set up.
+ // allocate them to, so just do round-robin. However, we need to sort them
+ // by connection time first, since add_client_serialized() expects that.
+ sort(loaded_state.mutable_clients()->begin(),
+ loaded_state.mutable_clients()->end(),
+ OrderByConnectionTime());
for (int i = 0; i < loaded_state.clients_size(); ++i) {
if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
safe_close(loaded_state.clients(i).sock());
}
delete servers;
- for (unsigned i = 0; i < mark_pools.size(); ++i) {
- delete mark_pools[i];
- }
- mark_pools.clear();
-
access_log->stop();
delete access_log;
shut_down_logging();