#include <string.h>
#include <sys/time.h>
#include <sys/wait.h>
+#include <systemd/sd-daemon.h>
#include <unistd.h>
+#include <algorithm>
#include <map>
#include <set>
#include <string>
#include "input.h"
#include "input_stats.h"
#include "log.h"
-#include "markpool.h"
#include "sa_compare.h"
#include "serverpool.h"
#include "state.pb.h"
AccessLogThread *access_log = NULL;
ServerPool *servers = NULL;
-vector<MarkPool *> mark_pools;
volatile bool hupped = false;
volatile bool stopped = false;
+// An input is keyed on (source URL, source encoding) rather than URL alone,
+// so the same URL can be consumed both raw and Metacube-encoded as two
+// independent inputs.
+typedef pair<string, Input::Encoding> InputKey;
+
+namespace {
+
+// Strict weak ordering on serialized clients: earlier connection time first
+// (seconds, with nanoseconds as tiebreak). Used to sort deserialized clients
+// before re-adding them, since add_client_serialized() expects sorted input.
+struct OrderByConnectionTime {
+ bool operator() (const ClientProto &a, const ClientProto &b) const {
+ if (a.connect_time_sec() != b.connect_time_sec())
+ return a.connect_time_sec() < b.connect_time_sec();
+ return a.connect_time_nsec() < b.connect_time_nsec();
+ }
+};
+
+} // namespace
+
struct InputWithRefcount {
Input *input;
int refcount;
{
}
-CubemapStateProto collect_state(const timeval &serialize_start,
+CubemapStateProto collect_state(const timespec &serialize_start,
const vector<Acceptor *> acceptors,
- const multimap<string, InputWithRefcount> inputs,
+ const multimap<InputKey, InputWithRefcount> inputs,
ServerPool *servers)
{
CubemapStateProto state = servers->serialize(); // Fills streams() and clients().
state.set_serialize_start_sec(serialize_start.tv_sec);
- state.set_serialize_start_usec(serialize_start.tv_usec);
+ state.set_serialize_start_usec(serialize_start.tv_nsec / 1000);
for (size_t i = 0; i < acceptors.size(); ++i) {
state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
}
- for (multimap<string, InputWithRefcount>::const_iterator input_it = inputs.begin();
+ for (multimap<InputKey, InputWithRefcount>::const_iterator input_it = inputs.begin();
input_it != inputs.end();
++input_it) {
state.add_inputs()->MergeFrom(input_it->second.input->serialize());
return acceptors;
}
// Create an Input for the given source URL and encoding, and register it in
// <inputs> under its (URL, encoding) key. No-op if <src> is empty or if an
// equivalent input already exists. New inputs start with refcount 0; the
// count is bumped later for each stream that actually uses the input.
-void create_config_input(const string &src, multimap<string, InputWithRefcount> *inputs)
+void create_config_input(const string &src, Input::Encoding encoding, multimap<InputKey, InputWithRefcount> *inputs)
{
 if (src.empty()) {
 return;
 }
+ // Deduplicate on (URL, encoding), not just URL: the same URL may be
+ // wanted both raw and Metacube-encoded.
- if (inputs->count(src) != 0) {
+ InputKey key(src, encoding);
+ if (inputs->count(key) != 0) {
 return;
 }
 InputWithRefcount iwr;
- iwr.input = create_input(src);
+ iwr.input = create_input(src, encoding);
 if (iwr.input == NULL) {
 // Log and drop the stream rather than aborting; other streams keep working.
- log(ERROR, "did not understand URL '%s', clients will not get any data.",
+ log(ERROR, "did not understand URL '%s' or source encoding was invalid, clients will not get any data.",
 src.c_str());
 return;
 }
 iwr.refcount = 0;
- inputs->insert(make_pair(src, iwr));
+ inputs->insert(make_pair(key, iwr));
}
// Find all streams in the configuration file, and create inputs for them.
-void create_config_inputs(const Config &config, multimap<string, InputWithRefcount> *inputs)
+void create_config_inputs(const Config &config, multimap<InputKey, InputWithRefcount> *inputs)
{
 // HTTP streams: each carries its own source encoding from the config.
 // A src of "delete" marks the stream for removal, so no input is made for it.
 for (unsigned i = 0; i < config.streams.size(); ++i) {
 const StreamConfig &stream_config = config.streams[i];
 if (stream_config.src != "delete") {
- create_config_input(stream_config.src, inputs);
+ create_config_input(stream_config.src, Input::Encoding(stream_config.src_encoding), inputs);
 }
 }
 // UDP streams: their inputs are always treated as raw (no Metacube framing).
 for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
 const UDPStreamConfig &udpstream_config = config.udpstreams[i];
- create_config_input(udpstream_config.src, inputs);
+ create_config_input(udpstream_config.src, Input::INPUT_ENCODING_RAW, inputs);
 }
}
// Set up every stream described in the configuration: HTTP streams, UDP
// streams, and gen204 ping endpoints. Streams whose URL appears in
// <deserialized_urls> survived a re-exec and are updated in place instead of
// being created anew. For each stream with a source, the matching entry in
// <inputs> gets this stream added as a destination and its refcount bumped,
// so unused inputs can be closed afterwards.
// NOTE(review): several context lines are elided in this diff hunk — e.g.
// the declarations of stream_config and stream_index inside the HTTP loop
// live in omitted lines.
void create_streams(const Config &config,
 const set<string> &deserialized_urls,
- multimap<string, InputWithRefcount> *inputs)
+ multimap<InputKey, InputWithRefcount> *inputs)
{
- for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
- const MarkPoolConfig &mp_config = config.mark_pools[i];
- mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));
- }
-
 // HTTP streams.
 set<string> expecting_urls = deserialized_urls;
 for (unsigned i = 0; i < config.streams.size(); ++i) {
 if (deserialized_urls.count(stream_config.url) == 0) {
 // New stream: create it with the full set of parameters.
 stream_index = servers->add_stream(stream_config.url,
 stream_config.backlog_size,
- Stream::Encoding(stream_config.encoding));
+ stream_config.prebuffering_bytes,
+ Stream::Encoding(stream_config.encoding),
+ Stream::Encoding(stream_config.src_encoding));
 } else {
 // Existing (deserialized) stream: update its parameters in place.
 stream_index = servers->lookup_stream_by_url(stream_config.url);
 assert(stream_index != -1);
 servers->set_backlog_size(stream_index, stream_config.backlog_size);
+ servers->set_prebuffering_bytes(stream_index, stream_config.prebuffering_bytes);
 servers->set_encoding(stream_index,
 Stream::Encoding(stream_config.encoding));
- }
-
- if (stream_config.mark_pool != -1) {
- servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
+ servers->set_src_encoding(stream_index,
+ Stream::Encoding(stream_config.src_encoding));
 }
 servers->set_pacing_rate(stream_index, stream_config.pacing_rate);
 string src = stream_config.src;
+ Input::Encoding src_encoding = Input::Encoding(stream_config.src_encoding);
 if (!src.empty()) {
 // Look up the input under (URL, encoding); see InputKey.
- multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
+ multimap<InputKey, InputWithRefcount>::iterator input_it = inputs->find(make_pair(src, src_encoding));
 if (input_it != inputs->end()) {
 input_it->second.input->add_destination(stream_index);
 ++input_it->second.refcount;
 // UDP streams.
 for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
 const UDPStreamConfig &udpstream_config = config.udpstreams[i];
- MarkPool *mark_pool = NULL;
- if (udpstream_config.mark_pool != -1) {
- mark_pool = mark_pools[udpstream_config.mark_pool];
- }
- int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool, udpstream_config.pacing_rate);
+ int stream_index = servers->add_udpstream(
+ udpstream_config.dst,
+ udpstream_config.pacing_rate,
+ udpstream_config.ttl,
+ udpstream_config.multicast_iface_index);
 string src = udpstream_config.src;
 if (!src.empty()) {
 // UDP inputs are always registered raw, so look up with INPUT_ENCODING_RAW.
- multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
+ multimap<InputKey, InputWithRefcount>::iterator input_it = inputs->find(make_pair(src, Input::INPUT_ENCODING_RAW));
 assert(input_it != inputs->end());
 input_it->second.input->add_destination(stream_index);
 ++input_it->second.refcount;
 }
 }
+
+ // HTTP gen204 endpoints.
+ for (unsigned i = 0; i < config.pings.size(); ++i) {
+ const Gen204Config &ping_config = config.pings[i];
+ servers->add_gen204(ping_config.url, ping_config.allow_origin);
+ }
}
// Set up the configured log destinations, then start the background logging
// thread. (The destination-handling body is elided in this chunk; only the
// final start_logging() call is visible.)
void open_logs(const vector<LogConfig> &log_destinations)
 start_logging();
}
-bool dry_run_config(const std::string &argv0, const std::string &config_filename)
+bool dry_run_config(const string &argv0, const string &config_filename)
{
char *argv0_copy = strdup(argv0.c_str());
char *config_filename_copy = strdup(config_filename.c_str());
find_deleted_streams(config, &deleted_urls);
CubemapStateProto loaded_state;
- struct timeval serialize_start;
+ timespec serialize_start;
set<string> deserialized_urls;
map<sockaddr_in6, Acceptor *, Sockaddr6Compare> deserialized_acceptors;
- multimap<string, InputWithRefcount> inputs; // multimap due to older versions without deduplication.
+ multimap<InputKey, InputWithRefcount> inputs; // multimap due to older versions without deduplication.
if (state_fd != -1) {
log(INFO, "Deserializing state from previous process...");
string serialized;
- if (!read_tempfile(state_fd, &serialized)) {
+ if (!read_tempfile_and_close(state_fd, &serialized)) {
exit(1);
}
if (!loaded_state.ParseFromString(serialized)) {
}
serialize_start.tv_sec = loaded_state.serialize_start_sec();
- serialize_start.tv_usec = loaded_state.serialize_start_usec();
+ serialize_start.tv_nsec = loaded_state.serialize_start_usec() * 1000ull;
// Deserialize the streams.
map<string, string> stream_headers_for_url; // See below.
InputWithRefcount iwr;
iwr.input = create_input(serialized_input);
iwr.refcount = 0;
- inputs.insert(make_pair(serialized_input.url(), iwr));
+
+ Input::Encoding src_encoding = serialized_input.is_metacube_encoded() ?
+ Input::INPUT_ENCODING_METACUBE :
+ Input::INPUT_ENCODING_RAW;
+ InputKey key(serialized_input.url(), src_encoding);
+ inputs.insert(make_pair(key, iwr));
}
// Deserialize the acceptors.
for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
- sockaddr_in6 sin6 = ExtractAddressFromAcceptorProto(loaded_state.acceptors(i));
+ sockaddr_in6 sin6 = extract_address_from_acceptor_proto(loaded_state.acceptors(i));
deserialized_acceptors.insert(make_pair(
sin6,
new Acceptor(loaded_state.acceptors(i))));
vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);
// Put back the existing clients. It doesn't matter which server we
- // allocate them to, so just do round-robin. However, we need to add
- // them after the mark pools have been set up.
+ // allocate them to, so just do round-robin. However, we need to sort them
+ // by connection time first, since add_client_serialized() expects that.
+ sort(loaded_state.mutable_clients()->begin(),
+ loaded_state.mutable_clients()->end(),
+ OrderByConnectionTime());
for (int i = 0; i < loaded_state.clients_size(); ++i) {
if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
safe_close(loaded_state.clients(i).sock());
servers->run();
// Now delete all inputs that are longer in use, and start the others.
- for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+ for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
input_it != inputs.end(); ) {
if (input_it->second.refcount == 0) {
- log(WARNING, "Input '%s' no longer in use, closing.",
- input_it->first.c_str());
+ if (input_it->first.second == Input::INPUT_ENCODING_RAW) {
+ log(WARNING, "Raw input '%s' no longer in use, closing.",
+ input_it->first.first.c_str());
+ } else {
+ assert(input_it->first.second == Input::INPUT_ENCODING_METACUBE);
+ log(WARNING, "Metacube input '%s' no longer in use, closing.",
+ input_it->first.first.c_str());
+ }
input_it->second.input->close_socket();
delete input_it->second.input;
inputs.erase(input_it++);
InputStatsThread *input_stats_thread = NULL;
if (!config.input_stats_file.empty()) {
vector<Input*> inputs_no_refcount;
- for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+ for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
input_it != inputs.end(); ++input_it) {
inputs_no_refcount.push_back(input_it->second.input);
}
input_stats_thread->run();
}
- struct timeval server_start;
- gettimeofday(&server_start, NULL);
+ timespec server_start;
+ int err = clock_gettime(CLOCK_MONOTONIC, &server_start);
+ assert(err != -1);
if (state_fd != -1) {
// Measure time from we started deserializing (below) to now, when basically everything
// is up and running. This is, in other words, a conservative estimate of how long our
// “glitch” period was, not counting of course reconnects if the configuration changed.
double glitch_time = server_start.tv_sec - serialize_start.tv_sec +
- 1e-6 * (server_start.tv_usec - serialize_start.tv_usec);
+ 1e-9 * (server_start.tv_nsec - serialize_start.tv_nsec);
log(INFO, "Re-exec happened in approx. %.0f ms.", glitch_time * 1000.0);
}
+ sd_notify(0, "READY=1");
+
while (!hupped) {
usleep(100000);
}
+ if (stopped) {
+ sd_notify(0, "STOPPING=1");
+ } else {
+ sd_notify(0, "RELOADING=1");
+ }
+
// OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
- gettimeofday(&serialize_start, NULL);
+ err = clock_gettime(CLOCK_MONOTONIC, &serialize_start);
+ assert(err != -1);
if (input_stats_thread != NULL) {
input_stats_thread->stop();
for (size_t i = 0; i < acceptors.size(); ++i) {
acceptors[i]->stop();
}
- for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+ for (multimap<InputKey, InputWithRefcount>::iterator input_it = inputs.begin();
input_it != inputs.end();
++input_it) {
input_it->second.input->stop();
}
delete servers;
- for (unsigned i = 0; i < mark_pools.size(); ++i) {
- delete mark_pools[i];
- }
- mark_pools.clear();
-
access_log->stop();
delete access_log;
shut_down_logging();