#include <assert.h>
#include <errno.h>
#include <getopt.h>
+#include <limits.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <vector>
#include "acceptor.h"
+#include "accesslog.h"
#include "config.h"
#include "input.h"
+#include "input_stats.h"
#include "log.h"
#include "markpool.h"
+#include "sa_compare.h"
#include "serverpool.h"
#include "state.pb.h"
#include "stats.h"
+#include "stream.h"
#include "util.h"
#include "version.h"
using namespace std;
+AccessLogThread *access_log = NULL;
ServerPool *servers = NULL;
+vector<MarkPool *> mark_pools;
volatile bool hupped = false;
+volatile bool stopped = false;
-void hup(int ignored)
+struct InputWithRefcount {
+ Input *input;
+ int refcount;
+};
+
+void hup(int signum)
{
hupped = true;
+ if (signum == SIGINT) {
+ stopped = true;
+ }
+}
+
+void do_nothing(int signum)
+{
}
CubemapStateProto collect_state(const timeval &serialize_start,
const vector<Acceptor *> acceptors,
- const vector<Input *> inputs,
+ const multimap<string, InputWithRefcount> inputs,
ServerPool *servers)
{
CubemapStateProto state = servers->serialize(); // Fills streams() and clients().
state.add_acceptors()->MergeFrom(acceptors[i]->serialize());
}
- for (size_t i = 0; i < inputs.size(); ++i) {
- state.add_inputs()->MergeFrom(inputs[i]->serialize());
+ for (multimap<string, InputWithRefcount>::const_iterator input_it = inputs.begin();
+ input_it != inputs.end();
+ ++input_it) {
+ state.add_inputs()->MergeFrom(input_it->second.input->serialize());
}
return state;
// Find all port statements in the configuration file, and create acceptors for them.
vector<Acceptor *> create_acceptors(
const Config &config,
- map<int, Acceptor *> *deserialized_acceptors)
+ map<sockaddr_in6, Acceptor *, Sockaddr6Compare> *deserialized_acceptors)
{
vector<Acceptor *> acceptors;
for (unsigned i = 0; i < config.acceptors.size(); ++i) {
const AcceptorConfig &acceptor_config = config.acceptors[i];
Acceptor *acceptor = NULL;
- map<int, Acceptor *>::iterator deserialized_acceptor_it =
- deserialized_acceptors->find(acceptor_config.port);
+ map<sockaddr_in6, Acceptor *, Sockaddr6Compare>::iterator deserialized_acceptor_it =
+ deserialized_acceptors->find(acceptor_config.addr);
if (deserialized_acceptor_it != deserialized_acceptors->end()) {
acceptor = deserialized_acceptor_it->second;
deserialized_acceptors->erase(deserialized_acceptor_it);
} else {
- int server_sock = create_server_socket(acceptor_config.port, TCP_SOCKET);
- acceptor = new Acceptor(server_sock, acceptor_config.port);
+ int server_sock = create_server_socket(acceptor_config.addr, TCP_SOCKET);
+ acceptor = new Acceptor(server_sock, acceptor_config.addr);
}
acceptor->run();
acceptors.push_back(acceptor);
}
// Close all acceptors that are no longer in the configuration file.
- for (map<int, Acceptor *>::iterator acceptor_it = deserialized_acceptors->begin();
+ for (map<sockaddr_in6, Acceptor *, Sockaddr6Compare>::iterator
+ acceptor_it = deserialized_acceptors->begin();
acceptor_it != deserialized_acceptors->end();
++acceptor_it) {
acceptor_it->second->close_socket();
return acceptors;
}
+void create_config_input(const string &src, multimap<string, InputWithRefcount> *inputs)
+{
+ if (src.empty()) {
+ return;
+ }
+ if (inputs->count(src) != 0) {
+ return;
+ }
+
+ InputWithRefcount iwr;
+ iwr.input = create_input(src);
+ if (iwr.input == NULL) {
+ log(ERROR, "did not understand URL '%s', clients will not get any data.",
+ src.c_str());
+ return;
+ }
+ iwr.refcount = 0;
+ inputs->insert(make_pair(src, iwr));
+}
+
// Find all streams in the configuration file, and create inputs for them.
-vector<Input *> create_inputs(const Config &config,
- map<string, Input *> *deserialized_inputs)
+void create_config_inputs(const Config &config, multimap<string, InputWithRefcount> *inputs)
{
- vector<Input *> inputs;
for (unsigned i = 0; i < config.streams.size(); ++i) {
const StreamConfig &stream_config = config.streams[i];
- if (stream_config.src.empty()) {
- continue;
- }
-
- string stream_id = stream_config.stream_id;
- string src = stream_config.src;
-
- Input *input = NULL;
- map<string, Input *>::iterator deserialized_input_it =
- deserialized_inputs->find(stream_id);
- if (deserialized_input_it != deserialized_inputs->end()) {
- input = deserialized_input_it->second;
- if (input->get_url() != src) {
- log(INFO, "Stream '%s' has changed URL from '%s' to '%s', restarting input.",
- stream_id.c_str(), input->get_url().c_str(), src.c_str());
- input->close_socket();
- delete input;
- input = NULL;
- }
- deserialized_inputs->erase(deserialized_input_it);
+ if (stream_config.src != "delete") {
+ create_config_input(stream_config.src, inputs);
}
- if (input == NULL) {
- input = create_input(stream_id, src);
- if (input == NULL) {
- log(ERROR, "did not understand URL '%s', clients will not get any data.",
- src.c_str());
- continue;
- }
- }
- input->run();
- inputs.push_back(input);
}
- return inputs;
+ for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
+ const UDPStreamConfig &udpstream_config = config.udpstreams[i];
+ create_config_input(udpstream_config.src, inputs);
+ }
}
void create_streams(const Config &config,
- const set<string> &deserialized_stream_ids,
- map<string, Input *> *deserialized_inputs)
+ const set<string> &deserialized_urls,
+ multimap<string, InputWithRefcount> *inputs)
{
- vector<MarkPool *> mark_pools; // FIXME: leak
for (unsigned i = 0; i < config.mark_pools.size(); ++i) {
const MarkPoolConfig &mp_config = config.mark_pools[i];
mark_pools.push_back(new MarkPool(mp_config.from, mp_config.to));
}
- set<string> expecting_stream_ids = deserialized_stream_ids;
+ // HTTP streams.
+ set<string> expecting_urls = deserialized_urls;
for (unsigned i = 0; i < config.streams.size(); ++i) {
const StreamConfig &stream_config = config.streams[i];
- if (deserialized_stream_ids.count(stream_config.stream_id) == 0) {
- servers->add_stream(stream_config.stream_id, stream_config.backlog_size);
+ int stream_index;
+
+ expecting_urls.erase(stream_config.url);
+
+ // Special-case deleted streams; they were never deserialized in the first place,
+ // so just ignore them.
+ if (stream_config.src == "delete") {
+ continue;
+ }
+
+ if (deserialized_urls.count(stream_config.url) == 0) {
+ stream_index = servers->add_stream(stream_config.url,
+ stream_config.backlog_size,
+ Stream::Encoding(stream_config.encoding));
} else {
- servers->set_backlog_size(stream_config.stream_id, stream_config.backlog_size);
+ stream_index = servers->lookup_stream_by_url(stream_config.url);
+ assert(stream_index != -1);
+ servers->set_backlog_size(stream_index, stream_config.backlog_size);
+ servers->set_encoding(stream_index,
+ Stream::Encoding(stream_config.encoding));
}
- expecting_stream_ids.erase(stream_config.stream_id);
if (stream_config.mark_pool != -1) {
- servers->set_mark_pool(stream_config.stream_id,
- mark_pools[stream_config.mark_pool]);
+ servers->set_mark_pool(stream_index, mark_pools[stream_config.mark_pool]);
+ }
+
+ servers->set_pacing_rate(stream_index, stream_config.pacing_rate);
+
+ string src = stream_config.src;
+ if (!src.empty()) {
+ multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
+ if (input_it != inputs->end()) {
+ input_it->second.input->add_destination(stream_index);
+ ++input_it->second.refcount;
+ }
}
}
- // Warn about any servers we've lost.
- // TODO: Make an option (delete=yes?) to actually shut down streams.
- for (set<string>::const_iterator stream_it = expecting_stream_ids.begin();
- stream_it != expecting_stream_ids.end();
+	// Warn about any streams we've lost.
+ for (set<string>::const_iterator stream_it = expecting_urls.begin();
+ stream_it != expecting_urls.end();
++stream_it) {
- string stream_id = *stream_it;
+ string url = *stream_it;
log(WARNING, "stream '%s' disappeared from the configuration file. "
- "It will not be deleted, but clients will not get any new inputs.",
- stream_id.c_str());
- if (deserialized_inputs->count(stream_id) != 0) {
- delete (*deserialized_inputs)[stream_id];
- deserialized_inputs->erase(stream_id);
+ "It will not be deleted, but clients will not get any new inputs. "
+ "If you really meant to delete it, set src=delete and reload.",
+ url.c_str());
+ }
+
+ // UDP streams.
+ for (unsigned i = 0; i < config.udpstreams.size(); ++i) {
+ const UDPStreamConfig &udpstream_config = config.udpstreams[i];
+ MarkPool *mark_pool = NULL;
+ if (udpstream_config.mark_pool != -1) {
+ mark_pool = mark_pools[udpstream_config.mark_pool];
+ }
+ int stream_index = servers->add_udpstream(udpstream_config.dst, mark_pool, udpstream_config.pacing_rate);
+
+ string src = udpstream_config.src;
+ if (!src.empty()) {
+ multimap<string, InputWithRefcount>::iterator input_it = inputs->find(src);
+ assert(input_it != inputs->end());
+ input_it->second.input->add_destination(stream_index);
+ ++input_it->second.refcount;
}
}
}
return (WIFEXITED(status) && WEXITSTATUS(status) == 0);
}
+void find_deleted_streams(const Config &config, set<string> *deleted_urls)
+{
+ for (unsigned i = 0; i < config.streams.size(); ++i) {
+ const StreamConfig &stream_config = config.streams[i];
+ if (stream_config.src == "delete") {
+ log(INFO, "Deleting stream '%s'.", stream_config.url.c_str());
+ deleted_urls->insert(stream_config.url);
+ }
+ }
+}
+
int main(int argc, char **argv)
{
signal(SIGHUP, hup);
+ signal(SIGINT, hup);
+ signal(SIGUSR1, do_nothing); // Used in internal signalling.
signal(SIGPIPE, SIG_IGN);
// Parse options.
static const option long_options[] = {
{ "state", required_argument, 0, 's' },
{ "test-config", no_argument, 0, 't' },
+ { 0, 0, 0, 0 }
};
int option_index = 0;
- int c = getopt_long (argc, argv, "s:t", long_options, &option_index);
+ int c = getopt_long(argc, argv, "s:t", long_options, &option_index);
if (c == -1) {
break;
test_config = true;
break;
default:
- assert(false);
+ fprintf(stderr, "Unknown option '%s'\n", argv[option_index]);
+ exit(1);
}
}
config_filename = argv[optind++];
}
+ // Canonicalize argv[0] and config_filename.
+ char argv0_canon[PATH_MAX];
+ char config_filename_canon[PATH_MAX];
+
+ if (realpath(argv[0], argv0_canon) == NULL) {
+ log_perror(argv[0]);
+ exit(1);
+ }
+ if (realpath(config_filename.c_str(), config_filename_canon) == NULL) {
+ log_perror(config_filename.c_str());
+ exit(1);
+ }
+
+ // Now parse the configuration file.
Config config;
- if (!parse_config(config_filename, &config)) {
+ if (!parse_config(config_filename_canon, &config)) {
exit(1);
}
if (test_config) {
exit(0);
}
+
+ // Ideally we'd like to daemonize only when we've started up all threads etc.,
+ // but daemon() forks, which is not good in multithreaded software, so we'll
+ // have to do it here.
+ if (config.daemonize) {
+ if (daemon(0, 0) == -1) {
+ log_perror("daemon");
+ exit(1);
+ }
+ }
start:
// Open logs as soon as possible.
open_logs(config.log_destinations);
- log(NO_LEVEL, "Cubemap " SERVER_VERSION " starting.");
+ log(INFO, "Cubemap " SERVER_VERSION " starting.");
+ if (config.access_log_file.empty()) {
+ // Create a dummy logger.
+ access_log = new AccessLogThread();
+ } else {
+ access_log = new AccessLogThread(config.access_log_file);
+ }
+ access_log->run();
+
servers = new ServerPool(config.num_servers);
+ // Find all the streams that are to be deleted.
+ set<string> deleted_urls;
+ find_deleted_streams(config, &deleted_urls);
+
CubemapStateProto loaded_state;
struct timeval serialize_start;
- set<string> deserialized_stream_ids;
- map<string, Input *> deserialized_inputs;
- map<int, Acceptor *> deserialized_acceptors;
+ set<string> deserialized_urls;
+ map<sockaddr_in6, Acceptor *, Sockaddr6Compare> deserialized_acceptors;
+ multimap<string, InputWithRefcount> inputs; // multimap due to older versions without deduplication.
if (state_fd != -1) {
log(INFO, "Deserializing state from previous process...");
string serialized;
serialize_start.tv_usec = loaded_state.serialize_start_usec();
// Deserialize the streams.
+ map<string, string> stream_headers_for_url; // See below.
for (int i = 0; i < loaded_state.streams_size(); ++i) {
- servers->add_stream_from_serialized(loaded_state.streams(i));
- deserialized_stream_ids.insert(loaded_state.streams(i).stream_id());
+ const StreamProto &stream = loaded_state.streams(i);
+
+ if (deleted_urls.count(stream.url()) != 0) {
+ // Delete the stream backlogs.
+ for (int j = 0; j < stream.data_fds_size(); ++j) {
+ safe_close(stream.data_fds(j));
+ }
+ } else {
+ vector<int> data_fds;
+ for (int j = 0; j < stream.data_fds_size(); ++j) {
+ data_fds.push_back(stream.data_fds(j));
+ }
+
+ // Older versions stored the data once in the protobuf instead of
+ // sending around file descriptors.
+ if (data_fds.empty() && stream.has_data()) {
+ data_fds.push_back(make_tempfile(stream.data()));
+ }
+
+ servers->add_stream_from_serialized(stream, data_fds);
+ deserialized_urls.insert(stream.url());
+
+ stream_headers_for_url.insert(make_pair(stream.url(), stream.stream_header()));
+ }
}
- // Deserialize the inputs. Note that we don't actually add them to any state yet.
+ // Deserialize the inputs. Note that we don't actually add them to any stream yet.
for (int i = 0; i < loaded_state.inputs_size(); ++i) {
- deserialized_inputs.insert(make_pair(
- loaded_state.inputs(i).stream_id(),
- create_input(loaded_state.inputs(i))));
+ InputProto serialized_input = loaded_state.inputs(i);
+
+ // Older versions did not store the stream header in the input,
+ // only in each stream. We need to have the stream header in the
+ // input as well, in case we create a new stream reusing the same input.
+ // Thus, we put it into place here if it's missing.
+ if (!serialized_input.has_stream_header() &&
+ stream_headers_for_url.count(serialized_input.url()) != 0) {
+ serialized_input.set_stream_header(stream_headers_for_url[serialized_input.url()]);
+ }
+
+ InputWithRefcount iwr;
+ iwr.input = create_input(serialized_input);
+ iwr.refcount = 0;
+ inputs.insert(make_pair(serialized_input.url(), iwr));
}
// Deserialize the acceptors.
for (int i = 0; i < loaded_state.acceptors_size(); ++i) {
+ sockaddr_in6 sin6 = ExtractAddressFromAcceptorProto(loaded_state.acceptors(i));
deserialized_acceptors.insert(make_pair(
- loaded_state.acceptors(i).port(),
+ sin6,
new Acceptor(loaded_state.acceptors(i))));
}
log(INFO, "Deserialization done.");
}
- // Find all streams in the configuration file, and create them.
- create_streams(config, deserialized_stream_ids, &deserialized_inputs);
-
- vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);
- vector<Input *> inputs = create_inputs(config, &deserialized_inputs);
+ // Add any new inputs coming from the config.
+ create_config_inputs(config, &inputs);
- // All deserialized inputs should now have been taken care of, one way or the other.
- assert(deserialized_inputs.empty());
+ // Find all streams in the configuration file, create them, and connect to the inputs.
+ create_streams(config, deserialized_urls, &inputs);
+ vector<Acceptor *> acceptors = create_acceptors(config, &deserialized_acceptors);
// Put back the existing clients. It doesn't matter which server we
// allocate them to, so just do round-robin. However, we need to add
// them after the mark pools have been set up.
for (int i = 0; i < loaded_state.clients_size(); ++i) {
- servers->add_client_from_serialized(loaded_state.clients(i));
+ if (deleted_urls.count(loaded_state.clients(i).url()) != 0) {
+ safe_close(loaded_state.clients(i).sock());
+ } else {
+ servers->add_client_from_serialized(loaded_state.clients(i));
+ }
}
servers->run();
+	// Now delete all inputs that are no longer in use, and start the others.
+ for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+ input_it != inputs.end(); ) {
+ if (input_it->second.refcount == 0) {
+ log(WARNING, "Input '%s' no longer in use, closing.",
+ input_it->first.c_str());
+ input_it->second.input->close_socket();
+ delete input_it->second.input;
+ inputs.erase(input_it++);
+ } else {
+ input_it->second.input->run();
+ ++input_it;
+ }
+ }
+
// Start writing statistics.
StatsThread *stats_thread = NULL;
if (!config.stats_file.empty()) {
stats_thread->run();
}
+ InputStatsThread *input_stats_thread = NULL;
+ if (!config.input_stats_file.empty()) {
+ vector<Input*> inputs_no_refcount;
+ for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+ input_it != inputs.end(); ++input_it) {
+ inputs_no_refcount.push_back(input_it->second.input);
+ }
+
+ input_stats_thread = new InputStatsThread(config.input_stats_file, config.input_stats_interval, inputs_no_refcount);
+ input_stats_thread->run();
+ }
+
struct timeval server_start;
gettimeofday(&server_start, NULL);
if (state_fd != -1) {
// OK, we've been HUPed. Time to shut down everything, serialize, and re-exec.
gettimeofday(&serialize_start, NULL);
+ if (input_stats_thread != NULL) {
+ input_stats_thread->stop();
+ delete input_stats_thread;
+ }
if (stats_thread != NULL) {
stats_thread->stop();
+ delete stats_thread;
}
for (size_t i = 0; i < acceptors.size(); ++i) {
acceptors[i]->stop();
}
- for (size_t i = 0; i < inputs.size(); ++i) {
- inputs[i]->stop();
+ for (multimap<string, InputWithRefcount>::iterator input_it = inputs.begin();
+ input_it != inputs.end();
+ ++input_it) {
+ input_it->second.input->stop();
}
servers->stop();
- log(INFO, "Serializing state and re-execing...");
- CubemapStateProto state = collect_state(
- serialize_start, acceptors, inputs, servers);
- string serialized;
- state.SerializeToString(&serialized);
- state_fd = make_tempfile(serialized);
- if (state_fd == -1) {
- exit(1);
+ CubemapStateProto state;
+ if (stopped) {
+ log(INFO, "Shutting down.");
+ } else {
+ log(INFO, "Serializing state and re-execing...");
+ state = collect_state(
+ serialize_start, acceptors, inputs, servers);
+ string serialized;
+ state.SerializeToString(&serialized);
+ state_fd = make_tempfile(serialized);
+ if (state_fd == -1) {
+ exit(1);
+ }
}
delete servers;
+
+ for (unsigned i = 0; i < mark_pools.size(); ++i) {
+ delete mark_pools[i];
+ }
+ mark_pools.clear();
+
+ access_log->stop();
+ delete access_log;
shut_down_logging();
- if (!dry_run_config(argv[0], config_filename)) {
+ if (stopped) {
+ exit(0);
+ }
+
+ // OK, so the signal was SIGHUP. Check that the new config is okay, then exec the new binary.
+ if (!dry_run_config(argv0_canon, config_filename_canon)) {
open_logs(config.log_destinations);
log(ERROR, "%s --test-config failed. Restarting old version instead of new.", argv[0]);
hupped = false;
shut_down_logging();
goto start;
}
-
char buf[16];
sprintf(buf, "%d", state_fd);
for ( ;; ) {
- execlp(argv[0], argv[0], config_filename.c_str(), "--state", buf, NULL);
+ execlp(argv0_canon, argv0_canon, config_filename_canon, "--state", buf, NULL);
open_logs(config.log_destinations);
log_perror("execlp");
- log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv[0]);
+ log(ERROR, "re-exec of %s failed. Waiting 0.2 seconds and trying again...", argv0_canon);
shut_down_logging();
usleep(200000);
}