From f51b3892514540ff3f08ab052296091f3a6f7a93 Mon Sep 17 00:00:00 2001
From: "Steinar H. Gunderson"
Date: Sat, 13 Apr 2013 21:53:45 +0200
Subject: [PATCH] Support configurable BACKLOG_SIZE (per-stream).

No support for changing across restarts yet.
---
 config.cpp            |  9 +++++++++
 config.h              |  1 +
 cubemap.config.sample |  2 +-
 main.cpp              |  2 +-
 server.cpp            | 45 ++++++++++++++++++++++++---------------------
 server.h              | 16 +++++++++-------
 serverpool.cpp        |  4 ++--
 serverpool.h          |  2 +-
 state.proto           |  3 ++-
 9 files changed, 50 insertions(+), 34 deletions(-)

diff --git a/config.cpp b/config.cpp
index 4897113..32e0e65 100644
--- a/config.cpp
+++ b/config.cpp
@@ -13,6 +13,8 @@
 
 using namespace std;
 
+#define DEFAULT_BACKLOG_SIZE 1048576
+
 struct ConfigLine {
 	string keyword;
 	vector<string> arguments;
@@ -197,6 +199,13 @@ bool parse_stream(const ConfigLine &line, Config *config)
 		// TODO: Verify that the URL is parseable?
 	}
 
+	map<string, string>::const_iterator backlog_it = line.parameters.find("backlog_size");
+	if (backlog_it == line.parameters.end()) {
+		stream.backlog_size = DEFAULT_BACKLOG_SIZE;
+	} else {
+		stream.backlog_size = atoi(backlog_it->second.c_str());
+	}
+
 	// Parse marks, if so desired.
 	map<string, string>::const_iterator mark_parm_it = line.parameters.find("mark");
 	if (mark_parm_it == line.parameters.end()) {
diff --git a/config.h b/config.h
index d45ab08..e7385e2 100644
--- a/config.h
+++ b/config.h
@@ -13,6 +13,7 @@ struct MarkPoolConfig {
 struct StreamConfig {
 	std::string stream_id;
 	std::string src;  // Can be empty.
+	size_t backlog_size;
 	int mark_pool;  // -1 for none.
 };
 
diff --git a/cubemap.config.sample b/cubemap.config.sample
index 852e0ca..7f60e5f 100644
--- a/cubemap.config.sample
+++ b/cubemap.config.sample
@@ -12,4 +12,4 @@ stats_interval 60
 # now the streams!
 #
 stream /test.flv src=http://gruessi.zrh.sesse.net:4013/test.flv mark=1000-5000
-stream /udp.ts src=udp://@:1234
+stream /udp.ts src=udp://@:1234 backlog_size=1048576
diff --git a/main.cpp b/main.cpp
index 6c8051a..1369c9f 100644
--- a/main.cpp
+++ b/main.cpp
@@ -154,7 +154,7 @@ void create_streams(const Config &config,
 	for (unsigned i = 0; i < config.streams.size(); ++i) {
 		const StreamConfig &stream_config = config.streams[i];
 		if (deserialized_stream_ids.count(stream_config.stream_id) == 0) {
-			servers->add_stream(stream_config.stream_id);
+			servers->add_stream(stream_config.stream_id, stream_config.backlog_size);
 		}
 
 		expecting_stream_ids.erase(stream_config.stream_id);
diff --git a/server.cpp b/server.cpp
index c9b942f..0c36e49 100644
--- a/server.cpp
+++ b/server.cpp
@@ -106,10 +106,11 @@ ClientStats Client::get_stats() const
 	return stats;
 }
 
-Stream::Stream(const string &stream_id)
+Stream::Stream(const string &stream_id, size_t backlog_size)
 	: stream_id(stream_id),
 	  data_fd(make_tempfile("")),
-	  data_size(0),
+	  backlog_size(backlog_size),
+	  bytes_received(0),
 	  mark_pool(NULL)
 {
 	if (data_fd == -1) {
@@ -134,7 +135,8 @@ Stream::Stream(const StreamProto &serialized)
 	: stream_id(serialized.stream_id()),
 	  header(serialized.header()),
 	  data_fd(make_tempfile(serialized.data())),
-	  data_size(serialized.data_size()),
+	  backlog_size(serialized.backlog_size()),
+	  bytes_received(serialized.bytes_received()),
 	  mark_pool(NULL)
 {
 	if (data_fd == -1) {
@@ -149,7 +151,8 @@ StreamProto Stream::serialize()
 	if (!read_tempfile(data_fd, serialized.mutable_data())) {  // Closes data_fd.
 		exit(1);
 	}
-	serialized.set_data_size(data_size);
+	serialized.set_backlog_size(backlog_size);
+	serialized.set_bytes_received(bytes_received);
 	serialized.set_stream_id(stream_id);
 	data_fd = -1;
 	return serialized;
@@ -321,17 +324,17 @@ void Server::add_client_from_serialized(const ClientProto &client)
 	}
 
 	if (client_ptr->state == Client::SENDING_DATA &&
-	    client_ptr->bytes_sent == client_ptr->stream->data_size) {
+	    client_ptr->bytes_sent == client_ptr->stream->bytes_received) {
 		client_ptr->stream->put_client_to_sleep(client_ptr);
 	} else {
 		process_client(client_ptr);
 	}
 }
 
-void Server::add_stream(const string &stream_id)
+void Server::add_stream(const string &stream_id, size_t backlog_size)
 {
 	MutexLock lock(&mutex);
-	streams.insert(make_pair(stream_id, new Stream(stream_id)));
+	streams.insert(make_pair(stream_id, new Stream(stream_id, backlog_size)));
 }
 
 void Server::add_stream_from_serialized(const StreamProto &stream)
@@ -374,11 +377,11 @@ void Server::add_data_deferred(const string &stream_id, const char *data, size_t
 void Server::add_data(const string &stream_id, const char *data, ssize_t bytes)
 {
 	Stream *stream = find_stream(stream_id);
-	size_t pos = stream->data_size % BACKLOG_SIZE;
-	stream->data_size += bytes;
+	size_t pos = stream->bytes_received % stream->backlog_size;
+	stream->bytes_received += bytes;
 
-	if (pos + bytes > BACKLOG_SIZE) {
-		ssize_t to_copy = BACKLOG_SIZE - pos;
+	if (pos + bytes > stream->backlog_size) {
+		ssize_t to_copy = stream->backlog_size - pos;
 		while (to_copy > 0) {
 			int ret = pwrite(stream->data_fd, data, to_copy, pos);
 			if (ret == -1 && errno == EINTR) {
@@ -523,7 +526,7 @@ sending_header_or_error_again:
 		// but we'll start sending immediately as we get data.
 		// This is postcondition #3.
 		client->state = Client::SENDING_DATA;
-		client->bytes_sent = client->stream->data_size;
+		client->bytes_sent = client->stream->bytes_received;
 		client->stream->put_client_to_sleep(client);
 		return;
 	}
@@ -532,28 +535,28 @@ sending_data_again:
 	// See if there's some data we've lost. Ideally, we should drop to a block boundary,
 	// but resync will be the mux's problem.
 	Stream *stream = client->stream;
-	size_t bytes_to_send = stream->data_size - client->bytes_sent;
+	size_t bytes_to_send = stream->bytes_received - client->bytes_sent;
 	if (bytes_to_send == 0) {
 		return;
 	}
-	if (bytes_to_send > BACKLOG_SIZE) {
+	if (bytes_to_send > stream->backlog_size) {
 		fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
 			client->sock,
-			(long long int)(bytes_to_send - BACKLOG_SIZE));
-		client->bytes_sent = stream->data_size - BACKLOG_SIZE;
-		bytes_to_send = BACKLOG_SIZE;
+			(long long int)(bytes_to_send - stream->backlog_size));
+		client->bytes_sent = stream->bytes_received - stream->backlog_size;
+		bytes_to_send = stream->backlog_size;
 	}
 
 	// See if we need to split across the circular buffer.
 	bool more_data = false;
-	if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
-		bytes_to_send = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);
+	if ((client->bytes_sent % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
+		bytes_to_send = stream->backlog_size - (client->bytes_sent % stream->backlog_size);
 		more_data = true;
 	}
 
 	ssize_t ret;
 	do {
-		loff_t offset = client->bytes_sent % BACKLOG_SIZE;
+		loff_t offset = client->bytes_sent % stream->backlog_size;
 		ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
 	} while (ret == -1 && errno == EINTR);
 
@@ -571,7 +574,7 @@ sending_data_again:
 	}
 
 	client->bytes_sent += ret;
-	if (client->bytes_sent == stream->data_size) {
+	if (client->bytes_sent == stream->bytes_received) {
 		// We don't have any more data for this client, so put it to sleep.
 		// This is postcondition #3.
 		stream->put_client_to_sleep(client);
diff --git a/server.h b/server.h
index e9b7431..7910f4c 100644
--- a/server.h
+++ b/server.h
@@ -11,7 +11,6 @@
 
 #include "thread.h"
 
-#define BACKLOG_SIZE 1048576
 #define EPOLL_MAX_EVENTS 8192
 #define EPOLL_TIMEOUT_MS 20
 #define MAX_CLIENT_REQUEST 16384
@@ -76,7 +75,7 @@ struct Client {
 };
 
 struct Stream {
-	Stream(const std::string &stream_id);
+	Stream(const std::string &stream_id, size_t backlog_size);
 	~Stream();
 
 	// Serialization/deserialization.
@@ -88,7 +87,7 @@ struct Stream {
 	// The HTTP response header, plus the video stream header (if any).
 	std::string header;
 
-	// The stream data itself, stored in a circular buffer.q
+	// The stream data itself, stored in a circular buffer.
 	//
 	// We store our data in a file, so that we can send the data to the
 	// kernel only once (with write()). We then use sendfile() for each
@@ -98,9 +97,12 @@ struct Stream {
 	// the same data from userspace many times.
 	int data_fd;
 
-	// How many bytes <data_fd> contains. Can very well be larger than BACKLOG_SIZE,
-	// since the buffer wraps.
-	size_t data_size;
+	// How many bytes <data_fd> can hold (the buffer size).
+	size_t backlog_size;
+
+	// How many bytes this stream has received. Can very well be larger
+	// than <backlog_size>, since the buffer wraps.
+	size_t bytes_received;
 
 	// Clients that are in SENDING_DATA, but that we don't listen on,
 	// because we currently don't have any data for them.
@@ -152,7 +154,7 @@ public:
 	// at the same time).
 	CubemapStateProto serialize();
 	void add_client_from_serialized(const ClientProto &client);
-	void add_stream(const std::string &stream_id);
+	void add_stream(const std::string &stream_id, size_t backlog_size);
 	void add_stream_from_serialized(const StreamProto &stream);
 
 private:
diff --git a/serverpool.cpp b/serverpool.cpp
index 9f1a728..e47c8d5 100644
--- a/serverpool.cpp
+++ b/serverpool.cpp
@@ -44,10 +44,10 @@ void ServerPool::add_client_from_serialized(const ClientProto &client)
 	servers[clients_added++ % num_servers].add_client_from_serialized(client);
 }
 
-void ServerPool::add_stream(const std::string &stream_id)
+void ServerPool::add_stream(const std::string &stream_id, size_t backlog_size)
 {
 	for (int i = 0; i < num_servers; ++i) {
-		servers[i].add_stream(stream_id);
+		servers[i].add_stream(stream_id, backlog_size);
 	}
 }
 
diff --git a/serverpool.h b/serverpool.h
index 559f76a..1369db0 100644
--- a/serverpool.h
+++ b/serverpool.h
@@ -21,7 +21,7 @@ public:
 	void add_client_from_serialized(const ClientProto &client);
 
 	// Adds the given stream to all the servers.
-	void add_stream(const std::string &stream_id);
+	void add_stream(const std::string &stream_id, size_t backlog_size);
 	void add_stream_from_serialized(const StreamProto &stream);
 
 	// Adds the given data to all the servers.
diff --git a/state.proto b/state.proto
index eb4660f..17d5edc 100644
--- a/state.proto
+++ b/state.proto
@@ -15,7 +15,8 @@ message ClientProto {
 message StreamProto {
 	optional bytes header = 1;
 	optional bytes data = 2;
-	optional int64 data_size = 3;
+	optional int64 backlog_size = 5 [default=1048576];
+	optional int64 bytes_received = 3;
 	optional string stream_id = 4;
 };
 
-- 
2.39.2
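
Note (not part of the patch above): the Server::add_data() and SENDING_DATA hunks both reduce to the same circular-buffer arithmetic, now driven by the per-stream backlog_size field instead of the old compile-time BACKLOG_SIZE constant. The standalone C++ sketch below illustrates that arithmetic under simplified assumptions: an in-memory std::string stands in for the stream's backing file (data_fd), and the RingBacklog name and helper signatures are purely illustrative, not part of cubemap.

// Standalone sketch (not taken from the patch): per-stream circular-buffer
// bookkeeping, written to mirror the logic in add_data() and process_client().
#include <algorithm>
#include <stddef.h>
#include <stdio.h>
#include <string>

struct RingBacklog {
	std::string buf;        // stands in for the stream's backing file (data_fd)
	size_t backlog_size;
	size_t bytes_received;

	explicit RingBacklog(size_t backlog_size)
		: buf(backlog_size, '\0'), backlog_size(backlog_size), bytes_received(0) {}

	// Mirrors add_data(): write at bytes_received % backlog_size, splitting
	// the write when it crosses the end of the circular buffer.
	void add_data(const char *data, size_t bytes)
	{
		size_t pos = bytes_received % backlog_size;
		bytes_received += bytes;
		if (pos + bytes > backlog_size) {
			size_t first = backlog_size - pos;
			buf.replace(pos, first, data, first);
			buf.replace(0, bytes - first, data + first, bytes - first);
		} else {
			buf.replace(pos, bytes, data, bytes);
		}
	}

	// Mirrors the SENDING_DATA path: a client that has fallen more than
	// backlog_size bytes behind has lost data and is skipped forward; the
	// amount to send is then clamped at the end of the circular buffer.
	size_t bytes_to_send(size_t *bytes_sent) const
	{
		size_t pending = bytes_received - *bytes_sent;
		if (pending > backlog_size) {
			*bytes_sent = bytes_received - backlog_size;  // data was overwritten
			pending = backlog_size;
		}
		return std::min(pending, backlog_size - (*bytes_sent % backlog_size));
	}
};

int main()
{
	RingBacklog stream(16);                     // tiny backlog to force a wrap
	stream.add_data("0123456789ABCDEFGH", 18);  // 18 > 16, so the write wraps
	size_t bytes_sent = 0;                      // a client that has sent nothing yet
	size_t n = stream.bytes_to_send(&bytes_sent);
	printf("client skipped forward to %zu, can send %zu bytes before wrapping\n",
	       bytes_sent, n);
	return 0;
}

Built with g++ -std=c++11, the example reports that the lagging client is skipped forward to offset 2 and can send 14 bytes before the next wrap, the same skip-ahead that triggers the "lost bytes" warning in process_client().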