git.sesse.net Git - cubemap/commitdiff
Support configurable BACKLOG_SIZE (per-stream). No support for changing across restar...
author Steinar H. Gunderson <sgunderson@bigfoot.com>
Sat, 13 Apr 2013 19:53:45 +0000 (21:53 +0200)
committer Steinar H. Gunderson <sgunderson@bigfoot.com>
Sat, 13 Apr 2013 19:53:45 +0000 (21:53 +0200)
config.cpp
config.h
cubemap.config.sample
main.cpp
server.cpp
server.h
serverpool.cpp
serverpool.h
state.proto

index 489711363482149c5b0f8d89621dc2c294c2b81a..32e0e654164142d6dcd6f8dffc419fffc35483dc 100644 (file)
--- a/config.cpp
+++ b/config.cpp
@@ -13,6 +13,8 @@
 
 using namespace std;
 
+#define DEFAULT_BACKLOG_SIZE 1048576
+
 struct ConfigLine {
        string keyword;
        vector<string> arguments;
@@ -197,6 +199,13 @@ bool parse_stream(const ConfigLine &line, Config *config)
                // TODO: Verify that the URL is parseable?
        }
 
+       map<string, string>::const_iterator backlog_it = line.parameters.find("backlog_size");
+       if (backlog_it == line.parameters.end()) {
+               stream.backlog_size = DEFAULT_BACKLOG_SIZE;
+       } else {
+               stream.backlog_size = atoi(backlog_it->second.c_str());
+       }
+
        // Parse marks, if so desired.
        map<string, string>::const_iterator mark_parm_it = line.parameters.find("mark");
        if (mark_parm_it == line.parameters.end()) {
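
The lookup falls back to DEFAULT_BACKLOG_SIZE when a stream line carries no backlog_size parameter. A standalone sketch of the same parse-with-default pattern (the helper name is made up, and strtoul stands in for the commit's atoi so values above INT_MAX still parse):

#include <stdlib.h>
#include <map>
#include <string>

// Illustrative helper, not part of the commit: return the backlog size given
// on a "stream" config line, or the 1 MB default when the parameter is absent.
static size_t backlog_size_from_parameters(const std::map<std::string, std::string> &parameters)
{
	std::map<std::string, std::string>::const_iterator it = parameters.find("backlog_size");
	if (it == parameters.end()) {
		return 1048576;  // mirrors DEFAULT_BACKLOG_SIZE
	}
	return strtoul(it->second.c_str(), NULL, 10);
}
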
index d45ab08316b318c3e717d6c4a37252f047b0a9c5..e7385e2f488904a488adf00d079d67509022df9e 100644 (file)
--- a/config.h
+++ b/config.h
@@ -13,6 +13,7 @@ struct MarkPoolConfig {
 struct StreamConfig {
        std::string stream_id;
        std::string src;  // Can be empty.
+       size_t backlog_size;
        int mark_pool;  // -1 for none.
 };
 
index 852e0caba4d7f43db98343c22b99a0dd98a6dc43..7f60e5ffc0b64848383b363ba256e2560fd01e05 100644 (file)
--- a/cubemap.config.sample
+++ b/cubemap.config.sample
@@ -12,4 +12,4 @@ stats_interval 60
 # now the streams!
 #
 stream /test.flv src=http://gruessi.zrh.sesse.net:4013/test.flv mark=1000-5000
-stream /udp.ts src=udp://@:1234
+stream /udp.ts src=udp://@:1234 backlog_size=1048576
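
Any stream line that omits backlog_size keeps the 1 MB default from config.cpp. Hypothetical lines in the same syntax as the sample (URLs made up), showing an enlarged backlog alongside the default:

# 10 MB backlog for a high-bitrate stream; the second stream keeps the default.
stream /hd.ts src=udp://@:1235 backlog_size=10485760
stream /radio.flv src=http://example.com/radio.flv
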
index 6c8051a351c471fac56d1e46cc695387ed8c382b..1369c9faeb8f42394e1afe4b0878f4831592f3a7 100644 (file)
--- a/main.cpp
+++ b/main.cpp
@@ -154,7 +154,7 @@ void create_streams(const Config &config,
        for (unsigned i = 0; i < config.streams.size(); ++i) {
                const StreamConfig &stream_config = config.streams[i];
                if (deserialized_stream_ids.count(stream_config.stream_id) == 0) {
-                       servers->add_stream(stream_config.stream_id);
+                       servers->add_stream(stream_config.stream_id, stream_config.backlog_size);
                }
                expecting_stream_ids.erase(stream_config.stream_id);
 
index c9b942f2c686e19a5d06682eb79075f7e59defad..0c36e49363178f1ccd0a4955ef119a5ea6ebe9bd 100644 (file)
--- a/server.cpp
+++ b/server.cpp
@@ -106,10 +106,11 @@ ClientStats Client::get_stats() const
        return stats;
 }
 
-Stream::Stream(const string &stream_id)
+Stream::Stream(const string &stream_id, size_t backlog_size)
        : stream_id(stream_id),
          data_fd(make_tempfile("")),
-         data_size(0),
+          backlog_size(backlog_size),
+         bytes_received(0),
          mark_pool(NULL)
 {
        if (data_fd == -1) {
@@ -134,7 +135,8 @@ Stream::Stream(const StreamProto &serialized)
        : stream_id(serialized.stream_id()),
          header(serialized.header()),
          data_fd(make_tempfile(serialized.data())),
-         data_size(serialized.data_size()),
+         backlog_size(serialized.backlog_size()),
+         bytes_received(serialized.bytes_received()),
          mark_pool(NULL)
 {
        if (data_fd == -1) {
@@ -149,7 +151,8 @@ StreamProto Stream::serialize()
        if (!read_tempfile(data_fd, serialized.mutable_data())) {  // Closes data_fd.
                exit(1);
        }
-       serialized.set_data_size(data_size);
+       serialized.set_backlog_size(backlog_size);
+       serialized.set_bytes_received(bytes_received);
        serialized.set_stream_id(stream_id);
        data_fd = -1;
        return serialized;
@@ -321,17 +324,17 @@ void Server::add_client_from_serialized(const ClientProto &client)
        }
 
        if (client_ptr->state == Client::SENDING_DATA && 
-           client_ptr->bytes_sent == client_ptr->stream->data_size) {
+           client_ptr->bytes_sent == client_ptr->stream->bytes_received) {
                client_ptr->stream->put_client_to_sleep(client_ptr);
        } else {
                process_client(client_ptr);
        }
 }
 
-void Server::add_stream(const string &stream_id)
+void Server::add_stream(const string &stream_id, size_t backlog_size)
 {
        MutexLock lock(&mutex);
-       streams.insert(make_pair(stream_id, new Stream(stream_id)));
+       streams.insert(make_pair(stream_id, new Stream(stream_id, backlog_size)));
 }
 
 void Server::add_stream_from_serialized(const StreamProto &stream)
@@ -374,11 +377,11 @@ void Server::add_data_deferred(const string &stream_id, const char *data, size_t
 void Server::add_data(const string &stream_id, const char *data, ssize_t bytes)
 {
        Stream *stream = find_stream(stream_id);
-       size_t pos = stream->data_size % BACKLOG_SIZE;
-       stream->data_size += bytes;
+       size_t pos = stream->bytes_received % stream->backlog_size;
+       stream->bytes_received += bytes;
 
-       if (pos + bytes > BACKLOG_SIZE) {
-               ssize_t to_copy = BACKLOG_SIZE - pos;
+       if (pos + bytes > stream->backlog_size) {
+               ssize_t to_copy = stream->backlog_size - pos;
                while (to_copy > 0) {
                        int ret = pwrite(stream->data_fd, data, to_copy, pos);
                        if (ret == -1 && errno == EINTR) {
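
add_data() treats the backlog file as a ring buffer: the write position is bytes_received modulo the per-stream backlog_size, and a chunk that crosses the end of the buffer is split into two pwrite() calls. A minimal standalone sketch of that wrap-around split (names assumed, EINTR/short-write handling omitted):

#include <unistd.h>
#include <cstddef>

// Write `bytes` bytes of `data` into a circular buffer backed by `fd`, whose
// capacity is `backlog_size`, at logical stream offset `bytes_received`.
// Sketch only: error handling is omitted.
void ring_write(int fd, size_t backlog_size, size_t bytes_received,
                const char *data, size_t bytes)
{
	size_t pos = bytes_received % backlog_size;
	if (pos + bytes > backlog_size) {
		// The chunk wraps: first fill up to the end of the buffer...
		size_t to_copy = backlog_size - pos;
		pwrite(fd, data, to_copy, pos);
		// ...then continue from the start of the buffer.
		pwrite(fd, data + to_copy, bytes - to_copy, 0);
	} else {
		pwrite(fd, data, bytes, pos);
	}
}
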
@@ -523,7 +526,7 @@ sending_header_or_error_again:
                // but we'll start sending immediately as we get data.
                // This is postcondition #3.
                client->state = Client::SENDING_DATA;
-               client->bytes_sent = client->stream->data_size;
+               client->bytes_sent = client->stream->bytes_received;
                client->stream->put_client_to_sleep(client);
                return;
        }
@@ -532,28 +535,28 @@ sending_data_again:
                // See if there's some data we've lost. Ideally, we should drop to a block boundary,
                // but resync will be the mux's problem.
                Stream *stream = client->stream;
-               size_t bytes_to_send = stream->data_size - client->bytes_sent;
+               size_t bytes_to_send = stream->bytes_received - client->bytes_sent;
                if (bytes_to_send == 0) {
                        return;
                }
-               if (bytes_to_send > BACKLOG_SIZE) {
+               if (bytes_to_send > stream->backlog_size) {
                        fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
                                client->sock,
-                               (long long int)(bytes_to_send - BACKLOG_SIZE));
-                       client->bytes_sent = stream->data_size - BACKLOG_SIZE;
-                       bytes_to_send = BACKLOG_SIZE;
+                               (long long int)(bytes_to_send - stream->backlog_size));
+                       client->bytes_sent = stream->bytes_received - stream->backlog_size;
+                       bytes_to_send = stream->backlog_size;
                }
 
                // See if we need to split across the circular buffer.
                bool more_data = false;
-               if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
-                       bytes_to_send = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);
+               if ((client->bytes_sent % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
+                       bytes_to_send = stream->backlog_size - (client->bytes_sent % stream->backlog_size);
                        more_data = true;
                }
 
                ssize_t ret;
                do {
-                       loff_t offset = client->bytes_sent % BACKLOG_SIZE;
+                       loff_t offset = client->bytes_sent % stream->backlog_size;
                        ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
                } while (ret == -1 && errno == EINTR);
 
@@ -571,7 +574,7 @@ sending_data_again:
                }
                client->bytes_sent += ret;
 
-               if (client->bytes_sent == stream->data_size) {
+               if (client->bytes_sent == stream->bytes_received) {
                        // We don't have any more data for this client, so put it to sleep.
                        // This is postcondition #3.
                        stream->put_client_to_sleep(client);
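
On the send side, a client may be given up to bytes_received - bytes_sent bytes; if it has fallen more than backlog_size behind, the overwritten portion is skipped before sendfile() resumes at bytes_sent modulo backlog_size. A sketch of just the clamping step (standalone, names assumed):

#include <cstddef>

// Given how far a client has read (bytes_sent) and how much the stream has
// received, return how many bytes may be sent now, advancing bytes_sent past
// any data the circular buffer has already overwritten. Sketch only; the
// caller would additionally cap the send at the wrap point of the ring.
size_t clamp_to_backlog(size_t bytes_received, size_t backlog_size, size_t *bytes_sent)
{
	size_t bytes_to_send = bytes_received - *bytes_sent;
	if (bytes_to_send > backlog_size) {
		// The client lost (bytes_to_send - backlog_size) bytes; skip ahead.
		*bytes_sent = bytes_received - backlog_size;
		bytes_to_send = backlog_size;
	}
	return bytes_to_send;
}
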
index e9b74315f1b3d5fdd6227d67c2c4160ccf89be58..7910f4c9c37ffdf190245f60edf5fbce1512dc29 100644 (file)
--- a/server.h
+++ b/server.h
@@ -11,7 +11,6 @@
 
 #include "thread.h"
 
-#define BACKLOG_SIZE 1048576
 #define EPOLL_MAX_EVENTS 8192
 #define EPOLL_TIMEOUT_MS 20
 #define MAX_CLIENT_REQUEST 16384
@@ -76,7 +75,7 @@ struct Client {
 };
 
 struct Stream {
-       Stream(const std::string &stream_id);
+       Stream(const std::string &stream_id, size_t backlog_size);
        ~Stream();
 
        // Serialization/deserialization.
@@ -88,7 +87,7 @@ struct Stream {
        // The HTTP response header, plus the video stream header (if any).
        std::string header;
 
-       // The stream data itself, stored in a circular buffer.q
+       // The stream data itself, stored in a circular buffer.
        //
        // We store our data in a file, so that we can send the data to the
        // kernel only once (with write()). We then use sendfile() for each
@@ -98,9 +97,12 @@ struct Stream {
        // the same data from userspace many times.
        int data_fd;
 
-       // How many bytes <data> contains. Can very well be larger than BACKLOG_SIZE,
-       // since the buffer wraps.
-       size_t data_size;
+       // How many bytes <data_fd> can hold (the buffer size).
+       size_t backlog_size;
+
+       // How many bytes this stream have received. Can very well be larger
+       // than <backlog_size>, since the buffer wraps.
+       size_t bytes_received;
        
        // Clients that are in SENDING_DATA, but that we don't listen on,
        // because we currently don't have any data for them.
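
Together the two counters define which part of the stream the backlog still holds: the half-open byte range from the oldest available byte up to bytes_received. A one-line sketch of that lower bound (free-function form assumed):

#include <cstddef>

// Oldest stream offset still present in the backlog file.
size_t oldest_available_byte(size_t bytes_received, size_t backlog_size)
{
	return (bytes_received > backlog_size) ? bytes_received - backlog_size : 0;
}
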
@@ -152,7 +154,7 @@ public:
        // at the same time).
        CubemapStateProto serialize();
        void add_client_from_serialized(const ClientProto &client);
-       void add_stream(const std::string &stream_id);
+       void add_stream(const std::string &stream_id, size_t bytes_received);
        void add_stream_from_serialized(const StreamProto &stream);
 
 private:
index 9f1a7280662a68f35fb40cb8230af9cb75384978..e47c8d567da2f847b1f7d39301074232480fa7a8 100644 (file)
--- a/serverpool.cpp
+++ b/serverpool.cpp
@@ -44,10 +44,10 @@ void ServerPool::add_client_from_serialized(const ClientProto &client)
        servers[clients_added++ % num_servers].add_client_from_serialized(client);
 }
 
-void ServerPool::add_stream(const std::string &stream_id)
+void ServerPool::add_stream(const std::string &stream_id, size_t backlog_size)
 {
        for (int i = 0; i < num_servers; ++i) {
-               servers[i].add_stream(stream_id);
+               servers[i].add_stream(stream_id, backlog_size);
        }
 }
 
index 559f76a212c1aab24ceeafb985c95d9e9102a1c7..1369db0e44d2aa600820862d696fd7b256545585 100644 (file)
--- a/serverpool.h
+++ b/serverpool.h
@@ -21,7 +21,7 @@ public:
        void add_client_from_serialized(const ClientProto &client);
 
        // Adds the given stream to all the servers.
-       void add_stream(const std::string &stream_id);
+       void add_stream(const std::string &stream_id, size_t backlog_size);
        void add_stream_from_serialized(const StreamProto &stream);
 
        // Adds the given data to all the servers.
index eb4660f0c6d787d12628483ed66c34a2739d2668..17d5edc419ba340eca2693b65f7866d335fb1ea3 100644 (file)
--- a/state.proto
+++ b/state.proto
@@ -15,7 +15,8 @@ message ClientProto {
 message StreamProto {
        optional bytes header = 1;
        optional bytes data = 2;
-       optional int64 data_size = 3;
+       optional int64 backlog_size = 5 [default=1048576];
+       optional int64 bytes_received = 3;
        optional string stream_id = 4;
 };
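
bytes_received keeps the old data_size field number (3), so the byte count from a state file written before this commit is read back unchanged, while the new backlog_size field (5) defaults to the previous compile-time constant. A minimal sketch of what deserializing such an old state yields, assuming the standard protoc-generated C++ API and a made-up function name:

#include <stdio.h>
#include "state.pb.h"  // generated by protoc from state.proto

// Illustrative only: a pre-commit state has no backlog_size set, so the
// proto2 accessor returns the declared default of 1048576; the old data_size
// value is read back as bytes_received, since it kept field number 3.
void dump_stream_proto(const StreamProto &serialized)
{
	printf("backlog_size=%lld bytes_received=%lld\n",
	       (long long)serialized.backlog_size(),
	       (long long)serialized.bytes_received());
}
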