Remove std:: from all code in .cpp files, for consistency.

diff --git a/server.cpp b/server.cpp
index 7116a73f7e8ab9a3bf3fec9d6973275aa0f23307..72664e33f665eda4bd09e49d22789e546b4ee81f 100644
--- a/server.cpp
+++ b/server.cpp
@@ -140,7 +140,7 @@ void Server::do_work()
                }
                timeout_time.tv_sec -= REQUEST_READ_TIMEOUT_SEC;
                while (!clients_ordered_by_connect_time.empty()) {
-                       pair<timespec, int> &connect_time_and_fd = clients_ordered_by_connect_time.front();
+                       const pair<timespec, int> &connect_time_and_fd = clients_ordered_by_connect_time.front();
 
                        // See if we have reached the end of clients to process.
                        if (is_earlier(timeout_time, connect_time_and_fd.first)) {
@@ -149,7 +149,7 @@ void Server::do_work()
 
                        // If this client doesn't exist anymore, just ignore it
                        // (it was deleted earlier).
-                       std::map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
+                       map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
                        if (client_it == clients.end()) {
                                clients_ordered_by_connect_time.pop();
                                continue;
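
The two hunks above walk a FIFO of (connect time, fd) pairs and look each fd up in the client map, silently dropping entries whose client has already gone away. A minimal, self-contained sketch of that lazy-deletion idiom follows; the types and the free-standing function are simplified stand-ins, not cubemap's actual Server members, and is_earlier() is assumed to mean "a is strictly earlier than b".

// Sketch only: illustrates the timeout scan above with simplified types.
#include <time.h>

#include <map>
#include <queue>
#include <utility>

using namespace std;

struct Client { /* connection state; details omitted */ };

static bool is_earlier(const timespec &a, const timespec &b)
{
	if (a.tv_sec != b.tv_sec)
		return a.tv_sec < b.tv_sec;
	return a.tv_nsec < b.tv_nsec;
}

// Walk the FIFO of (connect time, fd) pairs. Stop at the first client that
// connected after the cutoff; everything behind it is even newer. Entries
// whose fd is no longer in the map were deleted earlier and are simply dropped.
void time_out_old_clients(queue<pair<timespec, int> > *ordered_by_connect_time,
                          map<int, Client> *clients,
                          const timespec &timeout_time)
{
	while (!ordered_by_connect_time->empty()) {
		const pair<timespec, int> &connect_time_and_fd = ordered_by_connect_time->front();
		if (is_earlier(timeout_time, connect_time_and_fd.first)) {
			break;
		}
		map<int, Client>::iterator client_it = clients->find(connect_time_and_fd.second);
		if (client_it == clients->end()) {
			ordered_by_connect_time->pop();
			continue;
		}
		// ... time out (close and erase) client_it->second here ...
		ordered_by_connect_time->pop();
	}
}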
@@ -286,7 +286,7 @@ void Server::add_client_from_serialized(const ClientProto &client)
        }
 }
 
-int Server::lookup_stream_by_url(const std::string &url) const
+int Server::lookup_stream_by_url(const string &url) const
 {
        map<string, int>::const_iterator url_it = url_map.find(url);
        if (url_it == url_map.end()) {
@@ -337,8 +337,18 @@ void Server::set_header(int stream_index, const string &http_header, const strin
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->http_header = http_header;
+
+       if (stream_header != streams[stream_index]->stream_header) {
+               // We cannot start at any of the older starting points anymore,
+               // since they'd get the wrong header for the stream (not to mention
+               // that a changed header probably means the stream restarted,
+               // which means any client starting on the old one would probably
+               // stop playing properly at the change point). Next block
+               // should be a suitable starting point (if not, something is
+               // pretty strange), so it will fill up again soon enough.
+               streams[stream_index]->suitable_starting_points.clear();
+       }
        streams[stream_index]->stream_header = stream_header;
-       // FIXME: We should reset last_suitable_starting_point at this point.
 }
        
 void Server::set_pacing_rate(int stream_index, uint32_t pacing_rate)
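
The new block above encodes a simple invariant: starting points recorded under the old stream header become useless once the header changes. Below is a stripped-down sketch of that idea, showing only the Stream fields this hunk touches; the struct and the free function are illustrative stand-ins, not cubemap's real declarations.

// Stand-in for cubemap's Stream, reduced to the fields used by set_header().
#include <cstddef>

#include <deque>
#include <string>

using namespace std;

struct Stream {
	string http_header;                      // HTTP response header sent to each client.
	string stream_header;                    // Container/codec header prepended to the stream data.
	deque<size_t> suitable_starting_points;  // Byte offsets (keyframes) new clients may start at.
};

void set_header(Stream *stream, const string &http_header, const string &stream_header)
{
	stream->http_header = http_header;
	if (stream_header != stream->stream_header) {
		// Old starting points belong to data produced under the old header,
		// so no new client should ever start there; the next block is
		// expected to be a keyframe and will repopulate the deque.
		stream->suitable_starting_points.clear();
	}
	stream->stream_header = stream_header;
}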
@@ -459,38 +469,62 @@ sending_header_or_error_again:
                        return;
                }
 
-               // Start sending from the first keyframe we get. In other
-               // words, we won't send any of the backlog, but we'll start
-               // sending immediately as we get the next keyframe block.
-               // This is postcondition #3.
+               Stream *stream = client->stream;
                if (client->stream_pos == size_t(-2)) {
-                       client->stream_pos = std::min<size_t>(
-                           client->stream->bytes_received - client->stream->backlog_size,
+                       // Start sending from the beginning of the backlog.
+                       client->stream_pos = min<size_t>(
+                           stream->bytes_received - stream->backlog_size,
                            0);
                        client->state = Client::SENDING_DATA;
-               } else {
-                       // client->stream_pos should be -1, but it might not be,
-                       // if we have clients from an older version.
-                       client->stream_pos = client->stream->bytes_received;
+                       goto sending_data;
+               } else if (stream->prebuffering_bytes == 0) {
+                       // Start sending from the first keyframe we get. In other
+                       // words, we won't send any of the backlog, but we'll start
+                       // sending immediately as we get the next keyframe block.
+                       // Note that this is functionally identical to the next if branch,
+                       // except that we save a binary search.
+                       client->stream_pos = stream->bytes_received;
                        client->state = Client::WAITING_FOR_KEYFRAME;
+               } else {
+                       // We're not going to send anything to the client before we have
+                       // N bytes. However, this wait might be boring; we can just as well
+                       // use it to send older data if we have it. We use lower_bound()
+                       // so that we are conservative and never add extra latency over just
+                       // waiting (assuming CBR or nearly so); otherwise, we could want e.g.
+                       // 100 kB prebuffer but end up sending a 10 MB GOP.
+                       deque<size_t>::const_iterator starting_point_it =
+                               lower_bound(stream->suitable_starting_points.begin(),
+                                           stream->suitable_starting_points.end(),
+                                           stream->bytes_received - stream->prebuffering_bytes);
+                       if (starting_point_it == stream->suitable_starting_points.end()) {
+                               // None found. Just put us at the end, and then wait for the
+                               // first keyframe to appear.
+                               client->stream_pos = stream->bytes_received;
+                               client->state = Client::WAITING_FOR_KEYFRAME;
+                       } else {
+                               client->stream_pos = *starting_point_it;
+                               client->state = Client::PREBUFFERING;
+                               goto prebuffering;
+                       }
                }
-               client->stream->put_client_to_sleep(client);
-               return;
+               // Fall through.
        }
        case Client::WAITING_FOR_KEYFRAME: {
                Stream *stream = client->stream;
-               if (ssize_t(client->stream_pos) > stream->last_suitable_starting_point) {
+               if (stream->suitable_starting_points.empty() ||
+                   client->stream_pos > stream->suitable_starting_points.back()) {
                        // We haven't received a keyframe since this stream started waiting,
                        // so keep on waiting for one.
                        // This is postcondition #3.
                        stream->put_client_to_sleep(client);
                        return;
                }
-               client->stream_pos = stream->last_suitable_starting_point;
+               client->stream_pos = stream->suitable_starting_points.back();
                client->state = Client::PREBUFFERING;
                // Fall through.
        }
        case Client::PREBUFFERING: {
+prebuffering:
                Stream *stream = client->stream;
                size_t bytes_to_send = stream->bytes_received - client->stream_pos;
                assert(bytes_to_send <= stream->backlog_size);
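
The rewritten connection-start logic above boils down to one decision: at which byte offset a new client begins, and in which state. A condensed sketch of that decision follows; the helper, its parameters, and the enum are illustrative rather than cubemap's real API, and suitable_starting_points is assumed to be sorted ascending (offsets are only ever appended as data arrives).

// Sketch of the start-position decision for a newly connected client.
#include <algorithm>
#include <cstddef>
#include <deque>
#include <utility>

using namespace std;

enum StartState { START_SENDING_DATA, START_WAITING_FOR_KEYFRAME, START_PREBUFFERING };

// Returns (initial stream_pos, initial state) for a newly connected client.
pair<size_t, StartState> choose_start(const deque<size_t> &suitable_starting_points,
                                      size_t bytes_received, size_t backlog_size,
                                      size_t prebuffering_bytes, bool from_backlog_start)
{
	if (from_backlog_start) {
		// The size_t(-2) case: begin at the oldest byte still in the backlog.
		// (Clamped directly here; the server-side code can also rely on a
		// later clamp when it skips lost data.)
		size_t start = (bytes_received > backlog_size) ? bytes_received - backlog_size : 0;
		return make_pair(start, START_SENDING_DATA);
	}
	if (prebuffering_bytes == 0 || bytes_received < prebuffering_bytes) {
		// Nothing (or not enough yet) to prebuffer from old data;
		// just wait for the next keyframe and start there.
		return make_pair(bytes_received, START_WAITING_FOR_KEYFRAME);
	}
	// lower_bound() finds the oldest recorded keyframe that is at most
	// prebuffering_bytes behind the live position, so the prebuffer can be
	// filled from already-received data without ever sending more than the
	// prebuffer asks for (i.e., never worse latency than simply waiting).
	deque<size_t>::const_iterator it =
		lower_bound(suitable_starting_points.begin(),
		            suitable_starting_points.end(),
		            bytes_received - prebuffering_bytes);
	if (it == suitable_starting_points.end()) {
		// No keyframe is recent enough; fall back to waiting for one.
		return make_pair(bytes_received, START_WAITING_FOR_KEYFRAME);
	}
	return make_pair(*it, START_PREBUFFERING);
}

Usage would be direct: feed it stream->bytes_received, stream->backlog_size, stream->prebuffering_bytes and the deque, then map the returned state onto Client::SENDING_DATA, Client::WAITING_FOR_KEYFRAME or Client::PREBUFFERING, just as the switch above does with its goto labels.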
@@ -504,6 +538,7 @@ sending_header_or_error_again:
                // Fall through.
        }
        case Client::SENDING_DATA: {
+sending_data:
                skip_lost_data(client);
                Stream *stream = client->stream;