git.sesse.net Git - cubemap/commitdiff
Allow prebuffer to happen by playing data from the backlog.
author	Steinar H. Gunderson <sgunderson@bigfoot.com>
	Thu, 23 Jul 2015 16:31:06 +0000 (18:31 +0200)
committer	Steinar H. Gunderson <sgunderson@bigfoot.com>
	Thu, 23 Jul 2015 16:36:42 +0000 (18:36 +0200)
This won't reduce the latency (although it shouldn't increase it either, unless
you have extreme bitrate changes), but it should give a picture on screen much
sooner, hopefully getting rid of the “what's going on, why is nothing starting”
feeling.

server.cpp

index 14162f58e5a960cc37e35cb18796e270c883714e..e99dbeaa89b311788a9c0fc4eb132a667db3f5b1 100644
--- a/server.cpp
+++ b/server.cpp
@@ -469,20 +469,43 @@ sending_header_or_error_again:
                        return;
                }
 
-               // Start sending from the first keyframe we get. In other
-               // words, we won't send any of the backlog, but we'll start
-               // sending immediately as we get the next keyframe block.
-               // This is postcondition #3.
                Stream *stream = client->stream;
                if (client->stream_pos == size_t(-2)) {
+                       // Start sending from the beginning of the backlog.
                        client->stream_pos = std::max<ssize_t>(
                            stream->bytes_received - stream->backlog_size,
                            0);
                        client->state = Client::SENDING_DATA;
                        goto sending_data;
-               } else {
+               } else if (stream->prebuffering_bytes == 0) {
+                       // Start sending from the first keyframe we get. In other
+                       // words, we won't send any of the backlog, but we'll start
+                       // sending immediately as we get the next keyframe block.
+                       // Note that this is functionally identical to the next if branch,
+                       // except that we save a binary search.
                        client->stream_pos = stream->bytes_received;
                        client->state = Client::WAITING_FOR_KEYFRAME;
+               } else {
+                       // We're not going to send anything to the client before we have
+                       // N bytes. However, this wait might be boring; we can just as well
+                       // use it to send older data if we have it. We use lower_bound()
+                       // so that we are conservative and never add extra latency over just
+                       // waiting (assuming CBR or nearly so); otherwise, we could want e.g.
+                       // 100 kB prebuffer but end up sending a 10 MB GOP.
+                       deque<size_t>::const_iterator starting_point_it =
+                               lower_bound(stream->suitable_starting_points.begin(),
+                                           stream->suitable_starting_points.end(),
+                                           stream->bytes_received - stream->prebuffering_bytes);
+                       if (starting_point_it == stream->suitable_starting_points.end()) {
+                               // None found. Just put us at the end, and then wait for the
+                               // first keyframe to appear.
+                               client->stream_pos = stream->bytes_received;
+                               client->state = Client::WAITING_FOR_KEYFRAME;
+                       } else {
+                               client->stream_pos = *starting_point_it;
+                               client->state = Client::PREBUFFERING;
+                               goto prebuffering;
+                       }
                }
                // Fall through.
        }
@@ -501,6 +524,7 @@ sending_header_or_error_again:
                // Fall through.
        }
        case Client::PREBUFFERING: {
+prebuffering:
                Stream *stream = client->stream;
                size_t bytes_to_send = stream->bytes_received - client->stream_pos;
                assert(bytes_to_send <= stream->backlog_size);
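
For illustration only (not part of the commit; StreamView and pick_start are made-up stand-ins for the real Stream and Client structures), here is a small self-contained C++ sketch of the starting-point selection the new else branch performs:

// Sketch of the prebuffer starting-point selection, assuming a sorted list
// of keyframe offsets like stream->suitable_starting_points above.
#include <algorithm>
#include <cstddef>
#include <deque>
#include <iostream>

struct StreamView {
	size_t bytes_received;                        // total bytes seen so far
	size_t prebuffering_bytes;                    // how much the client should prebuffer
	std::deque<size_t> suitable_starting_points;  // sorted byte offsets of keyframes
};

// Returns the offset to start sending from, or bytes_received if no keyframe
// is recent enough (the client then waits for the next keyframe instead).
size_t pick_start(const StreamView &s)
{
	// The underflow guard is an addition in this sketch; the real code only
	// reaches this branch when prebuffering is actually enabled.
	if (s.prebuffering_bytes == 0 || s.bytes_received < s.prebuffering_bytes) {
		return s.bytes_received;
	}
	// lower_bound() finds the first keyframe at or after
	// bytes_received - prebuffering_bytes, so we never hand the client more
	// old data than the prebuffer asks for (assuming roughly constant bitrate).
	auto it = std::lower_bound(s.suitable_starting_points.begin(),
	                           s.suitable_starting_points.end(),
	                           s.bytes_received - s.prebuffering_bytes);
	return (it == s.suitable_starting_points.end()) ? s.bytes_received : *it;
}

int main()
{
	StreamView s{1000000, 100000, {0, 300000, 600000, 905000, 950000}};
	std::cout << pick_start(s) << '\n';  // prints 905000
}

With bytes_received = 1000000 and prebuffering_bytes = 100000, lower_bound() picks the keyframe at offset 905000, so the client immediately gets roughly 95 kB from the backlog to prebuffer instead of sitting idle until the next keyframe arrives.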