Make the SENDING_HEADER state fall through.
diff --git a/server.cpp b/server.cpp
index 079b629dcaa9b601b31033c3a4c55626711f7662..14162f58e5a960cc37e35cb18796e270c883714e 100644
--- a/server.cpp
+++ b/server.cpp
@@ -317,6 +317,13 @@ void Server::set_backlog_size(int stream_index, size_t new_size)
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->set_backlog_size(new_size);
 }
+
+void Server::set_prebuffering_bytes(int stream_index, size_t new_amount)
+{
+       MutexLock lock(&mutex);
+       assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
+       streams[stream_index]->prebuffering_bytes = new_amount;
+}
        
 void Server::set_encoding(int stream_index, Stream::Encoding encoding)
 {
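
Note: set_prebuffering_bytes() only stores the amount; it takes effect in the PREBUFFERING state further down. A standalone sketch of the condition being configured, assuming the bytes available to a client are bytes_received minus its stream position (consistent with the PREBUFFERING hunk below, though that computation itself is outside the shown hunks):

#include <stddef.h>

// A client held in PREBUFFERING may advance to SENDING_DATA only once this
// returns true, i.e. at least prebuffering_bytes of data exist past its
// chosen starting point, so playback starts with a cushion instead of
// stalling immediately.
static bool prebuffering_done(size_t bytes_received, size_t stream_pos,
                              size_t prebuffering_bytes)
{
        return bytes_received - stream_pos >= prebuffering_bytes;
}
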
@@ -330,6 +337,17 @@ void Server::set_header(int stream_index, const string &http_header, const strin
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->http_header = http_header;
+
+       if (stream_header != streams[stream_index]->stream_header) {
+               // We cannot start at any of the older starting points anymore,
+               // since they'd get the wrong header for the stream (not to mention
+               // that a changed header probably means the stream restarted,
+               // which means any client starting on the old one would probably
+               // stop playing properly at the change point). Next block
+               // should be a suitable starting point (if not, something is
+               // pretty strange), so it will fill up again soon enough.
+               streams[stream_index]->suitable_starting_points.clear();
+       }
        streams[stream_index]->stream_header = stream_header;
 }
        
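
Note: the clear keeps an implicit invariant intact: every offset in suitable_starting_points must point at data that was produced under the current stream_header. A minimal standalone restatement of that rule (the names mirror the Stream members, but this is a sketch, not cubemap's class):

#include <stddef.h>

#include <deque>
#include <string>

// stream_header / suitable_starting_points stand in for the Stream members.
static void set_stream_header(std::string *stream_header,
                              std::deque<size_t> *suitable_starting_points,
                              const std::string &new_header)
{
        if (new_header != *stream_header) {
                // The recorded offsets point at data muxed against the old header, so a
                // client started there would see a mismatched bitstream. Drop them and
                // let the next suitable block repopulate the list under the new header.
                suitable_starting_points->clear();
        }
        *stream_header = new_header;
}
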
@@ -455,30 +473,30 @@ sending_header_or_error_again:
                // words, we won't send any of the backlog, but we'll start
                // sending immediately as we get the next keyframe block.
                // This is postcondition #3.
+               Stream *stream = client->stream;
                if (client->stream_pos == size_t(-2)) {
                        client->stream_pos = std::min<size_t>(
-                           client->stream->bytes_received - client->stream->backlog_size,
+                           stream->bytes_received - stream->backlog_size,
                            0);
                        client->state = Client::SENDING_DATA;
+                       goto sending_data;
                } else {
-                       // client->stream_pos should be -1, but it might not be,
-                       // if we have clients from an older version.
-                       client->stream_pos = client->stream->bytes_received;
+                       client->stream_pos = stream->bytes_received;
                        client->state = Client::WAITING_FOR_KEYFRAME;
                }
-               client->stream->put_client_to_sleep(client);
-               return;
+               // Fall through.
        }
        case Client::WAITING_FOR_KEYFRAME: {
                Stream *stream = client->stream;
-               if (ssize_t(client->stream_pos) > stream->last_suitable_starting_point) {
+               if (stream->suitable_starting_points.empty() ||
+                   client->stream_pos > stream->suitable_starting_points.back()) {
                        // We haven't received a keyframe since this stream started waiting,
                        // so keep on waiting for one.
                        // This is postcondition #3.
                        stream->put_client_to_sleep(client);
                        return;
                }
-               client->stream_pos = stream->last_suitable_starting_point;
+               client->stream_pos = stream->suitable_starting_points.back();
                client->state = Client::PREBUFFERING;
                // Fall through.
        }
@@ -488,6 +506,7 @@ sending_header_or_error_again:
                assert(bytes_to_send <= stream->backlog_size);
                if (bytes_to_send < stream->prebuffering_bytes) {
                        // We don't have enough bytes buffered to start this client yet.
+                       // This is postcondition #3.
                        stream->put_client_to_sleep(client);
                        return;
                }
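
Note: seen together (and with the sending_data label the next hunk adds), the tail of the state machine now reads as one fall-through chain: SENDING_HEADER either jumps straight to SENDING_DATA (clients that asked for stream_pos == -2, i.e. the start of the backlog) or falls into WAITING_FOR_KEYFRAME; that falls into PREBUFFERING once a recorded starting point at or past stream_pos exists; and PREBUFFERING falls into SENDING_DATA once prebuffering_bytes of data are buffered. A standalone sketch of that flow with simplified types, assuming bytes_to_send in the PREBUFFERING hunk is bytes_received minus the client's stream position (that computation is outside the hunk):

#include <stddef.h>

#include <deque>

enum MiniState { SENDING_HEADER, WAITING_FOR_KEYFRAME, PREBUFFERING, SENDING_DATA, SLEEPING };

struct MiniStream {
        size_t bytes_received;
        size_t backlog_size;
        size_t prebuffering_bytes;
        std::deque<size_t> suitable_starting_points;
};

struct MiniClient {
        size_t stream_pos;
        MiniState state;
};

// Advance one client as far as the available data allows; SLEEPING stands in
// for put_client_to_sleep() plus return in the real code.
static void advance(MiniClient *client, MiniStream *stream)
{
        if (client->state == SENDING_HEADER) {
                if (client->stream_pos == size_t(-2)) {
                        // "Start as far back as the backlog allows." The patch writes this as
                        // std::min<size_t>(bytes_received - backlog_size, 0), which for an
                        // unsigned type is always 0; skip_lost_data() at the top of SENDING_DATA
                        // presumably pulls the position forward into the live backlog. The
                        // sketch simply clamps here instead.
                        client->stream_pos = (stream->bytes_received > stream->backlog_size)
                                ? stream->bytes_received - stream->backlog_size
                                : 0;
                        client->state = SENDING_DATA;  // the patch's "goto sending_data"
                        return;
                }
                // Any other sentinel: start at the next keyframe after the current end.
                client->stream_pos = stream->bytes_received;
                client->state = WAITING_FOR_KEYFRAME;  // fall through
        }
        if (client->state == WAITING_FOR_KEYFRAME) {
                if (stream->suitable_starting_points.empty() ||
                    client->stream_pos > stream->suitable_starting_points.back()) {
                        client->state = SLEEPING;  // no keyframe recorded since we started waiting
                        return;
                }
                client->stream_pos = stream->suitable_starting_points.back();
                client->state = PREBUFFERING;  // fall through
        }
        if (client->state == PREBUFFERING) {
                size_t bytes_available = stream->bytes_received - client->stream_pos;
                if (bytes_available < stream->prebuffering_bytes) {
                        client->state = SLEEPING;  // not enough cushion buffered yet
                        return;
                }
                client->state = SENDING_DATA;  // fall through to the actual send loop
        }
}
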
@@ -495,6 +514,7 @@ sending_header_or_error_again:
                // Fall through.
        }
        case Client::SENDING_DATA: {
+sending_data:
                skip_lost_data(client);
                Stream *stream = client->stream;
 
@@ -571,6 +591,18 @@ int Server::parse_request(Client *client)
                return 400;  // Bad request (empty).
        }
 
+       // Parse the headers, for logging purposes.
+       // TODO: Case-insensitivity.
+       multimap<string, string> headers = extract_headers(lines, client->remote_addr);
+       multimap<string, string>::const_iterator referer_it = headers.find("Referer");
+       if (referer_it != headers.end()) {
+               client->referer = referer_it->second;
+       }
+       multimap<string, string>::const_iterator user_agent_it = headers.find("User-Agent");
+       if (user_agent_it != headers.end()) {
+               client->user_agent = user_agent_it->second;
+       }
+
        vector<string> request_tokens = split_tokens(lines[0]);
        if (request_tokens.size() < 2) {
                return 400;  // Bad request (empty).
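
Note: the Referer and User-Agent lookups above are exact-case, hence the TODO. One way the case-insensitivity could be handled, sketched here as an assumption rather than taken from cubemap (extract_headers' own behaviour is not visible in this hunk), is to normalize header names before comparing:

#include <ctype.h>
#include <stddef.h>

#include <map>
#include <string>

static std::string lowercase(const std::string &str)
{
        std::string result = str;
        for (size_t i = 0; i < result.size(); ++i) {
                result[i] = char(tolower((unsigned char)result[i]));
        }
        return result;
}

// Case-insensitive lookup in the multimap returned by extract_headers();
// returns the empty string if the header is absent.
// Usage (hypothetical): client->referer = find_header(headers, "referer");
static std::string find_header(const std::multimap<std::string, std::string> &headers,
                               const std::string &wanted)
{
        const std::string wanted_lower = lowercase(wanted);
        for (std::multimap<std::string, std::string>::const_iterator it = headers.begin();
             it != headers.end(); ++it) {
                if (lowercase(it->first) == wanted_lower) {
                        return it->second;
                }
        }
        return std::string();
}
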
@@ -598,6 +630,7 @@ int Server::parse_request(Client *client)
        }
 
        client->url = request_tokens[1];
+
        client->stream = stream;
        if (setsockopt(client->sock, SOL_SOCKET, SO_MAX_PACING_RATE, &client->stream->pacing_rate, sizeof(client->stream->pacing_rate)) == -1) {
                if (client->stream->pacing_rate != ~0U) {
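
Note: for context on the setsockopt() call above: SO_MAX_PACING_RATE is a Linux socket option taking a rate in bytes per second, where ~0U means unlimited, which is why a failure is only worth logging when an actual limit was configured. A minimal standalone sketch, assuming a Linux kernel new enough to support the option (3.13 or later; before the kernel gained built-in TCP pacing around 4.13, the fq qdisc had to be attached for the rate to actually take effect):

#include <stdint.h>
#include <stdio.h>

#include <sys/socket.h>

// Ask the kernel to pace output on this socket to at most <bytes_per_sec>
// bytes per second; ~0U (the default) means "no limit".
static void set_pacing_rate(int sock, uint32_t bytes_per_sec)
{
        if (setsockopt(sock, SOL_SOCKET, SO_MAX_PACING_RATE,
                       &bytes_per_sec, sizeof(bytes_per_sec)) == -1) {
                perror("setsockopt(SO_MAX_PACING_RATE)");
        }
}
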