From: Steinar H. Gunderson
Date: Sun, 21 Apr 2013 19:12:39 +0000 (+0200)
Subject: Catch up on any lost data before serializing, so increasing the backlog will not...
X-Git-Tag: 1.0.0~44
X-Git-Url: https://git.sesse.net/?p=cubemap;a=commitdiff_plain;h=dfa828027e66f823e1cd8e444dfb96492e296b42

Catch up on any lost data before serializing, so that increasing the backlog
size will not inject zero bytes into the data of (too) backlogged clients.
---
diff --git a/server.cpp b/server.cpp
index 5bd0d55..1f5477e 100644
--- a/server.cpp
+++ b/server.cpp
@@ -113,6 +113,19 @@ CubemapStateProto Server::serialize()
 	// We don't serialize anything queued, so empty the queues.
 	process_queued_data();
 
+	// Set all clients in a consistent state before serializing
+	// (ie., they have no remaining lost data). Otherwise, increasing
+	// the backlog could take clients into a newly valid area of the backlog,
+	// sending a stream of zeros instead of skipping the data as it should.
+	//
+	// TODO: Do this when clients are added back from serialized state instead;
+	// it would probably be less wasteful.
+	for (map<int, Client>::iterator client_it = clients.begin();
+	     client_it != clients.end();
+	     ++client_it) {
+		skip_lost_data(&client_it->second);
+	}
+
 	CubemapStateProto serialized;
 	for (map<int, Client>::const_iterator client_it = clients.begin();
 	     client_it != clients.end();
@@ -376,27 +389,15 @@ sending_header_or_error_again:
 		return;
 	}
 	case Client::SENDING_DATA: {
-sending_data_again:
-		// See if there's some data we've lost. Ideally, we should drop to a block boundary,
-		// but resync will be the mux's problem.
+		skip_lost_data(client);
 		Stream *stream = client->stream;
+
+sending_data_again:
 		size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+		assert(bytes_to_send <= stream->backlog_size);
 		if (bytes_to_send == 0) {
 			return;
 		}
-		if (bytes_to_send > stream->backlog_size) {
-			size_t bytes_lost = bytes_to_send - stream->backlog_size;
-			client->stream_pos = stream->bytes_received - stream->backlog_size;
-			client->bytes_lost += bytes_lost;
-			++client->num_loss_events;
-			bytes_to_send = stream->backlog_size;
-
-			double loss_fraction = double(client->bytes_lost) / double(client->bytes_lost + client->bytes_sent);
-			log(WARNING, "[%s] Client lost %lld bytes (total loss: %.2f%%), maybe too slow connection",
-			    client->remote_addr.c_str(),
-			    (long long int)(bytes_lost),
-			    100.0 * loss_fraction);
-		}
 
 		// See if we need to split across the circular buffer.
 		bool more_data = false;
@@ -440,6 +441,26 @@ sending_data_again:
 	}
 }
 
+// See if there's some data we've lost. Ideally, we should drop to a block boundary,
+// but resync will be the mux's problem.
+void Server::skip_lost_data(Client *client)
+{
+	Stream *stream = client->stream;
+	size_t bytes_to_send = stream->bytes_received - client->stream_pos;
+	if (bytes_to_send > stream->backlog_size) {
+		size_t bytes_lost = bytes_to_send - stream->backlog_size;
+		client->stream_pos = stream->bytes_received - stream->backlog_size;
+		client->bytes_lost += bytes_lost;
+		++client->num_loss_events;
+
+		double loss_fraction = double(client->bytes_lost) / double(client->bytes_lost + client->bytes_sent);
+		log(WARNING, "[%s] Client lost %lld bytes (total loss: %.2f%%), maybe too slow connection",
+		    client->remote_addr.c_str(),
+		    (long long int)(bytes_lost),
+		    100.0 * loss_fraction);
+	}
+}
+
 int Server::parse_request(Client *client)
 {
 	vector<string> lines = split_lines(client->request);
diff --git a/server.h b/server.h
index 476e790..963aca9 100644
--- a/server.h
+++ b/server.h
@@ -127,6 +127,7 @@ private:
 	void construct_error(Client *client, int error_code);
 
 	void process_queued_data();
+	void skip_lost_data(Client *client);
 
 	void add_client(int sock);
 };
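
For context, the skip_lost_data() logic added above is pure position
arithmetic against the circular backlog: a client that has fallen further
behind bytes_received than backlog_size can cover is jumped forward to the
oldest byte that still exists, and the skipped range is accounted as loss.
The stand-alone sketch below is a simplified model of that idea, not the
actual cubemap code -- the Stream and Client structs here are stripped-down
stand-ins and the numbers are invented -- but it shows why the skip has to
happen before the backlog is enlarged.

// Simplified model of the loss-skipping logic in the patch above; the real
// cubemap Client/Stream structs carry much more state (sockets, protobuf
// serialization, etc.).
#include <cstddef>
#include <cstdio>

struct Stream {
	size_t bytes_received;  // total bytes ever written to the stream
	size_t backlog_size;    // size of the circular backlog buffer
};

struct Client {
	Stream *stream;
	size_t stream_pos;      // next byte this client should be sent
	size_t bytes_sent;
	size_t bytes_lost;
	int num_loss_events;
};

// Same idea as Server::skip_lost_data(): if the client is further behind
// than the backlog can cover, jump it forward to the oldest valid byte and
// account the skipped range as loss.
static void skip_lost_data(Client *client)
{
	Stream *stream = client->stream;
	size_t bytes_behind = stream->bytes_received - client->stream_pos;
	if (bytes_behind > stream->backlog_size) {
		size_t bytes_lost = bytes_behind - stream->backlog_size;
		client->stream_pos = stream->bytes_received - stream->backlog_size;
		client->bytes_lost += bytes_lost;
		++client->num_loss_events;

		double loss_fraction = double(client->bytes_lost) /
			double(client->bytes_lost + client->bytes_sent);
		fprintf(stderr, "client lost %zu bytes (total loss: %.2f%%)\n",
		        bytes_lost, 100.0 * loss_fraction);
	}
}

int main()
{
	Stream stream = { /*bytes_received=*/10000, /*backlog_size=*/1000 };
	Client client = { &stream, /*stream_pos=*/7000, /*bytes_sent=*/7000,
	                  /*bytes_lost=*/0, /*num_loss_events=*/0 };

	// The client is 3000 bytes behind, but only the last 1000 bytes still
	// exist in the backlog. If the backlog were grown to, say, 4000 bytes
	// without skipping first, the client's position would suddenly fall
	// inside the (now larger) valid window even though bytes 6000..9000
	// were never copied into the new buffer -- it would be sent zeros.
	// Skipping first pins the client to real data at position 9000.
	skip_lost_data(&client);
	printf("stream_pos=%zu bytes_lost=%zu loss_events=%d\n",
	       client.stream_pos, client.bytes_lost, client.num_loss_events);
	return 0;
}

The serialize() loop does this skip for every client, which the TODO in the
patch notes is somewhat wasteful; the benefit is that every stream_pos that
ends up in the serialized state is already consistent with whatever backlog
size the restarted process chooses.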