Use in-class initialization to make it harder to forget to set a default.
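
A minimal sketch of the idea behind this commit (illustrative only, not cubemap code): with in-class initializers, members such as bytes_received and pacing_rate get their defaults at the point of declaration, so any constructor that does not mention them still starts from a known value. Presumably the corresponding defaults moved into the member declarations in stream.h, which this blobdiff does not show; the Counter struct and main() below are hypothetical.

    #include <cstddef>
    #include <cstdio>

    struct Counter {
            // In-class initializers act as the default for every constructor.
            size_t bytes_received = 0;
            unsigned pacing_rate = ~0U;

            Counter() {}                                        // picks up both defaults
            explicit Counter(size_t n) : bytes_received(n) {}   // pacing_rate still defaulted
    };

    int main()
    {
            Counter a;
            Counter b(1234);
            printf("%zu %u\n", a.bytes_received, a.pacing_rate);  // 0 4294967295
            printf("%zu %u\n", b.bytes_received, b.pacing_rate);  // 1234 4294967295
            return 0;
    }

The benefit shows up when a class has several constructors, as Stream does: each initializer list only needs to name the members that actually differ from the default.
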
[cubemap] / stream.cpp
index 7a2c0998f60f02e945045194e04415f7ad1dafc2..d7a78a602098ab5d77dda08873af8c3d7b29e11b 100644 (file)
@@ -26,15 +26,13 @@ Stream::Stream(const string &url, size_t backlog_size, size_t prebuffering_bytes
          src_encoding(src_encoding),
          data_fd(make_tempfile("")),
           backlog_size(backlog_size),
-         prebuffering_bytes(prebuffering_bytes),
-         bytes_received(0),
-         pacing_rate(~0U)
+         prebuffering_bytes(prebuffering_bytes)
 {
        if (data_fd == -1) {
                exit(1);
        }
 
-       pthread_mutex_init(&queued_data_mutex, NULL);
+       pthread_mutex_init(&queued_data_mutex, nullptr);
 }
 
 Stream::~Stream()
@@ -52,15 +50,13 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
          data_fd(data_fd),
          backlog_size(serialized.backlog_size()),
          prebuffering_bytes(serialized.prebuffering_bytes()),
-         bytes_received(serialized.bytes_received()),
-         pacing_rate(~0U)
+         bytes_received(serialized.bytes_received())
 {
        if (data_fd == -1) {
                exit(1);
        }
 
-       for (int i = 0; i < serialized.suitable_starting_point_size(); ++i) {
-               ssize_t point = serialized.suitable_starting_point(i);
+       for (ssize_t point : serialized.suitable_starting_point()) {
                if (point == -1) {
                        // Can happen when upgrading from before 1.1.3,
                        // where this was an optional field with -1 signifying
@@ -70,7 +66,7 @@ Stream::Stream(const StreamProto &serialized, int data_fd)
                suitable_starting_points.push_back(point);
        }
 
-       pthread_mutex_init(&queued_data_mutex, NULL);
+       pthread_mutex_init(&queued_data_mutex, nullptr);
 }
 
 StreamProto Stream::serialize()
@@ -82,8 +78,8 @@ StreamProto Stream::serialize()
        serialized.set_backlog_size(backlog_size);
        serialized.set_prebuffering_bytes(prebuffering_bytes);
        serialized.set_bytes_received(bytes_received);
-       for (size_t i = 0; i < suitable_starting_points.size(); ++i) {
-               serialized.add_suitable_starting_point(suitable_starting_points[i]);
+       for (size_t point : suitable_starting_points) {
+               serialized.add_suitable_starting_point(point);
        }
        serialized.set_url(url);
        data_fd = -1;
@@ -292,8 +288,8 @@ void Stream::process_queued_data()
        // data, and 10 kB is a very fine granularity in most streams.
        static const int minimum_start_point_distance = 10240;
        size_t byte_position = bytes_received;
-       for (size_t i = 0; i < queued_data_copy.size(); ++i) {
-               if ((queued_data_copy[i].metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
+       for (const DataElement &elem : queued_data_copy) {
+               if ((elem.metacube_flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) == 0) {
                        size_t num_points = suitable_starting_points.size();
                        if (num_points >= 2 &&
                            suitable_starting_points[num_points - 1] - suitable_starting_points[num_points - 2] < minimum_start_point_distance) {
@@ -302,13 +298,13 @@ void Stream::process_queued_data()
                        }
                        suitable_starting_points.push_back(byte_position);
                }
-               byte_position += queued_data_copy[i].data.iov_len;
+               byte_position += elem.data.iov_len;
        }
 
        add_data_raw(queued_data_copy);
        remove_obsolete_starting_points();
-       for (size_t i = 0; i < queued_data_copy.size(); ++i) {
-               char *data = reinterpret_cast<char *>(queued_data_copy[i].data.iov_base);
+       for (const DataElement &elem : queued_data_copy) {
+               char *data = reinterpret_cast<char *>(elem.data.iov_base);
                delete[] data;
        }