Make the UI show free disk space, and a rough estimate of how much longer we...
index c7387e76064a797a867cf2169d757c828b0bed25..1a4b2cf141ab1562e9eae438a3bc458ded7af476 100644
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -3,6 +3,7 @@
 #include "mixer.h"
 
 #include <assert.h>
+#include <endian.h>
 #include <epoxy/egl.h>
 #include <movit/effect_chain.h>
 #include <movit/effect_util.h>
@@ -17,6 +18,7 @@
 #include <sys/time.h>
 #include <time.h>
 #include <algorithm>
+#include <chrono>
 #include <cmath>
 #include <condition_variable>
 #include <cstddef>
@@ -35,6 +37,7 @@
 #include "context.h"
 #include "decklink_capture.h"
 #include "defs.h"
+#include "disk_space_estimator.h"
 #include "flags.h"
 #include "video_encoder.h"
 #include "pbo_frame_allocator.h"
@@ -45,6 +48,7 @@ class QOpenGLContext;
 
 using namespace movit;
 using namespace std;
+using namespace std::chrono;
 using namespace std::placeholders;
 using namespace bmusb;
 
@@ -73,8 +77,7 @@ void convert_fixed32_to_fp32(float *dst, size_t out_channels, const uint8_t *src
        assert(in_channels >= out_channels);
        for (size_t i = 0; i < num_samples; ++i) {
                for (size_t j = 0; j < out_channels; ++j) {
-                       // Note: Assumes little-endian.
-                       int32_t s = *(int32_t *)src;
+                       int32_t s = le32toh(*(int32_t *)src);
                        dst[i * out_channels + j] = s * (1.0f / 2147483648.0f);
                        src += 4;
                }
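
A side note on the new le32toh() line: dereferencing src through an int32_t pointer assumes four-byte alignment and technically violates strict aliasing. A minimal alignment-safe sketch of the same read (a hypothetical helper, not part of this diff; memcpy compiles down to a plain load on the usual targets):

        #include <endian.h>
        #include <stdint.h>
        #include <string.h>

        // Read one 32-bit little-endian sample without casting pointers.
        static inline int32_t read_le32_sample(const uint8_t *src)
        {
                uint32_t raw;
                memcpy(&raw, src, sizeof(raw));  // safe for any alignment
                return (int32_t)le32toh(raw);    // little-endian to host order
        }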
@@ -163,7 +166,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
        display_chain->set_dither_bits(0);  // Don't bother.
        display_chain->finalize();
 
-       video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
+       video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd, global_disk_space_estimator));
 
        // Start listening for clients only once VideoEncoder has written its header, if any.
        httpd.start(9095);
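
The extra VideoEncoder argument is what ties this file to the commit message: presumably the encoder reports how many bytes it has written, and global_disk_space_estimator turns that into the free-space and time-remaining numbers shown in the UI. The real logic lives behind disk_space_estimator.h (not in this diff); a rough sketch of the measurement side, assuming statvfs() on the recording directory:

        #include <stdint.h>
        #include <sys/statvfs.h>

        // Hypothetical helper: bytes available to unprivileged writers.
        static uint64_t free_bytes(const char *path)
        {
                struct statvfs st;
                if (statvfs(path, &st) != 0) {
                        return 0;  // treat errors as "no space known"
                }
                return uint64_t(st.f_bavail) * st.f_frsize;
        }

Dividing free bytes by the observed write rate then gives the rough "how much longer" estimate.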
@@ -380,10 +383,8 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                        is_mode_scanning[card_index] = false;
                } else {
                        static constexpr double switch_time_s = 0.5;  // Should be enough time for the signal to stabilize.
-                       timespec now;
-                       clock_gettime(CLOCK_MONOTONIC, &now);
-                       double sec_since_last_switch = (now.tv_sec - last_mode_scan_change[card_index].tv_sec) +
-                               1e-9 * (now.tv_nsec - last_mode_scan_change[card_index].tv_nsec);
+                       steady_clock::time_point now = steady_clock::now();
+                       double sec_since_last_switch = duration<double>(now - last_mode_scan_change[card_index]).count();
                        if (sec_since_last_switch > switch_time_s) {
                                // It isn't this mode; try the next one.
                                mode_scanlist_index[card_index]++;
@@ -505,7 +506,7 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;
 
        unsigned num_fields = video_format.interlaced ? 2 : 1;
-       timespec frame_upload_start;
+       steady_clock::time_point frame_upload_start;
        if (video_format.interlaced) {
                // Send the two fields along as separate frames; the other side will need to add
                // a deinterlacer to actually get this right.
@@ -514,7 +515,7 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                assert(frame_length % 2 == 0);
                frame_length /= 2;
                num_fields = 2;
-               clock_gettime(CLOCK_MONOTONIC, &frame_upload_start);
+               frame_upload_start = steady_clock::now();
        }
        userdata->last_interlaced = video_format.interlaced;
        userdata->last_has_signal = video_format.has_signal;
@@ -595,16 +596,9 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                        // against the video display, although the latter is not as critical.)
                        // This requires our system clock to be reasonably close to the
                        // video clock, but that's not an unreasonable assumption.
-                       timespec second_field_start;
-                       second_field_start.tv_nsec = frame_upload_start.tv_nsec +
-                               frame_length * 1000000000 / TIMEBASE;
-                       second_field_start.tv_sec = frame_upload_start.tv_sec +
-                               second_field_start.tv_nsec / 1000000000;
-                       second_field_start.tv_nsec %= 1000000000;
-
-                       while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
-                                              &second_field_start, nullptr) == -1 &&
-                              errno == EINTR) ;
+                       steady_clock::time_point second_field_start = frame_upload_start +
+                               nanoseconds(frame_length * 1000000000 / TIMEBASE);
+                       this_thread::sleep_until(second_field_start);
                }
 
                {
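
As a worked check of the deadline arithmetic above, assuming TIMEBASE = 120000 (the value nageru's defs.h uses, to the best of my knowledge): a 1080i50 frame is 120000 / 25 = 4800 ticks, halved earlier in this function to 2400 ticks per field, and 2400 * 1000000000 / 120000 = 20,000,000 ns = 20 ms, i.e. exactly one field period at 50 fields per second. The intermediate int64 product stays far below the ~9.2e18 overflow limit, so multiplying before dividing is safe. this_thread::sleep_until() on a steady_clock time point then replaces the clock_nanosleep(TIMER_ABSTIME) loop, with EINTR retries handled inside the standard library.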
@@ -642,8 +636,8 @@ void Mixer::thread_func()
                exit(1);
        }
 
-       struct timespec start, now;
-       clock_gettime(CLOCK_MONOTONIC, &start);
+       steady_clock::time_point start, now;
+       start = steady_clock::now();
 
        int frame = 0;
        int stats_dropped_frames = 0;
@@ -700,14 +694,13 @@ void Mixer::thread_func()
                        }
                }
 
-               int64_t duration = new_frames[master_card_index].length;
-               render_one_frame(duration);
+               int64_t frame_duration = new_frames[master_card_index].length;
+               render_one_frame(frame_duration);
                ++frame;
-               pts_int += duration;
+               pts_int += frame_duration;
 
-               clock_gettime(CLOCK_MONOTONIC, &now);
-               double elapsed = now.tv_sec - start.tv_sec +
-                       1e-9 * (now.tv_nsec - start.tv_nsec);
+               now = steady_clock::now();
+               double elapsed = duration<double>(now - start).count();
                if (frame % 100 == 0) {
                        printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)",
                                frame, stats_dropped_frames, elapsed, frame / elapsed,
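
This is the same tv_sec/tv_nsec replacement as in bm_frame(): all elapsed-time math now goes through one idiom. In isolation (a self-contained sketch):

        #include <chrono>
        #include <cstdio>

        int main()
        {
                using namespace std::chrono;
                steady_clock::time_point start = steady_clock::now();
                // ... do some work ...
                double elapsed = duration<double>(steady_clock::now() - start).count();
                printf("%.3f seconds elapsed\n", elapsed);
                return 0;
        }

steady_clock is the right drop-in for CLOCK_MONOTONIC: it is guaranteed monotonic, unlike system_clock, so wall-clock adjustments cannot make elapsed go negative. Note also the duration -> frame_duration rename above, needed to avoid shadowing std::chrono::duration now that the namespace is imported.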
@@ -863,13 +856,27 @@ void Mixer::schedule_audio_resampling_tasks(unsigned dropped_frames, int num_sam
        // Resample the audio as needed, including from previously dropped frames.
        assert(num_cards > 0);
        for (unsigned frame_num = 0; frame_num < dropped_frames + 1; ++frame_num) {
+               const bool dropped_frame = (frame_num != dropped_frames);
                {
                        // Signal to the audio thread to process this frame.
+                       // Note that if the frame is a dropped frame, we signal that
+                       we don't want to use this frame as a base for adjusting
+                       // the resampler rate. The reason for this is that the timing
+                       // of these frames is often way too late; they typically don't
+                       // “arrive” before we synthesize them. Thus, we could end up
+                       // in a situation where we have inserted e.g. five audio frames
+                       // into the queue before we then start pulling five of them
+                       // back out. This makes ResamplingQueue overestimate the delay,
+                       // causing undue resampler changes. (We _do_ use the last,
+                       // non-dropped frame; perhaps we should just discard that as well,
+                       // since dropped frames are expected to be rare, and it might be
+                       // better to just wait until we have a slightly more normal situation).
                        unique_lock<mutex> lock(audio_mutex);
-                       audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame});
+                       bool adjust_rate = !dropped_frame;
+                       audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame, adjust_rate});
                        audio_task_queue_changed.notify_one();
                }
-               if (frame_num != dropped_frames) {
+               if (dropped_frame) {
                        // For dropped frames, increase the pts. Note that if the format changed
                        // in the meantime, we have no way of detecting that; we just have to
                        // assume the frame length is always the same.
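
The three-field initializer implies AudioTask (declared in mixer.h, which this diff does not show) gained a flag; presumably something along these lines:

        struct AudioTask {
                int64_t pts_int;
                int num_samples;
                bool adjust_rate;  // false for synthesized (dropped) frames
        };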
@@ -969,11 +976,11 @@ void Mixer::audio_thread_func()
                        audio_task_queue.pop();
                }
 
-               process_audio_one_frame(task.pts_int, task.num_samples);
+               process_audio_one_frame(task.pts_int, task.num_samples, task.adjust_rate);
        }
 }
 
-void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
+void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples, bool adjust_rate)
 {
        vector<float> samples_card;
        vector<float> samples_out;
@@ -986,7 +993,13 @@ void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
                samples_card.resize(num_samples * 2);
                {
                        unique_lock<mutex> lock(cards[card_index].audio_mutex);
-                       cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], num_samples);
+                       ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy =
+                               adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE;
+                       cards[card_index].resampling_queue->get_output_samples(
+                               double(frame_pts_int) / TIMEBASE,
+                               &samples_card[0],
+                               num_samples,
+                               rate_adjustment_policy);
                }
                if (card_index == selected_audio_card) {
                        samples_out = move(samples_card);
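
RateAdjustmentPolicy likewise comes from ResamplingQueue, outside this diff; judging from the two constants used here, its declaration is presumably along these lines:

        class ResamplingQueue {
        public:
                enum RateAdjustmentPolicy {
                        DO_NOT_ADJUST_RATE,  // timing unreliable (dropped frames)
                        ADJUST_RATE          // normal frames; track the delay
                };
                // ...
        };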
@@ -1060,21 +1073,6 @@ void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
 
 //     printf("limiter=%+5.1f  compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));
 
-       // Upsample 4x to find interpolated peak.
-       peak_resampler.inp_data = samples_out.data();
-       peak_resampler.inp_count = samples_out.size() / 2;
-
-       vector<float> interpolated_samples_out;
-       interpolated_samples_out.resize(samples_out.size());
-       while (peak_resampler.inp_count > 0) {  // About four iterations.
-               peak_resampler.out_data = &interpolated_samples_out[0];
-               peak_resampler.out_count = interpolated_samples_out.size() / 2;
-               peak_resampler.process();
-               size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
-               peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
-               peak_resampler.out_data = nullptr;
-       }
-
        // At this point, we are most likely close to +0 LU, but all of our
        // measurements have been on raw sample values, not R128 values.
        // So we have a final makeup gain to get us to +0 LU; the gain
@@ -1115,6 +1113,21 @@ void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
                final_makeup_gain = m;
        }
 
+       // Upsample 4x to find interpolated peak.
+       peak_resampler.inp_data = samples_out.data();
+       peak_resampler.inp_count = samples_out.size() / 2;
+
+       vector<float> interpolated_samples_out;
+       interpolated_samples_out.resize(samples_out.size());
+       while (peak_resampler.inp_count > 0) {  // About four iterations.
+               peak_resampler.out_data = &interpolated_samples_out[0];
+               peak_resampler.out_count = interpolated_samples_out.size() / 2;
+               peak_resampler.process();
+               size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
+               peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
+               peak_resampler.out_data = nullptr;
+       }
+
        // Find R128 levels and L/R correlation.
        vector<float> left, right;
        deinterleave_samples(samples_out, &left, &right);
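
The 4x-upsampling block is moved verbatim to after the final-makeup-gain computation; the measurement itself is unchanged. What it measures is the true (inter-sample) peak: a sampled signal can peak between samples, so a meter that only inspects raw sample values under-reads by up to a few dB near Nyquist, and 4x oversampling is the standard remedy (it is also what ITU-R BS.1770 true-peak metering uses). find_peak() is nageru's own helper; judging from the (pointer, count) call above, a sketch of what such a helper does:

        #include <algorithm>
        #include <cmath>
        #include <cstddef>

        // Largest absolute sample value in an interleaved float buffer.
        static float find_peak(const float *samples, size_t num_samples)
        {
                float peak = 0.0f;
                for (size_t i = 0; i < num_samples; ++i) {
                        peak = std::max(peak, std::fabs(samples[i]));
                }
                return peak;
        }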
@@ -1250,7 +1263,7 @@ void Mixer::start_mode_scanning(unsigned card_index)
        assert(!mode_scanlist[card_index].empty());
        mode_scanlist_index[card_index] = 0;
        cards[card_index].capture->set_video_mode(mode_scanlist[card_index][0]);
-       clock_gettime(CLOCK_MONOTONIC, &last_mode_scan_change[card_index]);
+       last_mode_scan_change[card_index] = steady_clock::now();
 }
 
 Mixer::OutputChannel::~OutputChannel()