Rename Resampler to ResamplingQueue, to avoid conflicts with zita-resampler.
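
The diff below only shows the call sites in mixer.cpp. As a rough guide, those
call sites imply an interface along these lines for the renamed class (a sketch
only; parameter names and exact types are assumptions, the real declarations
live in resampling_queue.h):

    // Hypothetical sketch of ResamplingQueue as used from mixer.cpp.
    class ResamplingQueue {
    public:
            // Input and output sample rates in Hz, plus channel count
            // (mixer.cpp passes OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2).
            ResamplingQueue(unsigned freq_in_hz, unsigned freq_out_hz, unsigned num_channels);

            // Queue interleaved input samples stamped with a presentation
            // time in seconds.
            void add_input_samples(double pts, const float *samples, size_t num_samples);

            // Fetch resampled output for the given presentation time.
            // Returns false if the previous call underran.
            bool get_output_samples(double pts, float *samples, size_t num_samples);
    };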
index 656cdfaf39feb9ef67b79464fffacde678c2e5c1..6bf81192fdfd99421398dedc189e23be75344a5d 100644 (file)
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -124,7 +124,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
                        [this]{
                                resource_pool->clean_context();
                        });
-               card->resampler.reset(new Resampler(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
+               card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
                card->usb->configure_card();
        }
 
@@ -249,7 +249,7 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                if (dropped_frames > FPS * 2) {
                        fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around), resetting resampler\n",
                                card_index);
-                       card->resampler.reset(new Resampler(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
+                       card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
                } else if (dropped_frames > 0) {
                        // Insert silence as needed.
                        fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
@@ -257,10 +257,10 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                        vector<float> silence;
                        silence.resize((OUTPUT_FREQUENCY / FPS) * 2);
                        for (int i = 0; i < dropped_frames; ++i) {
-                               card->resampler->add_input_samples((unwrapped_timecode - dropped_frames + i) / double(FPS), silence.data(), (OUTPUT_FREQUENCY / FPS));
+                               card->resampling_queue->add_input_samples((unwrapped_timecode - dropped_frames + i) / double(FPS), silence.data(), (OUTPUT_FREQUENCY / FPS));
                        }
                }
-               card->resampler->add_input_samples(unwrapped_timecode / double(FPS), audio.data(), num_samples);
+               card->resampling_queue->add_input_samples(unwrapped_timecode / double(FPS), audio.data(), num_samples);
        }
 
        // Done with the audio, so release it.
@@ -466,7 +466,7 @@ void Mixer::thread_func()
                for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
                        input_frames.push_back(bmusb_current_rendering_frame[card_index]);
                }
-               const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampler.h. TODO: Make less hard-coded.
+               const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
                h264_encoder->end_frame(fence, pts_int + av_delay, input_frames);
                ++frame;
                pts_int += TIMEBASE / FPS;
@@ -533,7 +533,7 @@ void Mixer::process_audio_one_frame()
                samples_card.resize((OUTPUT_FREQUENCY / FPS) * 2);
                {
                        unique_lock<mutex> lock(cards[card_index].audio_mutex);
-                       if (!cards[card_index].resampler->get_output_samples(pts(), &samples_card[0], OUTPUT_FREQUENCY / FPS)) {
+                       if (!cards[card_index].resampling_queue->get_output_samples(pts(), &samples_card[0], OUTPUT_FREQUENCY / FPS)) {
                                printf("Card %d reported previous underrun.\n", card_index);
                        }
                }
@@ -575,10 +575,12 @@ void Mixer::process_audio_one_frame()
 //     float limiter_att, compressor_att;
 
        // Then a limiter at +0 dB (so, -14 dBFS) to take out the worst peaks only.
+       // Note that since ratio is not infinite, we could go slightly higher than this.
+       // Probably more tuning is warranted here.
        {
                float threshold = pow(10.0f, (ref_level_dbfs + 0.0f) / 20.0f);  // +0 dB.
-               float ratio = 1000.0f;  // Infinity.
-               float attack_time = 0.001f;
+               float ratio = 30.0f;
+               float attack_time = 0.0f;  // Instant.
                float release_time = 0.005f;
                float makeup_gain = 1.0f;  // 0 dB.
                limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
@@ -591,7 +593,7 @@ void Mixer::process_audio_one_frame()
                float ratio = 20.0f;
                float attack_time = 0.005f;
                float release_time = 0.040f;
-               float makeup_gain = 2.0f;  // +3 dB.
+               float makeup_gain = 2.0f;  // +6 dB.
                compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
 //             compressor_att = compressor.get_attenuation();
        }
@@ -599,7 +601,7 @@ void Mixer::process_audio_one_frame()
 //     printf("limiter=%+5.1f  compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));
 
        // Find peak and R128 levels.
-       peak = std::max(peak, find_peak(samples_out));
+       peak = max<float>(peak, find_peak(samples_out));
        vector<float> left, right;
        deinterleave_samples(samples_out, &left, &right);
        float *ptrs[] = { left.data(), right.data() };
@@ -694,6 +696,13 @@ void Mixer::channel_clicked(int preview_num)
        theme->channel_clicked(preview_num);
 }
 
+void Mixer::reset_meters()
+{
+       peak = 0.0f;
+       r128.reset();
+       r128.integr_start();
+}
+
 Mixer::OutputChannel::~OutputChannel()
 {
        if (has_current_frame) {