#include "bmusb/bmusb.h"
#include "context.h"
+#include "defs.h"
#include "h264encode.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
} // namespace
Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
- : httpd("test.ts", WIDTH, HEIGHT),
+ : httpd(LOCAL_DUMP_FILE_NAME, WIDTH, HEIGHT),
num_cards(num_cards),
mixer_surface(create_surface(format)),
- h264_encoder_surface(create_surface(format))
+ h264_encoder_surface(create_surface(format)),
+ level_compressor(OUTPUT_FREQUENCY),
+ limiter(OUTPUT_FREQUENCY),
+ compressor(OUTPUT_FREQUENCY)
{
httpd.start(9095);
[this]{
resource_pool->clean_context();
});
- card->resampler.reset(new Resampler(48000.0, 48000.0, 2));
+ card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
card->usb->configure_card();
}
"} \n";
cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader);
- r128.init(2, 48000);
+ r128.init(2, OUTPUT_FREQUENCY);
r128.integr_start();
+
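+	// High-pass filter for the low cut; the actual cutoff frequency is
+	// supplied each frame in process_audio_one_frame().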
+ locut.init(FILTER_HPF, 2);
+
+	// hlen=16 gives fairly low resampling quality, but higher values cost
+	// quite a bit of CPU, and there's a limit to how important the peak meter is.
+ peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16);
+
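+	// ALSA output, so we can monitor the finished mix locally.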
+ alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
}
Mixer::~Mixer()
}
cards[card_index].usb->stop_dequeue_thread();
}
+
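+	// Take the encoder down explicitly, so that it has finished
+	// (and stopped its threads) before the rest of the Mixer goes away.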
+ h264_encoder.reset(nullptr);
}
namespace {
}
}
-float find_peak(const vector<float> &samples)
+float find_peak(const float *samples, size_t num_samples)
{
float m = fabs(samples[0]);
- for (size_t i = 1; i < samples.size(); ++i) {
+ for (size_t i = 1; i < num_samples; ++i) {
m = std::max(m, fabs(samples[i]));
}
return m;
unique_lock<mutex> lock(card->audio_mutex);
int unwrapped_timecode = timecode;
- if (dropped_frames > 60 * 2) {
+ if (dropped_frames > FPS * 2) {
fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around), resetting resampler\n",
card_index);
- card->resampler.reset(new Resampler(48000.0, 48000.0, 2));
+ card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
} else if (dropped_frames > 0) {
// Insert silence as needed.
fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
card_index, dropped_frames, timecode);
vector<float> silence;
- silence.resize((48000 / 60) * 2);
+ silence.resize((OUTPUT_FREQUENCY / FPS) * 2);
for (int i = 0; i < dropped_frames; ++i) {
- card->resampler->add_input_samples((unwrapped_timecode - dropped_frames + i) / 60.0, silence.data(), (48000 / 60));
+ card->resampling_queue->add_input_samples((unwrapped_timecode - dropped_frames + i) / double(FPS), silence.data(), (OUTPUT_FREQUENCY / FPS));
}
}
- card->resampler->add_input_samples(unwrapped_timecode / 60.0, audio.data(), num_samples);
+ card->resampling_queue->add_input_samples(unwrapped_timecode / double(FPS), audio.data(), num_samples);
}
// Done with the audio, so release it.
card_copy[card_index].new_data_ready = card->new_data_ready;
card_copy[card_index].new_frame = card->new_frame;
card_copy[card_index].new_data_ready_fence = card->new_data_ready_fence;
- card_copy[card_index].new_frame_audio = move(card->new_frame_audio);
card_copy[card_index].dropped_frames = card->dropped_frames;
card->new_data_ready = false;
card->new_data_ready_changed.notify_all();
if (frame_num != card_copy[0].dropped_frames) {
// For dropped frames, increase the pts.
++dropped_frames;
- pts_int += TIMEBASE / 60;
+ pts_int += TIMEBASE / FPS;
}
}
double loudness_range_high = r128.range_max();
audio_level_callback(loudness_s, 20.0 * log10(peak),
- loudness_i, loudness_range_low, loudness_range_high);
+ loudness_i, loudness_range_low, loudness_range_high,
+ last_gain_staging_db);
}
for (unsigned card_index = 1; card_index < num_cards; ++card_index) {
// just increase the pts (skipping over this frame) and don't try to compute anything new.
if (card_copy[0].new_frame->len == 0) {
++dropped_frames;
- pts_int += TIMEBASE / 60;
+ pts_int += TIMEBASE / FPS;
continue;
}
for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
input_frames.push_back(bmusb_current_rendering_frame[card_index]);
}
- const int64_t av_delay = TIMEBASE / 10; // Corresponds to the fixed delay in resampler.h. TODO: Make less hard-coded.
+ const int64_t av_delay = TIMEBASE / 10; // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
h264_encoder->end_frame(fence, pts_int + av_delay, input_frames);
++frame;
- pts_int += TIMEBASE / 60;
+ pts_int += TIMEBASE / FPS;
// The live frame just shows the RGBA texture we just rendered.
// It owns rgba_tex now.
void Mixer::process_audio_one_frame()
{
- // TODO: Allow using audio from the other card(s) as well.
+ vector<float> samples_card;
vector<float> samples_out;
for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
- samples_out.resize((48000 / 60) * 2);
+ samples_card.resize((OUTPUT_FREQUENCY / FPS) * 2);
{
unique_lock<mutex> lock(cards[card_index].audio_mutex);
- if (!cards[card_index].resampler->get_output_samples(pts(), &samples_out[0], 48000 / 60)) {
+ if (!cards[card_index].resampling_queue->get_output_samples(pts(), &samples_card[0], OUTPUT_FREQUENCY / FPS)) {
printf("Card %d reported previous underrun.\n", card_index);
}
}
+ // TODO: Allow using audio from the other card(s) as well.
if (card_index == 0) {
- vector<float> left, right;
- peak = std::max(peak, find_peak(samples_out));
- deinterleave_samples(samples_out, &left, &right);
- float *ptrs[] = { left.data(), right.data() };
- r128.process(left.size(), ptrs);
- h264_encoder->add_audio(pts_int, move(samples_out));
+ samples_out = move(samples_card);
}
}
+
+	// Cut away everything under the low cut (150 Hz by default); we don't need it for voice,
+ // and it will reduce headroom and confuse the compressor.
+ // (In particular, any hums at 50 or 60 Hz should be dampened.)
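+	// (locut.render() takes the cutoff as angular frequency, in radians per sample.)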
+ locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);
+
+ // Apply a level compressor to get the general level right.
+ // Basically, if it's over about -40 dBFS, we squeeze it down to that level
+ // (or more precisely, near it, since we don't use infinite ratio),
+ // then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
+ // entirely arbitrary, but from practical tests with speech, it seems to
+	// put us around -23 LUFS, so it's a reasonable starting point for later use.
+ float ref_level_dbfs = -14.0f;
+ {
+ float threshold = 0.01f; // -40 dBFS.
+ float ratio = 20.0f;
+ float attack_time = 0.5f;
+ float release_time = 20.0f;
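+	// Attack/release times are in seconds; the long release makes this
+	// a slow gain rider rather than an audible compressor.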
+ float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f); // +26 dB.
+ level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
+ last_gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
+
+#if 0
+	// Debug output. Note that this block must stay inside the scope above,
+	// since it references makeup_gain.
+	printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n",
+		level_compressor.get_level(), 20.0 * log10(level_compressor.get_level()),
+		level_compressor.get_attenuation(), 20.0 * log10(level_compressor.get_attenuation()),
+		20.0 * log10(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain));
+#endif
+	}
+
+// float limiter_att, compressor_att;
+
+ // The real compressor.
+ if (compressor_enabled) {
+ float threshold = pow(10.0f, compressor_threshold_dbfs / 20.0f);
+ float ratio = 20.0f;
+ float attack_time = 0.005f;
+ float release_time = 0.040f;
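+	// Much faster attack/release than the level compressor above;
+	// this one shapes dynamics instead of riding the overall gain.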
+ float makeup_gain = 2.0f; // +6 dB.
+ compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
+// compressor_att = compressor.get_attenuation();
+ }
+
+	// Finally, a limiter 4 dB above the reference level (so, -10 dBFS) to take out the worst peaks only.
+ // Note that since ratio is not infinite, we could go slightly higher than this.
+ if (limiter_enabled) {
+ float threshold = pow(10.0f, limiter_threshold_dbfs / 20.0f);
+ float ratio = 30.0f;
+ float attack_time = 0.0f; // Instant.
+ float release_time = 0.020f;
+ float makeup_gain = 1.0f; // 0 dB.
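+	// The instant attack and steep ratio are what make this act as
+	// a limiter rather than as yet another compressor.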
+ limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
+// limiter_att = limiter.get_attenuation();
+ }
+
+// printf("limiter=%+5.1f compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));
+
+	// Upsample 4x to find the interpolated (inter-sample) peak.
+ peak_resampler.inp_data = samples_out.data();
+ peak_resampler.inp_count = samples_out.size() / 2;
+
+ vector<float> interpolated_samples_out;
+ interpolated_samples_out.resize(samples_out.size());
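+	// Standard zita-resampler pattern: set inp_data/inp_count, then call
+	// process() repeatedly, draining out_data until all input is consumed.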
+ while (peak_resampler.inp_count > 0) { // About four iterations.
+ peak_resampler.out_data = &interpolated_samples_out[0];
+ peak_resampler.out_count = interpolated_samples_out.size() / 2;
+ peak_resampler.process();
+ size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
+ peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
+ }
+
+ // Find R128 levels.
+ vector<float> left, right;
+ deinterleave_samples(samples_out, &left, &right);
+ float *ptrs[] = { left.data(), right.data() };
+ r128.process(left.size(), ptrs);
+
+ // Send the samples to the sound card.
+ if (alsa) {
+ alsa->write(samples_out);
+ }
+
+ // And finally add them to the output.
+ h264_encoder->add_audio(pts_int, move(samples_out));
}
void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
theme->channel_clicked(preview_num);
}
+void Mixer::reset_meters()
+{
+ peak_resampler.reset();
+ peak = 0.0f;
+ r128.reset();
+ r128.integr_start();
+}
+
Mixer::OutputChannel::~OutputChannel()
{
if (has_current_frame) {