#include <movit/flat_input.h>
#include <movit/image_format.h>
#include <movit/resource_pool.h>
+#include <movit/util.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
-#include <util.h>
#include <algorithm>
#include <cmath>
#include <condition_variable>
}
}
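+// Insert the new frame (or field) into the given card's frame history.
+// For interlaced video, each field gets its own entry, so shift the
+// existing history down by one; for progressive video, simply fill the
+// entire history with the new frame.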
+void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced, unsigned card_index, InputState *input_state)
+{
+ if (interlaced) {
+ for (unsigned frame_num = FRAME_HISTORY_LENGTH; frame_num --> 1; ) { // :-)
+ input_state->buffered_frames[card_index][frame_num] =
+ input_state->buffered_frames[card_index][frame_num - 1];
+ }
+ input_state->buffered_frames[card_index][0] = { frame, field_num };
+ } else {
+ for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
+ input_state->buffered_frames[card_index][frame_num] = { frame, field_num };
+ }
+ }
+}
+
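+// Construct a filename for a local dump, of the form
+// LOCAL_DUMP_PREFIX + "<date>-<time><tz>-fNN" + LOCAL_DUMP_SUFFIX.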
+string generate_local_dump_filename(int frame)
+{
+ time_t now = time(NULL);
+ tm now_tm;
+ localtime_r(&now, &now_tm);
+
+ char timestamp[256];
+ strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);
+
+ // Use the frame number to disambiguate between two cuts starting
+ // on the same second.
+ char filename[256];
+ snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
+ LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
+ return filename;
+}
+
} // namespace
Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
- : httpd(LOCAL_DUMP_FILE_NAME, WIDTH, HEIGHT),
+ : httpd(WIDTH, HEIGHT),
num_cards(num_cards),
mixer_surface(create_surface(format)),
h264_encoder_surface(create_surface(format)),
+ correlation(OUTPUT_FREQUENCY),
level_compressor(OUTPUT_FREQUENCY),
limiter(OUTPUT_FREQUENCY),
compressor(OUTPUT_FREQUENCY)
{
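+ // Start the initial recording; a cut (see should_cut below) rotates to a new file.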
+ httpd.open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
httpd.start(9095);
CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
cards[card_index].usb->start_bm_capture();
}
- //chain->enable_phase_timing(true);
-
// Set up stuff for NV12 conversion.
// Cb/Cr shader.
"void main() { \n"
" gl_FragColor = texture2D(cbcr_tex, tc0); \n"
"} \n";
- cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader);
+ vector<string> frag_shader_outputs;
+ cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader, frag_shader_outputs);
r128.init(2, OUTPUT_FREQUENCY);
r128.integr_start();
// hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
// and there's a limit to how important the peak meter is.
- peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16);
+ peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16, /*frel=*/1.0);
alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
}
{
float m = fabs(samples[0]);
for (size_t i = 1; i < num_samples; ++i) {
- m = std::max(m, fabs(samples[i]));
+ m = max(m, fabs(samples[i]));
}
return m;
}
decode_video_format(video_format, &width, &height, &second_field_start, &extra_lines_top, &extra_lines_bottom,
&frame_rate_nom, &frame_rate_den, &interlaced); // Ignore return value for now.
- int64_t frame_length = TIMEBASE * frame_rate_den / frame_rate_nom;
+ int64_t frame_length = int64_t(TIMEBASE) * frame_rate_den / frame_rate_nom;
size_t num_samples = (audio_frame.len >= audio_offset) ? (audio_frame.len - audio_offset) / 8 / 3 : 0;
if (num_samples > OUTPUT_FREQUENCY / 10) {
// Insert silence as needed.
fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
card_index, dropped_frames, timecode);
- vector<float> silence;
- silence.resize(silence_samples * 2);
+ vector<float> silence(silence_samples * 2, 0.0f);
for (int i = 0; i < dropped_frames; ++i) {
card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence_samples);
// Note that if the format changed in the meantime, we have
if (card->should_quit) return;
}
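+ // The video data is 4:2:2, ie. two bytes per pixel.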
+ size_t expected_length = width * (height + extra_lines_top + extra_lines_bottom) * 2;
if (video_frame.len - video_offset == 0 ||
- video_frame.len - video_offset != size_t(width * (height + extra_lines_top + extra_lines_bottom) * 2)) {
+ video_frame.len - video_offset != expected_length) {
if (video_frame.len != 0) {
- printf("Card %d: Dropping video frame with wrong length (%ld)\n",
- card_index, video_frame.len - video_offset);
+ printf("Card %d: Dropping video frame with wrong length (%ld; expected %ld)\n",
+ card_index, video_frame.len - video_offset, expected_length);
}
if (video_frame.owner) {
video_frame.owner->release_frame(video_frame);
card->new_data_ready = true;
card->new_frame = RefCountedFrame(FrameAllocator::Frame());
card->new_frame_length = frame_length;
+ card->new_frame_interlaced = false;
card->new_data_ready_fence = nullptr;
card->dropped_frames = dropped_frames;
card->new_data_ready_changed.notify_all();
unsigned num_fields = interlaced ? 2 : 1;
timespec frame_upload_start;
if (interlaced) {
- // NOTE: This isn't deinterlacing. This is just sending the two fields along
- // as separate frames without considering anything like the half-field offset.
- // We'll need to add a proper deinterlacer on the receiving side to get this right.
+ // Send the two fields along as separate frames; the other side will need to add
+ // a deinterlacer to actually get this right.
assert(height % 2 == 0);
height /= 2;
assert(frame_length % 2 == 0);
num_fields = 2;
clock_gettime(CLOCK_MONOTONIC, &frame_upload_start);
}
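+ // Store the video format parameters of this frame.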
+ userdata->last_interlaced = interlaced;
+ userdata->last_frame_rate_nom = frame_rate_nom;
+ userdata->last_frame_rate_den = frame_rate_den;
RefCountedFrame new_frame(video_frame);
// Upload the textures.
check_error();
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
check_error();
- glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, video_frame.size);
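+ // Make sure our writes into the mapped PBO (presumably persistently mapped)
+ // are visible to the GPU before the texture upload starts.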
+ glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
check_error();
- //glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
- //check_error();
glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
check_error();
card->new_frame = new_frame;
card->new_frame_length = frame_length;
card->new_frame_field = field;
+ card->new_frame_interlaced = interlaced;
card->new_data_ready_fence = fence;
card->dropped_frames = dropped_frames;
card->new_data_ready_changed.notify_all();
card_copy[card_index].new_frame = card->new_frame;
card_copy[card_index].new_frame_length = card->new_frame_length;
card_copy[card_index].new_frame_field = card->new_frame_field;
+ card_copy[card_index].new_frame_interlaced = card->new_frame_interlaced;
card_copy[card_index].new_data_ready_fence = card->new_data_ready_fence;
card_copy[card_index].dropped_frames = card->dropped_frames;
card->new_data_ready = false;
}
// Resample the audio as needed, including from previously dropped frames.
+ assert(num_cards > 0);
for (unsigned frame_num = 0; frame_num < card_copy[0].dropped_frames + 1; ++frame_num) {
{
// Signal to the audio thread to process this frame.
}
if (audio_level_callback != nullptr) {
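+ // The R128 and correlation meters are updated under compressor_mutex
+ // in the audio path, so hold it while reading them.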
+ unique_lock<mutex> lock(compressor_mutex);
double loudness_s = r128.loudness_S();
double loudness_i = r128.integrated();
double loudness_range_low = r128.range_min();
audio_level_callback(loudness_s, 20.0 * log10(peak),
loudness_i, loudness_range_low, loudness_range_high,
- last_gain_staging_db);
+ gain_staging_db, 20.0 * log10(final_makeup_gain),
+ correlation.get_correlation());
}
for (unsigned card_index = 1; card_index < num_cards; ++card_index) {
continue;
assert(card->new_frame != nullptr);
- if (card->new_frame_interlaced) {
- for (unsigned frame_num = FRAME_HISTORY_LENGTH; frame_num --> 1; ) { // :-)
- buffered_frames[card_index][frame_num] = buffered_frames[card_index][frame_num - 1];
- }
- buffered_frames[card_index][0] = { card->new_frame, card->new_frame_field };
- } else {
- for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
- buffered_frames[card_index][frame_num] = { card->new_frame, card->new_frame_field };
- }
- }
+ insert_new_frame(card->new_frame, card->new_frame_field, card->new_frame_interlaced, card_index, &input_state);
check_error();
// The new texture might still be uploaded,
}
// Get the main chain from the theme, and set its state immediately.
- Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT);
+ Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state);
EffectChain *chain = theme_main_chain.chain;
theme_main_chain.setup_chain();
+ //theme_main_chain.chain->enable_phase_timing(true);
GLuint y_tex, cbcr_tex;
bool got_frame = h264_encoder->begin_frame(&y_tex, &cbcr_tex);
// Set up preview and any additional channels.
for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
DisplayFrame display_frame;
- Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT); // FIXME: dimensions
+ Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state); // FIXME: dimensions
display_frame.chain = chain.chain;
display_frame.setup_chain = chain.setup_chain;
display_frame.ready_fence = fence;
// chain->print_phase_timing();
}
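+ // If a cut was requested, rotate to a new local dump file and restart the encoder.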
+ if (should_cut.exchange(false)) { // Test and clear.
+ string filename = generate_local_dump_filename(frame);
+ printf("Starting new recording: %s\n", filename.c_str());
+ h264_encoder->shutdown();
+ httpd.close_output_file();
+ httpd.open_output_file(filename.c_str());
+ h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, &httpd));
+ }
+
#if 0
// Reset every 100 frames, so that local variations in frame times
// (especially for the first few frames, when the shaders are
// we don't need it for voice, and it will reduce headroom
// and confuse the compressor. (In particular, any hums at 50 or 60 Hz
// should be dampened.)
- locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);
+ if (locut_enabled) {
+ locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);
+ }
// Apply a level compressor to get the general level right.
// Basically, if it's over about -40 dBFS, we squeeze it down to that level
// then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
// entirely arbitrary, but from practical tests with speech, it seems to
// put us around -23 LUFS, so it's a reasonable starting point for later use.
- float ref_level_dbfs = -14.0f;
{
- float threshold = 0.01f; // -40 dBFS.
- float ratio = 20.0f;
- float attack_time = 0.5f;
- float release_time = 20.0f;
- float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f); // +26 dB.
- level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
- last_gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
+ unique_lock<mutex> lock(compressor_mutex);
+ if (level_compressor_enabled) {
+ float threshold = 0.01f; // -40 dBFS.
+ float ratio = 20.0f;
+ float attack_time = 0.5f;
+ float release_time = 20.0f;
+ float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f); // +26 dB.
+ level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
+ gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
+ } else {
+ // Just apply the gain we already had.
+ float g = pow(10.0f, gain_staging_db / 20.0f);
+ for (size_t i = 0; i < samples_out.size(); ++i) {
+ samples_out[i] *= g;
+ }
+ }
}
#if 0
peak_resampler.process();
size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
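+ // out_data points into interpolated_samples_out, which is about to go
+ // out of scope, so clear the pointer.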
+ peak_resampler.out_data = nullptr;
+ }
+
+ // At this point, we are most likely close to +0 LU, but all of our
+ // measurements have been on raw sample values, not R128 values.
+ // So we have a final makeup gain to get us to +0 LU; the gain
+ // adjustments required should be relatively small, and also, the
+ // offset shouldn't change much (only if the type of audio changes
+ // significantly). Thus, we shoot for updating this value basically
+ // “whenever we process buffers”, since the R128 calculation isn't exactly
+ // something we get out per-sample.
+ //
+ // Note that there's a feedback loop here, so we choose a very slow filter
+ // (half-time of 100 seconds).
+ double target_loudness_factor, alpha;
+ {
+ unique_lock<mutex> lock(compressor_mutex);
+ double loudness_lu = r128.loudness_M() - ref_level_lufs;
+ double current_makeup_lu = 20.0f * log10(final_makeup_gain);
+ target_loudness_factor = pow(10.0f, -loudness_lu / 20.0f);
+
+ // If we're outside +/- 5 LU uncorrected, we don't count it as
+ // a normal signal (probably silence) and don't change the
+ // correction factor; just apply what we already have.
+ if (fabs(loudness_lu - current_makeup_lu) >= 5.0 || !final_makeup_gain_auto) {
+ alpha = 0.0;
+ } else {
+ // Formula adapted from
+ // https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter.
+ const double half_time_s = 100.0;
+ const double fc_mul_2pi_delta_t = 1.0 / (half_time_s * OUTPUT_FREQUENCY);
+ alpha = fc_mul_2pi_delta_t / (fc_mul_2pi_delta_t + 1.0);
+ }
+
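+ // Apply the makeup gain, ramping it smoothly across the buffer to avoid clicks.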
+ double m = final_makeup_gain;
+ for (size_t i = 0; i < samples_out.size(); i += 2) {
+ samples_out[i + 0] *= m;
+ samples_out[i + 1] *= m;
+ m += (target_loudness_factor - m) * alpha;
+ }
+ final_makeup_gain = m;
}
- // Find R128 levels.
+ // Find R128 levels and L/R correlation.
vector<float> left, right;
deinterleave_samples(samples_out, &left, &right);
float *ptrs[] = { left.data(), right.data() };
- r128.process(left.size(), ptrs);
+ {
+ unique_lock<mutex> lock(compressor_mutex);
+ r128.process(left.size(), ptrs);
+ correlation.process_samples(samples_out);
+ }
// Send the samples to the sound card.
if (alsa) {
peak = 0.0f;
r128.reset();
r128.integr_start();
+ correlation.reset();
}
Mixer::OutputChannel::~OutputChannel()