X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=mixer.cpp;h=72022026f771056e0211ccac874e69c8ee0ed22a;hb=65d716be70e6295628dfa5bb0a72f3429b9696ba;hp=57cd5bdaf2d30daeabc370bffdffaa7703239ef7;hpb=2464152a8c084f39b52e063da888a6a0b7ea9306;p=nageru

diff --git a/mixer.cpp b/mixer.cpp
index 57cd5bd..7202202 100644
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -59,10 +59,42 @@ void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src
 	}
 }
 
+void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced, unsigned card_index, InputState *input_state)
+{
+	if (interlaced) {
+		for (unsigned frame_num = FRAME_HISTORY_LENGTH; frame_num --> 1; ) {  // :-)
+			input_state->buffered_frames[card_index][frame_num] =
+				input_state->buffered_frames[card_index][frame_num - 1];
+		}
+		input_state->buffered_frames[card_index][0] = { frame, field_num };
+	} else {
+		for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
+			input_state->buffered_frames[card_index][frame_num] = { frame, field_num };
+		}
+	}
+}
+
+string generate_local_dump_filename(int frame)
+{
+	time_t now = time(NULL);
+	tm now_tm;
+	localtime_r(&now, &now_tm);
+
+	char timestamp[256];
+	strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);
+
+	// Use the frame number to disambiguate between two cuts starting
+	// on the same second.
+	char filename[256];
+	snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
+		LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
+	return filename;
+}
+
 }  // namespace
 
 Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
-	: httpd(LOCAL_DUMP_FILE_NAME, WIDTH, HEIGHT),
+	: httpd(WIDTH, HEIGHT),
 	  num_cards(num_cards),
 	  mixer_surface(create_surface(format)),
 	  h264_encoder_surface(create_surface(format)),
@@ -70,6 +102,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 	  limiter(OUTPUT_FREQUENCY),
 	  compressor(OUTPUT_FREQUENCY)
 {
+	httpd.open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
 	httpd.start(9095);
 
 	CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
@@ -130,8 +163,6 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 		cards[card_index].usb->start_bm_capture();
 	}
 
-	//chain->enable_phase_timing(true);
-
 	// Set up stuff for NV12 conversion.
 
 	// Cb/Cr shader.
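As a rough illustration of what the new insert_new_frame() helper above does (the same logic later replaces the inline bookkeeping in thread_func(); see the hunk further down): an interlaced field shifts the existing history down one slot and becomes slot 0, while a progressive frame overwrites every slot. The sketch below is standalone and not part of the patch; the plain int "frame id" and the FRAME_HISTORY_LENGTH value merely stand in for the real RefCountedFrame/field pair and constant.

// Standalone sketch (not Nageru code); types and constants are stand-ins.
#include <array>
#include <cstdio>

constexpr unsigned FRAME_HISTORY_LENGTH = 5;

void insert_new_frame(int frame_id, bool interlaced,
                      std::array<int, FRAME_HISTORY_LENGTH> *history)
{
	if (interlaced) {
		// Newest field goes in slot 0; older fields shift down one slot.
		for (unsigned i = FRAME_HISTORY_LENGTH - 1; i >= 1; --i) {
			(*history)[i] = (*history)[i - 1];
		}
		(*history)[0] = frame_id;
	} else {
		// A progressive frame replaces the entire history.
		history->fill(frame_id);
	}
}

int main()
{
	std::array<int, FRAME_HISTORY_LENGTH> history{};
	for (int id = 1; id <= 3; ++id) {
		insert_new_frame(id, /*interlaced=*/true, &history);
	}
	for (int id : history) {
		printf("%d ", id);  // Prints "3 2 1 0 0": newest field first.
	}
	printf("\n");
}

Keeping the older fields around for interlaced input is what gives the deinterlacer that the bm_frame() hunk below says is still needed on the receiving side more than one field to work with.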
@@ -143,7 +174,8 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 		"void main() { \n"
 		"    gl_FragColor = texture2D(cbcr_tex, tc0); \n"
 		"} \n";
-	cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader);
+	vector<string> frag_shader_outputs;
+	cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader, frag_shader_outputs);
 
 	r128.init(2, OUTPUT_FREQUENCY);
 	r128.integr_start();
@@ -190,7 +222,7 @@ float find_peak(const float *samples, size_t num_samples)
 {
 	float m = fabs(samples[0]);
 	for (size_t i = 1; i < num_samples; ++i) {
-		m = std::max(m, fabs(samples[i]));
+		m = max(m, fabs(samples[i]));
 	}
 	return m;
 }
@@ -298,11 +330,12 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 		if (card->should_quit) return;
 	}
 
+	size_t expected_length = width * (height + extra_lines_top + extra_lines_bottom) * 2;
 	if (video_frame.len - video_offset == 0 ||
-	    video_frame.len - video_offset != size_t(width * (height + extra_lines_top + extra_lines_bottom) * 2)) {
+	    video_frame.len - video_offset != expected_length) {
 		if (video_frame.len != 0) {
-			printf("Card %d: Dropping video frame with wrong length (%ld)\n",
-				card_index, video_frame.len - video_offset);
+			printf("Card %d: Dropping video frame with wrong length (%ld; expected %ld)\n",
+				card_index, video_frame.len - video_offset, expected_length);
 		}
 		if (video_frame.owner) {
 			video_frame.owner->release_frame(video_frame);
@@ -328,9 +361,8 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 	unsigned num_fields = interlaced ? 2 : 1;
 	timespec frame_upload_start;
 	if (interlaced) {
-		// NOTE: This isn't deinterlacing. This is just sending the two fields along
-		// as separate frames without considering anything like the half-field offset.
-		// We'll need to add a proper deinterlacer on the receiving side to get this right.
+		// Send the two fields along as separate frames; the other side will need to add
+		// a deinterlacer to actually get this right.
 		assert(height % 2 == 0);
 		height /= 2;
 		assert(frame_length % 2 == 0);
@@ -338,6 +370,9 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 		num_fields = 2;
 		clock_gettime(CLOCK_MONOTONIC, &frame_upload_start);
 	}
+	userdata->last_interlaced = interlaced;
+	userdata->last_frame_rate_nom = frame_rate_nom;
+	userdata->last_frame_rate_den = frame_rate_den;
 	RefCountedFrame new_frame(video_frame);
 
 	// Upload the textures.
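For reference, the expected_length check and the interlaced field splitting above are plain arithmetic: two bytes per pixel (presumably 8-bit 4:2:2 from the capture card), blanking lines included, and each field of an interlaced frame covering half the height and half the frame duration. A standalone sketch with made-up numbers, not part of the patch:

// Standalone sketch (not Nageru code); all concrete numbers are illustrative.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
	size_t width = 1920, height = 1080;
	size_t extra_lines_top = 25, extra_lines_bottom = 0;  // hypothetical blanking lines
	bool interlaced = true;

	// Two bytes per pixel, blanking lines included.
	size_t expected_length = width * (height + extra_lines_top + extra_lines_bottom) * 2;
	printf("expected frame length: %zu bytes\n", expected_length);

	// An interlaced frame is sent as two half-height fields, each covering
	// half of the original frame's duration.
	int64_t frame_length = 100000;  // one full frame in some fixed timebase (made-up value)
	unsigned num_fields = interlaced ? 2 : 1;
	if (interlaced) {
		height /= 2;
		frame_length /= 2;
	}
	printf("%u field(s), %zu lines each, %lld ticks each\n",
	       num_fields, height, (long long)frame_length);
}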
@@ -371,10 +406,8 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 		check_error();
 		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
 		check_error();
-		glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, video_frame.size);
+		glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
 		check_error();
-		//glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
-		//check_error();
 
 		glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
 		check_error();
@@ -493,6 +526,7 @@ void Mixer::thread_func()
 		}
 
 		if (audio_level_callback != nullptr) {
+			unique_lock<mutex> lock(compressor_mutex);
 			double loudness_s = r128.loudness_S();
 			double loudness_i = r128.integrated();
 			double loudness_range_low = r128.range_min();
@@ -500,7 +534,7 @@ void Mixer::thread_func()
 
 			audio_level_callback(loudness_s, 20.0 * log10(peak),
 			                     loudness_i, loudness_range_low, loudness_range_high,
-			                     last_gain_staging_db);
+			                     gain_staging_db);
 		}
 
 		for (unsigned card_index = 1; card_index < num_cards; ++card_index) {
@@ -527,16 +561,7 @@ void Mixer::thread_func()
 				continue;
 
 			assert(card->new_frame != nullptr);
-			if (card->new_frame_interlaced) {
-				for (unsigned frame_num = FRAME_HISTORY_LENGTH; frame_num --> 1; ) {  // :-)
-					buffered_frames[card_index][frame_num] = buffered_frames[card_index][frame_num - 1];
-				}
-				buffered_frames[card_index][0] = { card->new_frame, card->new_frame_field };
-			} else {
-				for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
-					buffered_frames[card_index][frame_num] = { card->new_frame, card->new_frame_field };
-				}
-			}
+			insert_new_frame(card->new_frame, card->new_frame_field, card->new_frame_interlaced, card_index, &input_state);
 			check_error();
 
 			// The new texture might still be uploaded,
@@ -550,9 +575,10 @@ void Mixer::thread_func()
 		}
 
 		// Get the main chain from the theme, and set its state immediately.
-		Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT);
+		Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state);
 		EffectChain *chain = theme_main_chain.chain;
 		theme_main_chain.setup_chain();
+		//theme_main_chain.chain->enable_phase_timing(true);
 
 		GLuint y_tex, cbcr_tex;
 		bool got_frame = h264_encoder->begin_frame(&y_tex, &cbcr_tex);
@@ -599,7 +625,7 @@ void Mixer::thread_func()
 		// Set up preview and any additional channels.
 		for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
 			DisplayFrame display_frame;
-			Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT);  // FIXME: dimensions
+			Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state);  // FIXME: dimensions
 			display_frame.chain = chain.chain;
 			display_frame.setup_chain = chain.setup_chain;
 			display_frame.ready_fence = fence;
@@ -618,6 +644,15 @@ void Mixer::thread_func()
 		//	chain->print_phase_timing();
 		}
 
+		if (should_cut.exchange(false)) {  // Test and clear.
+			string filename = generate_local_dump_filename(frame);
+			printf("Starting new recording: %s\n", filename.c_str());
+			h264_encoder->shutdown();
+			httpd.close_output_file();
+			httpd.open_output_file(filename.c_str());
+			h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, &httpd));
+		}
+
 #if 0
 		// Reset every 100 frames, so that local variations in frame times
 		// (especially for the first few frames, when the shaders are
@@ -680,15 +715,23 @@ void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
 	// then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
 	// entirely arbitrary, but from practical tests with speech, it seems to
 	// put it around -23 LUFS, so it's a reasonable starting point for later use.
-	float ref_level_dbfs = -14.0f;
 	{
-		float threshold = 0.01f;  // -40 dBFS.
-		float ratio = 20.0f;
-		float attack_time = 0.5f;
-		float release_time = 20.0f;
-		float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f);  // +26 dB.
-		level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
-		last_gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
+		unique_lock<mutex> lock(level_compressor_mutex);
+		if (level_compressor_enabled) {
+			float threshold = 0.01f;  // -40 dBFS.
+			float ratio = 20.0f;
+			float attack_time = 0.5f;
+			float release_time = 20.0f;
+			float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f);  // +26 dB.
+			level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
+			gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
+		} else {
+			// Just apply the gain we already had.
+			float g = pow(10.0f, gain_staging_db / 20.0f);
+			for (size_t i = 0; i < samples_out.size(); ++i) {
+				samples_out[i] *= g;
+			}
+		}
 	}
 
 #if 0
@@ -743,7 +786,10 @@ void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
 	vector<float> left, right;
 	deinterleave_samples(samples_out, &left, &right);
 	float *ptrs[] = { left.data(), right.data() };
-	r128.process(left.size(), ptrs);
+	{
+		unique_lock<mutex> lock(compressor_mutex);
+		r128.process(left.size(), ptrs);
+	}
 
 	// Send the samples to the sound card.
 	if (alsa) {
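As a footnote to the gain-staging hunk: the constants are related by ordinary dB-to-linear conversions, with threshold = 0.01f corresponding to -40 dBFS and the makeup gain lifting that threshold up to the -14 dBFS reference level (+26 dB). A standalone sketch of that arithmetic, not part of the patch; the example gain_staging_db value is made up:

// Standalone sketch (not Nageru code) of the dB <-> linear relationships above.
#include <cmath>
#include <cstdio>

float db_to_linear(float db) { return powf(10.0f, db / 20.0f); }

int main()
{
	const float ref_level_dbfs = -14.0f;  // target level after makeup gain
	const float threshold_dbfs = -40.0f;  // matches threshold = 0.01f

	printf("threshold as linear gain: %.4f\n", db_to_linear(threshold_dbfs));  // 0.0100
	printf("makeup gain: %.2fx (%+.0f dB)\n",
	       db_to_linear(ref_level_dbfs - threshold_dbfs),
	       ref_level_dbfs - threshold_dbfs);  // ~19.95x (+26 dB)

	// The new "else" branch simply reapplies the stored gain_staging_db as a flat gain:
	float gain_staging_db = 26.0f;  // made-up example value
	printf("stored gain reapplied: %.2fx\n", db_to_linear(gain_staging_db));
}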