X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=mixer.cpp;h=db5980d2d79bbdf30d3b005bd3394644120309a3;hb=f4652550f7952d935640ca97bcd7bdbaa9820762;hp=506af4aeabb95fa8d554cc32baf80dfe5c61b3ff;hpb=fae8d2ae053d580ad27a7a0bd71031e3df8f618f;p=nageru

diff --git a/mixer.cpp b/mixer.cpp
index 506af4a..db5980d 100644
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -1,5 +1,3 @@
-#define WIDTH 1280
-#define HEIGHT 720
 #define EXTRAHEIGHT 30
 
 #undef Success
@@ -66,11 +64,13 @@ void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src
 }  // namespace
 
 Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
-	: httpd("test.ts", WIDTH, HEIGHT),
+	: httpd(LOCAL_DUMP_FILE_NAME, WIDTH, HEIGHT),
 	  num_cards(num_cards),
 	  mixer_surface(create_surface(format)),
 	  h264_encoder_surface(create_surface(format)),
-	  level_compressor(OUTPUT_FREQUENCY)
+	  level_compressor(OUTPUT_FREQUENCY),
+	  limiter(OUTPUT_FREQUENCY),
+	  compressor(OUTPUT_FREQUENCY)
 {
 	httpd.start(9095);
 
@@ -107,7 +107,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 		CaptureCard *card = &cards[card_index];
 		card->usb = new BMUSBCapture(card_index);
 		card->usb->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
-		card->frame_allocator.reset(new PBOFrameAllocator(WIDTH * (HEIGHT+EXTRAHEIGHT) * 2 + 44, WIDTH, HEIGHT));
+		card->frame_allocator.reset(new PBOFrameAllocator(WIDTH * (HEIGHT+EXTRAHEIGHT) * 2 + 44 + 1, WIDTH, HEIGHT));
 		card->usb->set_video_frame_allocator(card->frame_allocator.get());
 		card->surface = create_surface(format);
 		card->usb->set_dequeue_thread_callbacks(
@@ -122,7 +122,7 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 			[this]{
 				resource_pool->clean_context();
 			});
-		card->resampler.reset(new Resampler(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
+		card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
 		card->usb->configure_card();
 	}
 
@@ -151,6 +151,12 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 	r128.integr_start();
 
 	locut.init(FILTER_HPF, 2);
+
+	// hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
+	// and there's a limit to how important the peak meter is.
+	peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16);
+
+	alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
 }
 
 Mixer::~Mixer()
@@ -166,6 +172,8 @@ Mixer::~Mixer()
 		}
 		cards[card_index].usb->stop_dequeue_thread();
 	}
+
+	h264_encoder.reset(nullptr);
 }
 
 namespace {
@@ -180,10 +188,10 @@ int unwrap_timecode(uint16_t current_wrapped, int last)
 	}
 }
 
-float find_peak(const vector<float> &samples)
+float find_peak(const float *samples, size_t num_samples)
 {
 	float m = fabs(samples[0]);
-	for (size_t i = 1; i < samples.size(); ++i) {
+	for (size_t i = 1; i < num_samples; ++i) {
 		m = std::max(m, fabs(samples[i]));
 	}
 	return m;
 }
@@ -204,6 +212,56 @@ void deinterleave_samples(const vector<float> &in, vector<float> *out_l, vector<float> *out_r)
 	}
 }
 
+// Returns length of a frame with the given format, in TIMEBASE units.
+int64_t find_frame_length(uint16_t video_format)
+{
+	if (video_format == 0x0800) {
+		// No video signal. These green pseudo-frames seem to come at about 30.13 Hz.
+		// It's a strange thing, but what can you do.
+		return TIMEBASE * 100 / 3013;
+	}
+	if ((video_format & 0xe800) != 0xe800) {
+		printf("Video format 0x%04x does not appear to be a video format. Assuming 60 Hz.\n",
+			video_format);
+		return TIMEBASE / 60;
+	}
+
+	// 0x8 seems to be a flag about availability of deep color on the input,
+	// except when it's not (e.g. it's the only difference between NTSC 23.98
+	// and PAL). Rather confusing. But we clear it here nevertheless, because
+	// usually it doesn't mean anything.
+	//
+	// We don't really handle interlaced formats at all yet.
+	uint16_t normalized_video_format = video_format & ~0xe808;
+	if (normalized_video_format == 0x0143) {         // 720p50.
+		return TIMEBASE / 50;
+	} else if (normalized_video_format == 0x0103) {  // 720p60.
+		return TIMEBASE / 60;
+	} else if (normalized_video_format == 0x0121) {  // 720p59.94.
+		return TIMEBASE * 1001 / 60000;
+	} else if (normalized_video_format == 0x01c3 ||  // 1080p30.
+	           normalized_video_format == 0x0003) {  // 1080i60.
+		return TIMEBASE / 30;
+	} else if (normalized_video_format == 0x01e1 ||  // 1080p29.97.
+	           normalized_video_format == 0x0021 ||  // 1080i59.94.
+	           video_format == 0xe901 ||             // NTSC (480i59.94, I suppose).
+	           video_format == 0xe9c1 ||             // Ditto.
+	           video_format == 0xe801) {             // Ditto.
+		return TIMEBASE * 1001 / 30000;
+	} else if (normalized_video_format == 0x0063 ||  // 1080p25.
+	           normalized_video_format == 0x0043 ||  // 1080i50.
+	           video_format == 0xe909) {             // PAL (576i50, I suppose).
+		return TIMEBASE / 25;
+	} else if (normalized_video_format == 0x008e) {  // 1080p24.
+		return TIMEBASE / 24;
+	} else if (normalized_video_format == 0x00a1) {  // 1080p23.98.
+		return TIMEBASE * 1001 / 24000;
+	} else {
+		printf("Unknown video format 0x%04x. Assuming 60 Hz.\n", video_format);
+		return TIMEBASE / 60;
+	}
+}
+
 }  // namespace
 
 void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
@@ -212,6 +271,8 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 {
 	CaptureCard *card = &cards[card_index];
 
+	int64_t frame_length = find_frame_length(video_format);
+
 	if (audio_frame.len - audio_offset > 30000) {
 		printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x)\n",
 			card_index, int(audio_frame.len), int(audio_offset),
@@ -225,13 +286,12 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 		return;
 	}
 
-	int unwrapped_timecode = timecode;
+	int64_t local_pts = card->next_local_pts;
 	int dropped_frames = 0;
 	if (card->last_timecode != -1) {
-		unwrapped_timecode = unwrap_timecode(unwrapped_timecode, card->last_timecode);
-		dropped_frames = unwrapped_timecode - card->last_timecode - 1;
+		dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
 	}
-	card->last_timecode = unwrapped_timecode;
+	card->last_timecode = timecode;
 
 	// Convert the audio to stereo fp32 and add it.
 	size_t num_samples = (audio_frame.len >= audio_offset) ? (audio_frame.len - audio_offset) / 8 / 3 : 0;
@@ -243,22 +303,27 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 
 	{
 		unique_lock<mutex> lock(card->audio_mutex);
 
-		int unwrapped_timecode = timecode;
-		if (dropped_frames > FPS * 2) {
+		if (dropped_frames > MAX_FPS * 2) {
 			fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around), resetting resampler\n",
 				card_index);
-			card->resampler.reset(new Resampler(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
+			card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
 		} else if (dropped_frames > 0) {
-			// Insert silence as needed.
+			// Insert silence as needed. (The number of samples could be nonintegral,
+			// but resampling will save us then.)
 			fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
 				card_index, dropped_frames, timecode);
 			vector<float> silence;
-			silence.resize((OUTPUT_FREQUENCY / FPS) * 2);
+			silence.resize((OUTPUT_FREQUENCY * frame_length / TIMEBASE) * 2);
 			for (int i = 0; i < dropped_frames; ++i) {
-				card->resampler->add_input_samples((unwrapped_timecode - dropped_frames + i) / double(FPS), silence.data(), (OUTPUT_FREQUENCY / FPS));
+				card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence.size() / 2);
+				// Note that if the format changed in the meantime, we have
+				// no way of detecting that; we just have to assume the frame length
+				// is always the same.
+				local_pts += frame_length;
 			}
 		}
-		card->resampler->add_input_samples(unwrapped_timecode / double(FPS), audio.data(), num_samples);
+		card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
+		card->next_local_pts = local_pts + frame_length;
 	}
 
 	// Done with the audio, so release it.
@@ -288,6 +353,7 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 		unique_lock<mutex> lock(bmusb_mutex);
 		card->new_data_ready = true;
 		card->new_frame = RefCountedFrame(FrameAllocator::Frame());
+		card->new_frame_length = frame_length;
 		card->new_data_ready_fence = nullptr;
 		card->dropped_frames = dropped_frames;
 		card->new_data_ready_changed.notify_all();
@@ -324,6 +390,7 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 		unique_lock<mutex> lock(bmusb_mutex);
 		card->new_data_ready = true;
 		card->new_frame = RefCountedFrame(video_frame);
+		card->new_frame_length = frame_length;
 		card->new_data_ready_fence = fence;
 		card->dropped_frames = dropped_frames;
 		card->new_data_ready_changed.notify_all();
@@ -347,6 +414,7 @@ void Mixer::thread_func()
 
 	while (!should_quit) {
 		CaptureCard card_copy[MAX_CARDS];
+		int num_samples[MAX_CARDS];
 
 		{
 			unique_lock<mutex> lock(bmusb_mutex);
@@ -360,20 +428,32 @@ void Mixer::thread_func()
 			card_copy[card_index].usb = card->usb;
 			card_copy[card_index].new_data_ready = card->new_data_ready;
 			card_copy[card_index].new_frame = card->new_frame;
+			card_copy[card_index].new_frame_length = card->new_frame_length;
 			card_copy[card_index].new_data_ready_fence = card->new_data_ready_fence;
 			card_copy[card_index].dropped_frames = card->dropped_frames;
 			card->new_data_ready = false;
 			card->new_data_ready_changed.notify_all();
+
+			int num_samples_times_timebase = OUTPUT_FREQUENCY * card->new_frame_length + card->fractional_samples;
+			num_samples[card_index] = num_samples_times_timebase / TIMEBASE;
+			card->fractional_samples = num_samples_times_timebase % TIMEBASE;
 		}
 	}
 
 	// Resample the audio as needed, including from previously dropped frames.
 	for (unsigned frame_num = 0; frame_num < card_copy[0].dropped_frames + 1; ++frame_num) {
-		process_audio_one_frame();
+		{
+			// Signal to the audio thread to process this frame.
+			unique_lock<mutex> lock(audio_mutex);
+			audio_task_queue.push(AudioTask{pts_int, num_samples[0]});
+			audio_task_queue_changed.notify_one();
+		}
 		if (frame_num != card_copy[0].dropped_frames) {
-			// For dropped frames, increase the pts.
+			// For dropped frames, increase the pts. Note that if the format changed
+			// in the meantime, we have no way of detecting that; we just have to
+			// assume the frame length is always the same.
 			++dropped_frames;
-			pts_int += TIMEBASE / FPS;
+			pts_int += card_copy[0].new_frame_length;
 		}
 	}
 
@@ -402,7 +482,7 @@ void Mixer::thread_func()
 		// just increase the pts (skipping over this frame) and don't try to compute anything new.
 		if (card_copy[0].new_frame->len == 0) {
 			++dropped_frames;
-			pts_int += TIMEBASE / FPS;
+			pts_int += card_copy[0].new_frame_length;
 			continue;
 		}
 
@@ -464,10 +544,10 @@ void Mixer::thread_func()
 		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
 			input_frames.push_back(bmusb_current_rendering_frame[card_index]);
 		}
-		const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampler.h. TODO: Make less hard-coded.
+		const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
 		h264_encoder->end_frame(fence, pts_int + av_delay, input_frames);
 		++frame;
-		pts_int += TIMEBASE / FPS;
+		pts_int += card_copy[0].new_frame_length;
 
 		// The live frame just shows the RGBA texture we just rendered.
 		// It owns rgba_tex now.
@@ -523,15 +603,31 @@ void Mixer::thread_func()
 	resource_pool->clean_context();
 }
 
-void Mixer::process_audio_one_frame()
+void Mixer::audio_thread_func()
+{
+	while (!should_quit) {
+		AudioTask task;
+
+		{
+			unique_lock<mutex> lock(audio_mutex);
+			audio_task_queue_changed.wait(lock, [this]{ return !audio_task_queue.empty(); });
+			task = audio_task_queue.front();
+			audio_task_queue.pop();
+		}
+
+		process_audio_one_frame(task.pts_int, task.num_samples);
+	}
+}
+
+void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
 {
 	vector<float> samples_card;
 	vector<float> samples_out;
 	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
-		samples_card.resize((OUTPUT_FREQUENCY / FPS) * 2);
+		samples_card.resize(num_samples * 2);
 		{
 			unique_lock<mutex> lock(cards[card_index].audio_mutex);
-			if (!cards[card_index].resampler->get_output_samples(pts(), &samples_card[0], OUTPUT_FREQUENCY / FPS)) {
+			if (!cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], num_samples)) {
 				printf("Card %d reported previous underrun.\n", card_index);
 			}
 		}
@@ -541,10 +637,11 @@ void Mixer::process_audio_one_frame()
 		}
 	}
 
-	// Cut away everything under 150 Hz; we don't need it for voice,
-	// and it will reduce headroom and confuse the compressor.
-	// (In particular, any hums at 50 or 60 Hz should be dampened.)
-	locut.render(samples_out.data(), samples_out.size() / 2, 150.0 * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);
+	// Cut away everything under 120 Hz (or whatever the cutoff is);
+	// we don't need it for voice, and it will reduce headroom
+	// and confuse the compressor. (In particular, any hums at 50 or 60 Hz
+	// should be dampened.)
+	locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);
 
 	// Apply a level compressor to get the general level right.
 	// Basically, if it's over about -40 dBFS, we squeeze it down to that level
@@ -552,16 +649,16 @@ void Mixer::process_audio_one_frame()
 	// (or more precisely, near it, depending on the compression ratio used),
 	// then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
 	// entirely arbitrary, but from practical tests with speech, it seems to
 	// put it around -23 LUFS, so it's a reasonable starting point for later use.
 	//
-	// TODO: Add the actual compressors/limiters (for taking care of transients)
-	// later in the chain.
-	float threshold = 0.01f;   // -40 dBFS.
-	float ratio = 20.0f;
-	float attack_time = 0.5f;
-	float release_time = 20.0f;
-	float makeup_gain = pow(10.0f, 26.0f / 20.0f);   // +26 dB takes us to -14 dBFS.
-	level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
-	last_gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
+	float ref_level_dbfs = -14.0f;
+	{
+		float threshold = 0.01f;   // -40 dBFS.
+		float ratio = 20.0f;
+		float attack_time = 0.5f;
+		float release_time = 20.0f;
+		float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f);   // +26 dB.
+		level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
+		last_gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
+	}
 
 #if 0
 	printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n",
@@ -570,15 +667,60 @@ void Mixer::process_audio_one_frame()
 		20.0 * log10(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain));
 #endif
 
-	// Find peak and R128 levels.
-	peak = std::max(peak, find_peak(samples_out));
+//	float limiter_att, compressor_att;
+
+	// The real compressor.
+	if (compressor_enabled) {
+		float threshold = pow(10.0f, compressor_threshold_dbfs / 20.0f);
+		float ratio = 20.0f;
+		float attack_time = 0.005f;
+		float release_time = 0.040f;
+		float makeup_gain = 2.0f;   // +6 dB.
+		compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
+//		compressor_att = compressor.get_attenuation();
+	}
+
+	// Finally a limiter at -4 dB (so, -10 dBFS) to take out the worst peaks only.
+	// Note that since ratio is not infinite, we could go slightly higher than this.
+	if (limiter_enabled) {
+		float threshold = pow(10.0f, limiter_threshold_dbfs / 20.0f);
+		float ratio = 30.0f;
+		float attack_time = 0.0f;   // Instant.
+		float release_time = 0.020f;
+		float makeup_gain = 1.0f;   // 0 dB.
+		limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
+//		limiter_att = limiter.get_attenuation();
+	}
+
+//	printf("limiter=%+5.1f  compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));
+
+	// Upsample 4x to find interpolated peak.
+	peak_resampler.inp_data = samples_out.data();
+	peak_resampler.inp_count = samples_out.size() / 2;
+
+	vector<float> interpolated_samples_out;
+	interpolated_samples_out.resize(samples_out.size());
+	while (peak_resampler.inp_count > 0) {  // About four iterations.
+		peak_resampler.out_data = &interpolated_samples_out[0];
+		peak_resampler.out_count = interpolated_samples_out.size() / 2;
+		peak_resampler.process();
+		size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
+		peak = max(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
+	}
+
+	// Find R128 levels.
+	vector<float> left, right;
 	deinterleave_samples(samples_out, &left, &right);
 	float *ptrs[] = { left.data(), right.data() };
 	r128.process(left.size(), ptrs);
 
-	// Actually add the samples to the output.
-	h264_encoder->add_audio(pts_int, move(samples_out));
+	// Send the samples to the sound card.
+	if (alsa) {
+		alsa->write(samples_out);
+	}
+
+	// And finally add them to the output.
+	h264_encoder->add_audio(frame_pts_int, move(samples_out));
 }
 
 void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
@@ -648,12 +790,14 @@ void Mixer::release_display_frame(DisplayFrame *frame)
 void Mixer::start()
 {
 	mixer_thread = thread(&Mixer::thread_func, this);
+	audio_thread = thread(&Mixer::audio_thread_func, this);
 }
 
 void Mixer::quit()
 {
 	should_quit = true;
	mixer_thread.join();
+	audio_thread.join();
 }
 
 void Mixer::transition_clicked(int transition_num)
@@ -666,6 +810,14 @@ void Mixer::channel_clicked(int preview_num)
 	theme->channel_clicked(preview_num);
 }
 
+void Mixer::reset_meters()
+{
+	peak_resampler.reset();
+	peak = 0.0f;
+	r128.reset();
+	r128.integr_start();
+}
+
 Mixer::OutputChannel::~OutputChannel()
 {
 	if (has_current_frame) {
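Worked example (not part of the patch): the fractional_samples bookkeeping introduced in thread_func() is what keeps audio and video in step when the frame rate does not divide the sample rate evenly. The standalone sketch below assumes TIMEBASE = 90000 and OUTPUT_FREQUENCY = 48000 -- the values live in Nageru's defs.h, which this diff does not touch.

// fractional_samples_demo.cpp -- sketch of the pacing scheme used above:
// each video frame requests OUTPUT_FREQUENCY * frame_length / TIMEBASE
// audio samples, and the division remainder is carried into the next
// frame so that rounding never accumulates into drift.
#include <cstdint>
#include <cstdio>

constexpr int64_t TIMEBASE = 90000;          // Assumed, from defs.h.
constexpr int64_t OUTPUT_FREQUENCY = 48000;  // Assumed, from defs.h.

int main()
{
	// 720p59.94: find_frame_length() returns TIMEBASE * 1001 / 60000 = 1501.
	const int64_t frame_length = TIMEBASE * 1001 / 60000;

	int64_t fractional_samples = 0;  // Carried remainder, in TIMEBASE units.
	for (int frame = 0; frame < 8; ++frame) {
		int64_t num_samples_times_timebase =
			OUTPUT_FREQUENCY * frame_length + fractional_samples;
		int64_t num_samples = num_samples_times_timebase / TIMEBASE;
		fractional_samples = num_samples_times_timebase % TIMEBASE;
		printf("frame %d: %lld samples\n", frame, (long long)num_samples);
	}
	// One frame is worth 48000 * 1501 / 90000 = 800.53 samples, so the
	// printed counts alternate 800/801; plain integer division would
	// hand out 800 every frame and slowly fall behind the video clock.
	return 0;
}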