-// float limiter_att, compressor_att;
-
- // Then a limiter at +0 dB (so, -14 dBFS) to take out the worst peaks only.
- // Note that since ratio is not infinite, we could go slightly higher than this.
- // Probably more tuning is warranted here.
- if (limiter_enabled) {
- float threshold = pow(10.0f, limiter_threshold_dbfs / 20.0f);
- float ratio = 30.0f;
- float attack_time = 0.0f; // Instant.
- float release_time = 0.020f;
- float makeup_gain = 1.0f; // 0 dB.
- limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
-// limiter_att = limiter.get_attenuation();
- }
-
- // Finally, the real compressor.
- if (compressor_enabled) {
- float threshold = pow(10.0f, compressor_threshold_dbfs / 20.0f);
- float ratio = 20.0f;
- float attack_time = 0.005f;
- float release_time = 0.040f;
- float makeup_gain = 2.0f; // +6 dB.
- compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
-// compressor_att = compressor.get_attenuation();
- }
-
-// printf("limiter=%+5.1f compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));
-
- // Upsample 4x to find interpolated peak.
- peak_resampler.inp_data = samples_out.data();
- peak_resampler.inp_count = samples_out.size() / 2;
-
- vector<float> interpolated_samples_out;
- interpolated_samples_out.resize(samples_out.size());
- while (peak_resampler.inp_count > 0) { // About four iterations.
- peak_resampler.out_data = &interpolated_samples_out[0];
- peak_resampler.out_count = interpolated_samples_out.size() / 2;
- peak_resampler.process();
- size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
- peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
- }
-
- // Find R128 levels.
- vector<float> left, right;
- deinterleave_samples(samples_out, &left, &right);
- float *ptrs[] = { left.data(), right.data() };
- r128.process(left.size(), ptrs);
-
- // Actually add the samples to the output.
- h264_encoder->add_audio(pts_int, move(samples_out));
+void Mixer::schedule_audio_resampling_tasks(unsigned dropped_frames, int num_samples_per_frame, int length_per_frame)
+{
+ // Resample the audio as needed, including from previously dropped frames.
+ assert(num_cards > 0);
+ for (unsigned frame_num = 0; frame_num < dropped_frames + 1; ++frame_num) {
+ const bool dropped_frame = (frame_num != dropped_frames);
+ {
+ // Signal to the audio thread to process this frame.
+ // Note that if the frame is a dropped frame, we signal that
+		// we don't want to use this frame as a base for adjusting
+ // the resampler rate. The reason for this is that the timing
+ // of these frames is often way too late; they typically don't
+ // “arrive” before we synthesize them. Thus, we could end up
+ // in a situation where we have inserted e.g. five audio frames
+ // into the queue before we then start pulling five of them
+ // back out. This makes ResamplingQueue overestimate the delay,
+ // causing undue resampler changes. (We _do_ use the last,
+ // non-dropped frame; perhaps we should just discard that as well,
+ // since dropped frames are expected to be rare, and it might be
+ // better to just wait until we have a slightly more normal situation).
+ unique_lock<mutex> lock(audio_mutex);
+ bool adjust_rate = !dropped_frame;
+ audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame, adjust_rate});
+ audio_task_queue_changed.notify_one();
+ }
+ if (dropped_frame) {
+ // For dropped frames, increase the pts. Note that if the format changed
+ // in the meantime, we have no way of detecting that; we just have to
+ // assume the frame length is always the same.
+ pts_int += length_per_frame;
+ }
+ }
+}
+
+void Mixer::render_one_frame(int64_t duration)
+{
+ // Get the main chain from the theme, and set its state immediately.
+ Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state);
+ EffectChain *chain = theme_main_chain.chain;
+ theme_main_chain.setup_chain();
+ //theme_main_chain.chain->enable_phase_timing(true);
+
+ GLuint y_tex, cbcr_tex;
+ bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
+ assert(got_frame);
+
+ // Render main chain.
+ GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT);
+ GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT); // Saves texture bandwidth, although dithering gets messed up.
+ GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex);
+ check_error();
+ chain->render_to_fbo(fbo, WIDTH, HEIGHT);
+ resource_pool->release_fbo(fbo);
+
+ subsample_chroma(cbcr_full_tex, cbcr_tex);
+ resource_pool->release_2d_texture(cbcr_full_tex);
+
+ // Set the right state for rgba_tex.
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+ glBindTexture(GL_TEXTURE_2D, rgba_tex);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ const int64_t av_delay = TIMEBASE / 10; // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
+ RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
+
+ // The live frame just shows the RGBA texture we just rendered.
+ // It owns rgba_tex now.
+ DisplayFrame live_frame;
+ live_frame.chain = display_chain.get();
+ live_frame.setup_chain = [this, rgba_tex]{
+ display_input->set_texture_num(rgba_tex);
+ };
+ live_frame.ready_fence = fence;
+ live_frame.input_frames = {};
+ live_frame.temp_textures = { rgba_tex };
+ output_channel[OUTPUT_LIVE].output_frame(live_frame);
+
+ // Set up preview and any additional channels.
+ for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
+ DisplayFrame display_frame;
+ Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state); // FIXME: dimensions
+ display_frame.chain = chain.chain;
+ display_frame.setup_chain = chain.setup_chain;
+ display_frame.ready_fence = fence;
+ display_frame.input_frames = chain.input_frames;
+ display_frame.temp_textures = {};
+ output_channel[i].output_frame(display_frame);
+ }
+}
+
+void Mixer::audio_thread_func()
+{
+ while (!should_quit) {
+ AudioTask task;
+
+ {
+ unique_lock<mutex> lock(audio_mutex);
+ audio_task_queue_changed.wait(lock, [this]{ return should_quit || !audio_task_queue.empty(); });
+ if (should_quit) {
+ return;
+ }
+ task = audio_task_queue.front();
+ audio_task_queue.pop();
+ }
+
+ ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy =
+ task.adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE;
+ vector<float> samples_out = audio_mixer.get_output(
+ double(task.pts_int) / TIMEBASE,
+ task.num_samples,
+ rate_adjustment_policy);
+
+ // Send the samples to the sound card, then add them to the output.
+ if (alsa) {
+ alsa->write(samples_out);
+ }
+ video_encoder->add_audio(task.pts_int, move(samples_out));
+ }