+void Mixer::schedule_audio_resampling_tasks(unsigned dropped_frames, int num_samples_per_frame, int length_per_frame)
+{
+ // Resample the audio as needed, including from previously dropped frames.
+ assert(num_cards > 0);
+ for (unsigned frame_num = 0; frame_num < dropped_frames + 1; ++frame_num) {
+ const bool dropped_frame = (frame_num != dropped_frames);
+ {
+ // Signal to the audio thread to process this frame.
+ // Note that if the frame is a dropped frame, we signal that
+ // we don't want to use this frame as base for adjusting
+ // the resampler rate. The reason for this is that the timing
+ // of these frames is often way too late; they typically don't
+ // “arrive” before we synthesize them. Thus, we could end up
+ // in a situation where we have inserted e.g. five audio frames
+ // into the queue before we then start pulling five of them
+ // back out. This makes ResamplingQueue overestimate the delay,
+ // causing undue resampler changes. (We _do_ use the last,
+ // non-dropped frame; perhaps we should just discard that as well,
+ // since dropped frames are expected to be rare, and it might be
+ // better to just wait until we have a slightly more normal situation).
+ unique_lock<mutex> lock(audio_mutex);
+ bool adjust_rate = !dropped_frame;
+ audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame, adjust_rate});
+ audio_task_queue_changed.notify_one();
+ }
+ if (dropped_frame) {
+ // For dropped frames, increase the pts. Note that if the format changed
+ // in the meantime, we have no way of detecting that; we just have to
+ // assume the frame length is always the same.
+ pts_int += length_per_frame;
+ }
+ }
+}
+
// Render one output frame: run the theme's main chain into the encoder's
// textures, hand the result to the video encoder, and fan the rendered image
// out to the live output channel plus all preview/auxiliary channels.
void Mixer::render_one_frame(int64_t duration)
{
	// Get the main chain from the theme, and set its state immediately.
	Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state);
	EffectChain *chain = theme_main_chain.chain;
	theme_main_chain.setup_chain();
	//theme_main_chain.chain->enable_phase_timing(true);

	// The encoder hands us the Y and CbCr destination textures for this frame.
	GLuint y_tex, cbcr_tex;
	bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
	assert(got_frame);

	// Render main chain. Chroma is first rendered at full resolution
	// (cbcr_full_tex) and subsampled into the encoder's cbcr_tex below;
	// the RGBA copy is kept for the on-screen channels.
	GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT);
	GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT);  // Saves texture bandwidth, although dithering gets messed up.
	GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex);
	check_error();
	chain->render_to_fbo(fbo, WIDTH, HEIGHT);
	resource_pool->release_fbo(fbo);

	// Downsample the full-resolution chroma into the encoder's texture,
	// then return the temporary to the pool.
	subsample_chroma(cbcr_full_tex, cbcr_tex);
	resource_pool->release_2d_texture(cbcr_full_tex);

	// Set the right state for rgba_tex.
	glBindFramebuffer(GL_FRAMEBUFFER, 0);
	glBindTexture(GL_TEXTURE_2D, rgba_tex);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	// Hand the frame to the encoder; the returned fence signals when
	// rendering has completed, and is shared by every display frame below.
	const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
	RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);

	// The live frame just shows the RGBA texture we just rendered.
	// It owns rgba_tex now (listed in temp_textures, so it is released
	// when the frame is retired).
	DisplayFrame live_frame;
	live_frame.chain = display_chain.get();
	live_frame.setup_chain = [this, rgba_tex]{
		display_input->set_texture_num(rgba_tex);
	};
	live_frame.ready_fence = fence;
	live_frame.input_frames = {};
	live_frame.temp_textures = { rgba_tex };
	output_channel[OUTPUT_LIVE].output_frame(live_frame);

	// Set up preview and any additional channels. Channel 0 (the live
	// output) was handled above; channels 1 and up come from the theme.
	// NOTE(review): the bound num_channels + 2 presumably accounts for the
	// built-in preview channel(s) on top of the theme's — confirm against
	// the output_channel array size.
	for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
		DisplayFrame display_frame;
		Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state);  // FIXME: dimensions
		display_frame.chain = chain.chain;
		display_frame.setup_chain = chain.setup_chain;
		display_frame.ready_fence = fence;
		display_frame.input_frames = chain.input_frames;
		display_frame.temp_textures = {};
		output_channel[i].output_frame(display_frame);
	}
}
+
+void Mixer::send_audio_level_callback()
+{
+ if (audio_level_callback == nullptr) {
+ return;