-int H264Encoder::save_codeddata(storage_task task)
+void H264Encoder::save_codeddata(storage_task task)
{
VACodedBufferSegment *buf_list = NULL;
VAStatus va_status;
httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
}
// Encode and add all audio frames up to and including the pts of this video frame.
- // (They can never be queued to us after the video frame they belong to, only before.)
for ( ;; ) {
int64_t audio_pts;
std::vector<float> audio;
{
unique_lock<mutex> lock(frame_queue_mutex);
- if (pending_audio_frames.empty()) break;
+ frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || !pending_audio_frames.empty(); });
+ if (copy_thread_should_quit) return;
auto it = pending_audio_frames.begin();
if (it->first > task.pts) break;
audio_pts = it->first;
// TODO: Delayed frames.
avcodec_free_frame(&frame);
av_free_packet(&pkt);
+ if (audio_pts == task.pts) break;
}
#if 0
printf("%08lld", encode_order);
printf("(%06d bytes coded)", coded_size);
#endif
-
- return 0;
}
{
unique_lock<mutex> lock(frame_queue_mutex);
copy_thread_should_quit = true;
- frame_queue_nonempty.notify_one();
+ frame_queue_nonempty.notify_all();
}
storage_thread.join();
copy_thread.join();
unique_lock<mutex> lock(frame_queue_mutex);
pending_audio_frames[pts] = move(audio);
}
- frame_queue_nonempty.notify_one();
+ frame_queue_nonempty.notify_all();
}
-
void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
{
{
pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
++current_storage_frame;
}
- frame_queue_nonempty.notify_one();
+ frame_queue_nonempty.notify_all();
}
void H264Encoder::copy_thread_func()
void copy_thread_func();
void storage_task_thread();
void storage_task_enqueue(storage_task task);
- int save_codeddata(storage_task task);
+ void save_codeddata(storage_task task);
std::thread copy_thread, storage_thread;
// Resample the audio as needed, including from previously dropped frames.
for (unsigned frame_num = 0; frame_num < card_copy[0].dropped_frames + 1; ++frame_num) {
- process_audio_one_frame();
+ {
+ // Signal to the audio thread to process this frame.
+ unique_lock<mutex> lock(audio_mutex);
+ audio_pts_queue.push(pts_int);
+ audio_pts_queue_changed.notify_one();
+ }
if (frame_num != card_copy[0].dropped_frames) {
// For dropped frames, increase the pts.
++dropped_frames;
resource_pool->clean_context();
}
-void Mixer::process_audio_one_frame()
+void Mixer::audio_thread_func()
+{
+	// Drains audio_pts_queue (fed by the mixer thread) and processes one
+	// audio frame per queued video pts, off the video mixing thread.
+	while (!should_quit) {
+		int64_t frame_pts_int;
+
+		{
+			unique_lock<mutex> lock(audio_mutex);
+			// Include should_quit in the predicate (same pattern as the
+			// encoder's copy thread above): otherwise, once the queue is
+			// drained, this thread sleeps forever and quit() hangs in
+			// audio_thread.join(). quit() must notify audio_pts_queue_changed
+			// after setting should_quit for this wakeup to happen.
+			audio_pts_queue_changed.wait(lock, [this]{ return should_quit || !audio_pts_queue.empty(); });
+			if (should_quit) {
+				return;
+			}
+			frame_pts_int = audio_pts_queue.front();
+			audio_pts_queue.pop();
+		}
+
+		process_audio_one_frame(frame_pts_int);
+	}
+}
+
+void Mixer::process_audio_one_frame(int64_t frame_pts_int)
{
vector<float> samples_card;
vector<float> samples_out;
samples_card.resize((OUTPUT_FREQUENCY / FPS) * 2);
{
unique_lock<mutex> lock(cards[card_index].audio_mutex);
- if (!cards[card_index].resampling_queue->get_output_samples(pts(), &samples_card[0], OUTPUT_FREQUENCY / FPS)) {
+ if (!cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], OUTPUT_FREQUENCY / FPS)) {
printf("Card %d reported previous underrun.\n", card_index);
}
}
}
// And finally add them to the output.
- h264_encoder->add_audio(pts_int, move(samples_out));
+ h264_encoder->add_audio(frame_pts_int, move(samples_out));
}
void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
void Mixer::start()
{
mixer_thread = thread(&Mixer::thread_func, this);
+	// Audio now runs on its own thread (audio_thread_func), fed through
+	// audio_pts_queue, so it no longer runs inline in the mixer loop.
+	audio_thread = thread(&Mixer::audio_thread_func, this);
}
void Mixer::quit()
{
should_quit = true;
mixer_thread.join();
+ audio_thread.join();
}
void Mixer::transition_clicked(int transition_num)
FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
void thread_func();
- void process_audio_one_frame();
+ void audio_thread_func();
+ void process_audio_one_frame(int64_t frame_pts_int);
void subsample_chroma(GLuint src_tex, GLuint dst_dst);
void release_display_frame(DisplayFrame *frame);
double pts() { return double(pts_int) / TIMEBASE; }
OutputChannel output_channel[NUM_OUTPUTS];
std::thread mixer_thread;
- bool should_quit = false;
+ std::thread audio_thread;
+ std::atomic<bool> should_quit{false};
audio_level_callback_t audio_level_callback = nullptr;
Ebu_r128_proc r128;
std::atomic<bool> compressor_enabled{true};
std::unique_ptr<ALSAOutput> alsa;
+
+ std::mutex audio_mutex;
+ std::condition_variable audio_pts_queue_changed;
+ std::queue<int64_t> audio_pts_queue;
};
extern Mixer *global_mixer;