git.sesse.net Git - nageru/commitdiff
Move audio processing into a thread, since it seems we want as little as possible...
author Steinar H. Gunderson <sgunderson@bigfoot.com>
Sun, 15 Nov 2015 14:24:15 +0000 (15:24 +0100)
committer Steinar H. Gunderson <sgunderson@bigfoot.com>
Sun, 15 Nov 2015 14:24:15 +0000 (15:24 +0100)
h264encode.cpp
h264encode.h
mixer.cpp
mixer.h

index 3d8b463173aab3b5d2f3486d6b02688dc2db8f57..d20683518784fd51c1a15d8089b46e26d7981678 100644 (file)
@@ -1653,7 +1653,7 @@ static int render_slice(void)
 
 
 
-int H264Encoder::save_codeddata(storage_task task)
+void H264Encoder::save_codeddata(storage_task task)
 {    
     VACodedBufferSegment *buf_list = NULL;
     VAStatus va_status;
@@ -1690,13 +1690,13 @@ int H264Encoder::save_codeddata(storage_task task)
         httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
     }
     // Encode and add all audio frames up to and including the pts of this video frame.
-    // (They can never be queued to us after the video frame they belong to, only before.)
     for ( ;; ) {
         int64_t audio_pts;
         std::vector<float> audio;
         {
              unique_lock<mutex> lock(frame_queue_mutex);
-             if (pending_audio_frames.empty()) break;
+             frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || !pending_audio_frames.empty(); });
+             if (copy_thread_should_quit) return;
              auto it = pending_audio_frames.begin();
              if (it->first > task.pts) break;
              audio_pts = it->first;
@@ -1734,6 +1734,7 @@ int H264Encoder::save_codeddata(storage_task task)
         // TODO: Delayed frames.
         avcodec_free_frame(&frame);
         av_free_packet(&pkt);
+        if (audio_pts == task.pts) break;
     }
 
 #if 0
@@ -1755,8 +1756,6 @@ int H264Encoder::save_codeddata(storage_task task)
     printf("%08lld", encode_order);
     printf("(%06d bytes coded)", coded_size);
 #endif
-
-    return 0;
 }
 
 
@@ -1904,7 +1903,7 @@ H264Encoder::~H264Encoder()
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                copy_thread_should_quit = true;
-               frame_queue_nonempty.notify_one();
+               frame_queue_nonempty.notify_all();
        }
        storage_thread.join();
        copy_thread.join();
@@ -1983,10 +1982,9 @@ void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_audio_frames[pts] = move(audio);
        }
-       frame_queue_nonempty.notify_one();
+       frame_queue_nonempty.notify_all();
 }
 
-
 void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
 {
        {
@@ -1994,7 +1992,7 @@ void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vect
                pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
                ++current_storage_frame;
        }
-       frame_queue_nonempty.notify_one();
+       frame_queue_nonempty.notify_all();
 }
 
 void H264Encoder::copy_thread_func()
index 2bbbed50f57069dfe4249eb0c31713d3d2d38cdc..b6f783746777680644ff72eb5ddf75d30f35e552 100644 (file)
@@ -85,7 +85,7 @@ private:
        void copy_thread_func();
        void storage_task_thread();
        void storage_task_enqueue(storage_task task);
-       int save_codeddata(storage_task task);
+       void save_codeddata(storage_task task);
 
        std::thread copy_thread, storage_thread;
 
index f044ecd2f743c24855eefb474cfb2a158341fb01..239575f6dc82a1c969597474d425b75f47a8b638 100644 (file)
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -379,7 +379,12 @@ void Mixer::thread_func()
 
                // Resample the audio as needed, including from previously dropped frames.
                for (unsigned frame_num = 0; frame_num < card_copy[0].dropped_frames + 1; ++frame_num) {
-                       process_audio_one_frame();
+                       {
+                               // Signal to the audio thread to process this frame.
+                               unique_lock<mutex> lock(audio_mutex);
+                               audio_pts_queue.push(pts_int);
+                               audio_pts_queue_changed.notify_one();
+                       }
                        if (frame_num != card_copy[0].dropped_frames) {
                                // For dropped frames, increase the pts.
                                ++dropped_frames;
@@ -533,7 +538,23 @@ void Mixer::thread_func()
        resource_pool->clean_context();
 }
 
-void Mixer::process_audio_one_frame()
+void Mixer::audio_thread_func()
+{
+       while (!should_quit) {
+               int64_t frame_pts_int;
+
+               {
+                       unique_lock<mutex> lock(audio_mutex);
+                       audio_pts_queue_changed.wait(lock, [this]{ return !audio_pts_queue.empty(); });
+                       frame_pts_int = audio_pts_queue.front();
+                       audio_pts_queue.pop();
+               }
+
+               process_audio_one_frame(frame_pts_int);
+       }
+}
+
+void Mixer::process_audio_one_frame(int64_t frame_pts_int)
 {
        vector<float> samples_card;
        vector<float> samples_out;
@@ -541,7 +562,7 @@ void Mixer::process_audio_one_frame()
                samples_card.resize((OUTPUT_FREQUENCY / FPS) * 2);
                {
                        unique_lock<mutex> lock(cards[card_index].audio_mutex);
-                       if (!cards[card_index].resampling_queue->get_output_samples(pts(), &samples_card[0], OUTPUT_FREQUENCY / FPS)) {
+                       if (!cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], OUTPUT_FREQUENCY / FPS)) {
                                printf("Card %d reported previous underrun.\n", card_index);
                        }
                }
@@ -633,7 +654,7 @@ void Mixer::process_audio_one_frame()
        }
 
        // And finally add them to the output.
-       h264_encoder->add_audio(pts_int, move(samples_out));
+       h264_encoder->add_audio(frame_pts_int, move(samples_out));
 }
 
 void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
@@ -703,12 +724,14 @@ void Mixer::release_display_frame(DisplayFrame *frame)
 void Mixer::start()
 {
        mixer_thread = thread(&Mixer::thread_func, this);
+       audio_thread = thread(&Mixer::audio_thread_func, this);
 }
 
 void Mixer::quit()
 {
        should_quit = true;
        mixer_thread.join();
+       audio_thread.join();
 }
 
 void Mixer::transition_clicked(int transition_num)
diff --git a/mixer.h b/mixer.h
index 89a80dd52fabaccd8e81f1743c491f41b4d329ff..8f74cc53206300ff6ee3f036dbd99859489351f4 100644 (file)
--- a/mixer.h
+++ b/mixer.h
@@ -176,7 +176,8 @@ private:
                FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
        void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
        void thread_func();
-       void process_audio_one_frame();
+       void audio_thread_func();
+       void process_audio_one_frame(int64_t frame_pts_int);
        void subsample_chroma(GLuint src_tex, GLuint dst_dst);
        void release_display_frame(DisplayFrame *frame);
        double pts() { return double(pts_int) / TIMEBASE; }
@@ -240,7 +241,8 @@ private:
        OutputChannel output_channel[NUM_OUTPUTS];
 
        std::thread mixer_thread;
-       bool should_quit = false;
+       std::thread audio_thread;
+       std::atomic<bool> should_quit{false};
 
        audio_level_callback_t audio_level_callback = nullptr;
        Ebu_r128_proc r128;
@@ -265,6 +267,10 @@ private:
        std::atomic<bool> compressor_enabled{true};
 
        std::unique_ptr<ALSAOutput> alsa;
+
+       std::mutex audio_mutex;
+       std::condition_variable audio_pts_queue_changed;
+       std::queue<int64_t> audio_pts_queue;
 };
 
 extern Mixer *global_mixer;