git.sesse.net Git - nageru/commitdiff
Make the API for begin_frame()/end_frame() in VideoEncoder a bit more sensible.
author: Steinar H. Gunderson <sgunderson@bigfoot.com>
Mon, 27 Feb 2017 22:18:24 +0000 (23:18 +0100)
committer: Steinar H. Gunderson <sgunderson@bigfoot.com>
Mon, 27 Feb 2017 22:18:24 +0000 (23:18 +0100)
mixer.cpp
quicksync_encoder.cpp
quicksync_encoder.h
quicksync_encoder_impl.h
video_encoder.cpp
video_encoder.h

index f7b6a7c90ea6c87ab5c090a1d5724398dbba4d70..79d8e97eb9b9391cda7ceaf2ad686719a9f00caf 100644 (file)
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -1000,8 +1000,9 @@ void Mixer::render_one_frame(int64_t duration)
        theme_main_chain.setup_chain();
        //theme_main_chain.chain->enable_phase_timing(true);
 
+       const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
        GLuint y_tex, cbcr_tex;
-       bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
+       bool got_frame = video_encoder->begin_frame(pts_int + av_delay, duration, theme_main_chain.input_frames, &y_tex, &cbcr_tex);
        assert(got_frame);
 
        // Render main chain.
@@ -1031,8 +1032,7 @@ void Mixer::render_one_frame(int64_t duration)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
 
-       const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
-       RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
+       RefCountedGLsync fence = video_encoder->end_frame();
 
        // The live frame just shows the RGBA texture we just rendered.
        // It owns rgba_tex now.
index 118fa85c3eca9a448cd134074f1e4e4ef234274c..2e8633d3cf72bc55d7fdd84f057bb50a0c7c1683 100644 (file)
@@ -1595,7 +1595,7 @@ void QuickSyncEncoderImpl::release_gl_surface(size_t display_frame_num)
        }
 }
 
-bool QuickSyncEncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+bool QuickSyncEncoderImpl::begin_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
 {
        assert(!is_shutdown);
        GLSurface *surf = nullptr;
@@ -1669,6 +1669,8 @@ bool QuickSyncEncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
                glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image);
        }
 
+       current_video_frame = PendingFrame{ {}, input_frames, pts, duration };
+
        return true;
 }
 
@@ -1678,7 +1680,7 @@ void QuickSyncEncoderImpl::add_audio(int64_t pts, vector<float> audio)
        file_audio_encoder->encode_audio(audio, pts + global_delay());
 }
 
-RefCountedGLsync QuickSyncEncoderImpl::end_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync QuickSyncEncoderImpl::end_frame()
 {
        assert(!is_shutdown);
 
@@ -1722,7 +1724,8 @@ RefCountedGLsync QuickSyncEncoderImpl::end_frame(int64_t pts, int64_t duration,
 
        {
                unique_lock<mutex> lock(frame_queue_mutex);
-               pending_video_frames.push(PendingFrame{ fence, input_frames, pts, duration });
+               current_video_frame.fence = fence;
+               pending_video_frames.push(move(current_video_frame));
                ++current_storage_frame;
        }
        frame_queue_nonempty.notify_all();
@@ -2032,14 +2035,14 @@ void QuickSyncEncoder::add_audio(int64_t pts, vector<float> audio)
        impl->add_audio(pts, audio);
 }
 
-bool QuickSyncEncoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+bool QuickSyncEncoder::begin_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
 {
-       return impl->begin_frame(y_tex, cbcr_tex);
+       return impl->begin_frame(pts, duration, input_frames, y_tex, cbcr_tex);
 }
 
-RefCountedGLsync QuickSyncEncoder::end_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync QuickSyncEncoder::end_frame()
 {
-       return impl->end_frame(pts, duration, input_frames);
+       return impl->end_frame();
 }
 
 void QuickSyncEncoder::shutdown()
index ab66d65a6280cc593320edb013733085b3595365..caa6586f6a5b57d5fe63958e41da5506ba92ce88 100644 (file)
@@ -60,8 +60,8 @@ public:
 
        void set_stream_mux(Mux *mux);  // Does not take ownership. Must be called unless x264 is used for the stream.
        void add_audio(int64_t pts, std::vector<float> audio);
-       bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-       RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+       bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+       RefCountedGLsync end_frame();
        void shutdown();  // Blocking. Does not require an OpenGL context.
        void release_gl_resources();  // Requires an OpenGL context. Must be run after shutdown.
        int64_t global_delay() const;  // So we never get negative dts.
index 453a7f6628450f70a07f7429d043f9e1ddce079e..b55edbbc58c6586ff8645fe3b0fbdd08844212f4 100644 (file)
@@ -35,8 +35,8 @@ public:
        QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator);
        ~QuickSyncEncoderImpl();
        void add_audio(int64_t pts, std::vector<float> audio);
-       bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-       RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+       bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+       RefCountedGLsync end_frame();
        void shutdown();
        void release_gl_resources();
        void set_stream_mux(Mux *mux)
@@ -146,6 +146,7 @@ private:
 
        int current_storage_frame;
 
+       PendingFrame current_video_frame;  // Used only between begin_frame() and end_frame().
        std::queue<PendingFrame> pending_video_frames;  // under frame_queue_mutex
        movit::ResourcePool *resource_pool;
        QSurface *surface;
index f7369877b7b55f67a4af3e795a5ed99bb5d88e21..e00465c25a67b8b72e364e39ae29db897c1c76c3 100644 (file)
@@ -120,17 +120,17 @@ void VideoEncoder::add_audio(int64_t pts, std::vector<float> audio)
        stream_audio_encoder->encode_audio(audio, pts + quicksync_encoder->global_delay());
 }
 
-bool VideoEncoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+bool VideoEncoder::begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
 {
        lock_guard<mutex> lock(qs_mu);
        qs_needing_cleanup.clear();  // Since we have an OpenGL context here, and are called regularly.
-       return quicksync_encoder->begin_frame(y_tex, cbcr_tex);
+       return quicksync_encoder->begin_frame(pts, duration, input_frames, y_tex, cbcr_tex);
 }
 
-RefCountedGLsync VideoEncoder::end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames)
+RefCountedGLsync VideoEncoder::end_frame()
 {
        lock_guard<mutex> lock(qs_mu);
-       return quicksync_encoder->end_frame(pts, duration, input_frames);
+       return quicksync_encoder->end_frame();
 }
 
 void VideoEncoder::open_output_stream()
index acb8340fdee333c16b86f9b7e690e7c311afecea..368037d94fbdfcebcd098047a5acfc3e878dffa4 100644 (file)
@@ -40,8 +40,18 @@ public:
        ~VideoEncoder();
 
        void add_audio(int64_t pts, std::vector<float> audio);
-       bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-       RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+
+       // Allocate a frame to render into. The returned two textures
+       // are yours to render into (build them into an FBO).
+       // Call end_frame() when you're done.
+       bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+
+       // Call after you are done rendering into the frame; at this point,
+       // y_tex and cbcr_tex will be assumed done, and handed over to the
+       // encoder. The returned fence is purely a convenience; you do not
+       // need to use it for anything, but it's useful if you wanted to set
+       // one anyway.
+       RefCountedGLsync end_frame();
 
        // Does a cut of the disk stream immediately ("frame" is used for the filename only).
        void do_cut(int frame);