From: Steinar H. Gunderson
Date: Mon, 27 Feb 2017 22:18:24 +0000 (+0100)
Subject: Make the API for begin_frame()/end_frame() in VideoEncoder a bit more sensible.
X-Git-Tag: 1.5.0~18
X-Git-Url: https://git.sesse.net/?p=nageru;a=commitdiff_plain;h=e066f18188fde1e6bd0b698c89427119cbffaaa3

Make the API for begin_frame()/end_frame() in VideoEncoder a bit more sensible.
---

diff --git a/mixer.cpp b/mixer.cpp
index f7b6a7c..79d8e97 100644
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -1000,8 +1000,9 @@ void Mixer::render_one_frame(int64_t duration)
 	theme_main_chain.setup_chain();
 	//theme_main_chain.chain->enable_phase_timing(true);
 
+	const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
 	GLuint y_tex, cbcr_tex;
-	bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
+	bool got_frame = video_encoder->begin_frame(pts_int + av_delay, duration, theme_main_chain.input_frames, &y_tex, &cbcr_tex);
 	assert(got_frame);
 
 	// Render main chain.
@@ -1031,8 +1032,7 @@ void Mixer::render_one_frame(int64_t duration)
 	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
 	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
 
-	const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
-	RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
+	RefCountedGLsync fence = video_encoder->end_frame();
 
 	// The live frame just shows the RGBA texture we just rendered.
 	// It owns rgba_tex now.
diff --git a/quicksync_encoder.cpp b/quicksync_encoder.cpp
index 118fa85..2e8633d 100644
--- a/quicksync_encoder.cpp
+++ b/quicksync_encoder.cpp
@@ -1595,7 +1595,7 @@ void QuickSyncEncoderImpl::release_gl_surface(size_t display_frame_num)
 	}
 }
 
-bool QuickSyncEncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+bool QuickSyncEncoderImpl::begin_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
 {
 	assert(!is_shutdown);
 	GLSurface *surf = nullptr;
@@ -1669,6 +1669,8 @@ bool QuickSyncEncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 		glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image);
 	}
 
+	current_video_frame = PendingFrame{ {}, input_frames, pts, duration };
+
 	return true;
 }
 
@@ -1678,7 +1680,7 @@ void QuickSyncEncoderImpl::add_audio(int64_t pts, vector<float> audio)
 	file_audio_encoder->encode_audio(audio, pts + global_delay());
 }
 
-RefCountedGLsync QuickSyncEncoderImpl::end_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync QuickSyncEncoderImpl::end_frame()
 {
 	assert(!is_shutdown);
 
@@ -1722,7 +1724,8 @@ RefCountedGLsync QuickSyncEncoderImpl::end_frame(int64_t pts, int64_t duration,
 
 	{
 		unique_lock<mutex> lock(frame_queue_mutex);
-		pending_video_frames.push(PendingFrame{ fence, input_frames, pts, duration });
+		current_video_frame.fence = fence;
+		pending_video_frames.push(move(current_video_frame));
 		++current_storage_frame;
 	}
 	frame_queue_nonempty.notify_all();
@@ -2032,14 +2035,14 @@ void QuickSyncEncoder::add_audio(int64_t pts, vector<float> audio)
 	impl->add_audio(pts, audio);
 }
 
-bool QuickSyncEncoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+bool QuickSyncEncoder::begin_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
 {
-	return impl->begin_frame(y_tex, cbcr_tex);
+	return impl->begin_frame(pts, duration, input_frames, y_tex, cbcr_tex);
 }
 
-RefCountedGLsync QuickSyncEncoder::end_frame(int64_t pts, int64_t duration, const vector<RefCountedFrame> &input_frames)
+RefCountedGLsync QuickSyncEncoder::end_frame()
 {
-	return impl->end_frame(pts, duration, input_frames);
+	return impl->end_frame();
 }
 
 void QuickSyncEncoder::shutdown()
diff --git a/quicksync_encoder.h b/quicksync_encoder.h
index ab66d65..caa6586 100644
--- a/quicksync_encoder.h
+++ b/quicksync_encoder.h
@@ -60,8 +60,8 @@ public:
 
 	void set_stream_mux(Mux *mux);  // Does not take ownership. Must be called unless x264 is used for the stream.
 	void add_audio(int64_t pts, std::vector<float> audio);
-	bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-	RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+	bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+	RefCountedGLsync end_frame();
 	void shutdown();  // Blocking. Does not require an OpenGL context.
 	void release_gl_resources();  // Requires an OpenGL context. Must be run after shutdown.
 	int64_t global_delay() const;  // So we never get negative dts.
diff --git a/quicksync_encoder_impl.h b/quicksync_encoder_impl.h
index 453a7f6..b55edbb 100644
--- a/quicksync_encoder_impl.h
+++ b/quicksync_encoder_impl.h
@@ -35,8 +35,8 @@ public:
 	QuickSyncEncoderImpl(const std::string &filename, movit::ResourcePool *resource_pool, QSurface *surface, const std::string &va_display, int width, int height, AVOutputFormat *oformat, X264Encoder *x264_encoder, DiskSpaceEstimator *disk_space_estimator);
 	~QuickSyncEncoderImpl();
 	void add_audio(int64_t pts, std::vector<float> audio);
-	bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-	RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+	bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+	RefCountedGLsync end_frame();
 	void shutdown();
 	void release_gl_resources();
 	void set_stream_mux(Mux *mux)
@@ -146,6 +146,7 @@ private:
 
 	int current_storage_frame;
 
+	PendingFrame current_video_frame;  // Used only between begin_frame() and end_frame().
 	std::queue<PendingFrame> pending_video_frames;  // under frame_queue_mutex
 	movit::ResourcePool *resource_pool;
 	QSurface *surface;
diff --git a/video_encoder.cpp b/video_encoder.cpp
index f736987..e00465c 100644
--- a/video_encoder.cpp
+++ b/video_encoder.cpp
@@ -120,17 +120,17 @@ void VideoEncoder::add_audio(int64_t pts, std::vector<float> audio)
 	stream_audio_encoder->encode_audio(audio, pts + quicksync_encoder->global_delay());
 }
 
-bool VideoEncoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+bool VideoEncoder::begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
 {
 	lock_guard<mutex> lock(qs_mu);
 	qs_needing_cleanup.clear();  // Since we have an OpenGL context here, and are called regularly.
-	return quicksync_encoder->begin_frame(y_tex, cbcr_tex);
+	return quicksync_encoder->begin_frame(pts, duration, input_frames, y_tex, cbcr_tex);
 }
 
-RefCountedGLsync VideoEncoder::end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames)
+RefCountedGLsync VideoEncoder::end_frame()
 {
 	lock_guard<mutex> lock(qs_mu);
-	return quicksync_encoder->end_frame(pts, duration, input_frames);
+	return quicksync_encoder->end_frame();
 }
 
 void VideoEncoder::open_output_stream()
diff --git a/video_encoder.h b/video_encoder.h
index acb8340..368037d 100644
--- a/video_encoder.h
+++ b/video_encoder.h
@@ -40,8 +40,18 @@ public:
 	~VideoEncoder();
 	void add_audio(int64_t pts, std::vector<float> audio);
-	bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-	RefCountedGLsync end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames);
+
+	// Allocate a frame to render into. The returned two textures
+	// are yours to render into (build them into an FBO).
+	// Call end_frame() when you're done.
+	bool begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
+
+	// Call after you are done rendering into the frame; at this point,
+	// y_tex and cbcr_tex will be assumed done, and handed over to the
+	// encoder. The returned fence is purely a convenience; you do not
+	// need to use it for anything, but it's useful if you wanted to set
+	// one anyway.
+	RefCountedGLsync end_frame();
 
 	// Does a cut of the disk stream immediately ("frame" is used for the filename only).
 	void do_cut(int frame);
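
For illustration only (not part of the patch): a minimal sketch of how a caller drives the reworked API, modeled on the Mixer::render_one_frame() hunk above. render_into_textures() is a hypothetical helper, and the exact header names are assumptions about the Nageru tree.

// Usage sketch only; assumes Nageru's headers and an active OpenGL context.
#include <cassert>
#include <cstdint>
#include <vector>

#include <epoxy/gl.h>

#include "video_encoder.h"

// Hypothetical helper: bind y_tex/cbcr_tex to an FBO and draw the frame.
void render_into_textures(GLuint y_tex, GLuint cbcr_tex);

void encode_one_frame(VideoEncoder *video_encoder,
                      int64_t pts_int, int64_t duration, int64_t av_delay,
                      const std::vector<RefCountedFrame> &input_frames)
{
	GLuint y_tex, cbcr_tex;

	// pts, duration and the input frames are handed over up front now,
	// so the encoder can keep them around until the frame is finished.
	bool got_frame = video_encoder->begin_frame(
		pts_int + av_delay, duration, input_frames, &y_tex, &cbcr_tex);
	assert(got_frame);

	render_into_textures(y_tex, cbcr_tex);

	// end_frame() no longer takes arguments; it hands y_tex/cbcr_tex over to
	// the encoder and returns a fence the caller may wait on, but need not.
	RefCountedGLsync fence = video_encoder->end_frame();
	(void)fence;
}

In the patch itself, begin_frame() stashes pts, duration and input_frames in current_video_frame, and end_frame() merely attaches the fence before queuing it, so the caller no longer has to carry those values across its rendering code.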