X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=video_encoder.h;h=d51399e17a82c71bcbf31fbe7ba28115fc768b85;hb=2abf57fbc06f52c04fb2ca1f765459908e688890;hp=e1518aec1fb3067c7bbfa56b24aa51b61448a1da;hpb=de2324b9ad89aa5fbeb0cb8ef499d74bb9bcef14;p=nageru

diff --git a/video_encoder.h b/video_encoder.h
index e1518ae..d51399e 100644
--- a/video_encoder.h
+++ b/video_encoder.h
@@ -42,9 +42,22 @@ public:
 	void add_audio(int64_t pts, std::vector<float> audio);
 
+	bool is_zerocopy() const;
+
 	// Allocate a frame to render into. The returned two textures
 	// are yours to render into (build them into an FBO).
 	// Call end_frame() when you're done.
+	//
+	// The semantics of y_tex and cbcr_tex depend on is_zerocopy():
+	//
+	//   - If false, they are input parameters, i.e., the caller
+	//     allocates textures. (The contents are not read before
+	//     end_frame() is called.)
+	//   - If true, they are output parameters, i.e., VideoEncoder
+	//     allocates textures and lends them to you for rendering.
+	//     In this case, after end_frame(), you are no longer allowed
+	//     to use the textures; they are torn down and given to the
+	//     H.264 encoder.
 	bool begin_frame(int64_t pts, int64_t duration, movit::YCbCrLumaCoefficients ycbcr_coefficients, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex);
 
 	// Call after you are done rendering into the frame; at this point,
@@ -65,7 +78,7 @@ private:
 	int write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
 
 	AVOutputFormat *oformat;
-	std::mutex qs_mu;
+	mutable std::mutex qs_mu;
 	std::unique_ptr<QuickSyncEncoder> quicksync_encoder;  // Under <qs_mu>.
 	movit::ResourcePool *resource_pool;
 	QSurface *surface;
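
For context, here is a minimal caller-side sketch of the contract the new comment describes. Only is_zerocopy(), begin_frame(), and end_frame() come from the header above; the encode_one_frame() wrapper, the render_into_fbo() helper, the width/height parameters, the GL_R8/GL_RG8 storage with 4:2:0 sizing, and the assumption that end_frame() takes no arguments are illustrative stand-ins, not part of the patch.

// Sketch only: shows how a caller might honor the zero-copy contract.
// Assumes "video_encoder.h" pulls in the types used in its declarations
// (VideoEncoder, RefCountedFrame, movit::YCbCrLumaCoefficients) and that
// GL entry points come from a loader such as libepoxy, as nageru uses.

#include <epoxy/gl.h>
#include <stdint.h>
#include <vector>

#include "video_encoder.h"

// Hypothetical rendering hook; a real caller would build the two textures
// into an FBO and draw the frame into it here.
void render_into_fbo(GLuint y_tex, GLuint cbcr_tex);

void encode_one_frame(VideoEncoder *encoder, int64_t pts, int64_t duration,
                      movit::YCbCrLumaCoefficients ycbcr_coefficients,
                      const std::vector<RefCountedFrame> &input_frames,
                      unsigned width, unsigned height)
{
	GLuint y_tex = 0, cbcr_tex = 0;

	if (!encoder->is_zerocopy()) {
		// Non-zero-copy mode: y_tex/cbcr_tex are inputs, so the caller
		// allocates them. Their contents are not read before end_frame().
		// Sizes assume 4:2:0 (NV12-style) chroma subsampling.
		glGenTextures(1, &y_tex);
		glBindTexture(GL_TEXTURE_2D, y_tex);
		glTexStorage2D(GL_TEXTURE_2D, 1, GL_R8, width, height);
		glGenTextures(1, &cbcr_tex);
		glBindTexture(GL_TEXTURE_2D, cbcr_tex);
		glTexStorage2D(GL_TEXTURE_2D, 1, GL_RG8, width / 2, height / 2);
	}

	// In zero-copy mode, begin_frame() fills in y_tex/cbcr_tex instead.
	if (!encoder->begin_frame(pts, duration, ycbcr_coefficients, input_frames,
	                          &y_tex, &cbcr_tex)) {
		return;  // Encoder declined the frame.
	}

	render_into_fbo(y_tex, cbcr_tex);

	// end_frame()'s signature is not shown in this hunk; it is assumed here
	// to take no arguments.
	encoder->end_frame();

	// After end_frame():
	//  - zero-copy:     the textures belong to the H.264 encoder; do not
	//                   touch them again.
	//  - non-zero-copy: the caller still owns y_tex/cbcr_tex and can reuse
	//                   or delete them once the encoder is done reading.
}

The second hunk, which makes qs_mu mutable, presumably fits the same change: is_zerocopy() is declared const, yet it would need to take the lock that guards quicksync_encoder.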