stream_audio_encoder->encode_audio(audio, pts + quicksync_encoder->global_delay());
}
-bool VideoEncoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
+ // Prepare output textures for the next frame. The frame's timing metadata
+ // (pts, duration) and the input reference frames are now supplied here,
+ // up front, instead of at end_frame() — presumably so the Quick Sync
+ // encoder knows the frame's timing before rendering; confirm against
+ // QuickSyncEncoder::begin_frame(). Forwarded under qs_mu.
+bool VideoEncoder::begin_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames, GLuint *y_tex, GLuint *cbcr_tex)
{
lock_guard<mutex> lock(qs_mu);
qs_needing_cleanup.clear(); // Since we have an OpenGL context here, and are called regularly.
- return quicksync_encoder->begin_frame(y_tex, cbcr_tex);
+ return quicksync_encoder->begin_frame(pts, duration, input_frames, y_tex, cbcr_tex);
}
-RefCountedGLsync VideoEncoder::end_frame(int64_t pts, int64_t duration, const std::vector<RefCountedFrame> &input_frames)
+ // Finish the current frame. The pts/duration/input_frames parameters have
+ // moved to begin_frame(), so this now takes no arguments; the call is
+ // simply forwarded to the Quick Sync encoder while holding qs_mu.
+RefCountedGLsync VideoEncoder::end_frame()
{
lock_guard<mutex> lock(qs_mu);
- return quicksync_encoder->end_frame(pts, duration, input_frames);
+ return quicksync_encoder->end_frame();
}
void VideoEncoder::open_output_stream()
int VideoEncoder::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
+ if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
+ seen_sync_markers = true;
+ } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
+ // We don't know if this is a keyframe or not (the muxer could
+ // avoid marking it), so we just have to make the best of it.
+ type = AVIO_DATA_MARKER_SYNC_POINT;
+ }
+
if (type == AVIO_DATA_MARKER_HEADER) {
stream_mux_header.append((char *)buf, buf_size);
httpd->set_header(stream_mux_header);