void VideoStream::schedule_original_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts)
{
+ fprintf(stderr, "output_pts=%ld original input_pts=%ld\n", output_pts, input_pts);
+
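+ // No GPU work is needed for an original frame; just queue it for the
+ // encode thread to pick up.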
QueuedFrame qf;
qf.type = QueuedFrame::ORIGINAL;
qf.output_pts = output_pts;
void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha)
{
+ fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);
+
// Get the temporary OpenGL resources we need for doing the interpolation.
InterpolatedFrameResources resources;
{
// Convert frame0 and frame1 to OpenGL textures.
// TODO: Deduplicate against JPEGFrameView::setDecodedFrame?
for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
- shared_ptr<Frame> frame = decode_jpeg(filename_for_frame(stream_idx, frame_no == 1 ? input_second_pts : input_first_pts));
+ JPEGID jpeg_id;
+ jpeg_id.stream_idx = stream_idx;
+ jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
+ bool did_decode;
+ shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
ycbcr_format.chroma_subsampling_x = frame->chroma_subsampling_x;
ycbcr_format.chroma_subsampling_y = frame->chroma_subsampling_y;
ycbcr_input->change_ycbcr_format(ycbcr_format);
check_error();
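+ // Build mipmaps of the grayscale texture; the flow computation below
+ // uses them as its image pyramid.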
glGenerateTextureMipmap(resources.gray_tex);
check_error();
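+ // Compute dense optical flow between the two frames, in both directions.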
- GLuint flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+ qf.flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
check_error();
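+ // Interpolate the frame at the given alpha along the flow field.
+ // (The output resolution is hard-coded to 1280x720 for now.)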
- qf.output_tex = interpolate->exec(resources.input_tex, flow_tex, 1280, 720, alpha);
+ qf.output_tex = interpolate->exec(resources.input_tex, qf.flow_tex, 1280, 720, alpha);
check_error();
+ // We could have released qf.flow_tex here, but if it were reused for
+ // the next frame right away, we could stall waiting for the GPU to be
+ // done with it. Instead, hold on to it and release it only when the
+ // readback is done.
+
// Read it down (asynchronously) to the CPU.
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
void VideoStream::encode_thread_func()
{
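+ // Name the thread, to make it easier to find in gdb and top.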
+ pthread_setname_np(pthread_self(), "VideoStream");
QSurface *surface = create_surface();
QOpenGLContext *context = create_context(surface);
bool ok = make_current(context, surface);
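+ // Block until the fence is signaled, i.e., the GPU has finished the
+ // asynchronous readback into the PBO.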
glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
vector<uint8_t> jpeg = encode_jpeg((const uint8_t *)qf.resources.pbo_contents, 1280, 720);
+ compute_flow->release_texture(qf.flow_tex);
+ interpolate->release_texture(qf.output_tex);
AVPacket pkt;
av_init_packet(&pkt);