X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=futatabi%2Fvideo_stream.cpp;h=7c2f8f90464c3af487c119bf8962830ae299b831;hb=b44bf7cfce6a5aaffbcd1e37df39068a163438ad;hp=e41304bac948f9180b2f6f108cc121e26e950829;hpb=299f7273d2c27e2de40510f8256fb5d4de465014;p=nageru

diff --git a/futatabi/video_stream.cpp b/futatabi/video_stream.cpp
index e41304b..7c2f8f9 100644
--- a/futatabi/video_stream.cpp
+++ b/futatabi/video_stream.cpp
@@ -236,7 +236,12 @@ VideoStream::VideoStream(AVFormatContext *file_avctx)
 	last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height);
 }
 
-VideoStream::~VideoStream() {}
+VideoStream::~VideoStream()
+{
+	if (last_flow_tex != 0) {
+		compute_flow->release_texture(last_flow_tex);
+	}
+}
 
 void VideoStream::start()
 {
@@ -259,8 +264,7 @@ void VideoStream::start()
 	size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
 	mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
-	                  AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO,
-	                  COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
+	                  AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
 
 	encode_thread = thread(&VideoStream::encode_thread_func, this);
 }
@@ -278,7 +282,7 @@ void VideoStream::clear_queue()
 	deque<QueuedFrame> q;
 
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		q = move(frame_queue);
 	}
 
@@ -287,7 +291,9 @@
 	for (const QueuedFrame &qf : q) {
 		if (qf.type == QueuedFrame::INTERPOLATED ||
 		    qf.type == QueuedFrame::FADED_INTERPOLATED) {
-			compute_flow->release_texture(qf.flow_tex);
+			if (qf.flow_tex != 0) {
+				compute_flow->release_texture(qf.flow_tex);
+			}
 		}
 		if (qf.type == QueuedFrame::INTERPOLATED) {
 			interpolate->release_texture(qf.output_tex);
@@ -317,7 +323,7 @@ void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
 	qf.display_func = move(display_func);
 	qf.queue_spot_holder = move(queue_spot_holder);
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -336,7 +342,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
 	// separate pools around.)
 	BorrowedInterpolatedFrameResources resources;
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		if (interpolate_resources.empty()) {
 			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
 			return;
@@ -385,7 +391,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
 	qf.resources = move(resources);
 	qf.local_pts = local_pts;
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -405,7 +411,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	// Get the temporary OpenGL resources we need for doing the interpolation.
 	BorrowedInterpolatedFrameResources resources;
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		if (interpolate_resources.empty()) {
 			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
 			return;
@@ -436,13 +442,30 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	glGenerateTextureMipmap(resources->gray_tex);
 	check_error();
 
-	// Compute the interpolated frame.
-	qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
-	check_error();
+	GLuint flow_tex;
+	if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
+		// Reuse the flow from previous computation. This frequently happens
+		// if we slow down by more than 2x, so that there are multiple interpolated
+		// frames between each original.
+		flow_tex = last_flow_tex;
+		qf.flow_tex = 0;
+	} else {
+		// Cache miss, so release last_flow_tex.
+		qf.flow_tex = last_flow_tex;
+
+		// Compute the flow.
+		flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+		check_error();
+
+		// Store the flow texture for possible reuse next frame.
+		last_flow_tex = flow_tex;
+		last_frame1 = frame1;
+		last_frame2 = frame2;
+	}
 
 	if (secondary_frame.pts != -1) {
 		// Fade. First kick off the interpolation.
-		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha);
+		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
 		check_error();
 
 		// Now decode the image we are fading against.
@@ -457,7 +480,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 
 		interpolate_no_split->release_texture(qf.output_tex);
 	} else {
-		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha);
+		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
 		check_error();
 
 		// Subsample and split Cb/Cr.
@@ -467,6 +490,9 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
 	// when trying to reuse it for the next frame, we can just as well hold on to it
 	// and release it only when the readback is done.
+	//
+	// TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
+	// _last_ frame, not this one.
 
 	// Read it down (asynchronously) to the CPU.
 	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
@@ -491,7 +517,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	check_error();
 	qf.resources = move(resources);
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -506,7 +532,7 @@ void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
 	qf.display_func = move(display_func);
 	qf.queue_spot_holder = move(queue_spot_holder);
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -593,6 +619,7 @@ void VideoStream::encode_thread_func()
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)jpeg.data();
 			pkt.size = jpeg.size();
+			pkt.flags = AV_PKT_FLAG_KEY;
 			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 
 			last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
@@ -609,6 +636,7 @@ void VideoStream::encode_thread_func()
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)jpeg.data();
 			pkt.size = jpeg.size();
+			pkt.flags = AV_PKT_FLAG_KEY;
 			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 			last_frame = move(jpeg);
 		} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
@@ -622,7 +650,9 @@ void VideoStream::encode_thread_func()
 			// Now JPEG encode it, and send it on to the stream.
 			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
 
-			compute_flow->release_texture(qf.flow_tex);
+			if (qf.flow_tex != 0) {
+				compute_flow->release_texture(qf.flow_tex);
+			}
 			if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
 				interpolate->release_texture(qf.output_tex);
 				interpolate->release_texture(qf.cbcr_tex);
@@ -633,6 +663,7 @@ void VideoStream::encode_thread_func()
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)jpeg.data();
 			pkt.size = jpeg.size();
+			pkt.flags = AV_PKT_FLAG_KEY;
 			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 			last_frame = move(jpeg);
 		} else if (qf.type == QueuedFrame::REFRESH) {
@@ -641,6 +672,7 @@ void VideoStream::encode_thread_func()
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)last_frame.data();
 			pkt.size = last_frame.size();
+			pkt.flags = AV_PKT_FLAG_KEY;
 			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 		} else {
 			assert(false);
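Notes on this change:

The core of the patch is a one-entry cache for the computed optical flow: when several interpolated frames land between the same pair of original frames (common at 2x slowdown or more), the flow field is computed once and reused. Ownership is handled by handing the *previous* cached texture to the queued frame in qf.flow_tex, which releases it only after its asynchronous readback is done (avoiding a GPU stall); on a cache hit, qf.flow_tex is set to 0 so nothing is double-freed. A minimal sketch of that ownership scheme, with FlowEngine, FlowCache, and QueuedFrame as hypothetical stand-ins for Futatabi's DISComputeFlow and VideoStream types (only the hand-off logic mirrors the patch):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Stand-in for DISComputeFlow; hands out opaque texture handles.
struct FlowEngine {
	uint32_t exec() { return next_tex++; }  // pretend to compute flow
	void release_texture(uint32_t tex) { printf("released tex %u\n", (unsigned)tex); }
	uint32_t next_tex = 1;
};

struct QueuedFrame {
	// Flow texture this frame owns and must release after readback,
	// or 0 if it reused the cached flow and owns nothing.
	uint32_t flow_tex = 0;
};

class FlowCache {
public:
	// Returns the flow texture to interpolate with; fills in qf->flow_tex
	// with whatever stale texture the frame should release later.
	uint32_t get_flow(int frame1, int frame2, QueuedFrame *qf)
	{
		if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
			qf->flow_tex = 0;      // Cache hit: the frame owns no texture.
			return last_flow_tex;  // Reuse without recomputing.
		}
		qf->flow_tex = last_flow_tex;  // Defer release of the stale flow.
		last_flow_tex = engine.exec();
		last_frame1 = frame1;
		last_frame2 = frame2;
		return last_flow_tex;
	}

	// Mirrors the new ~VideoStream(): the cache still owns the last texture.
	~FlowCache()
	{
		if (last_flow_tex != 0) {
			engine.release_texture(last_flow_tex);
		}
	}

	FlowEngine engine;

private:
	uint32_t last_flow_tex = 0;
	int last_frame1 = -1, last_frame2 = -1;
};

int main()
{
	FlowCache cache;
	QueuedFrame a, b, c;
	uint32_t t1 = cache.get_flow(10, 11, &a);  // miss: computes flow
	uint32_t t2 = cache.get_flow(10, 11, &b);  // hit: reuses t1
	assert(t1 == t2 && b.flow_tex == 0);
	cache.get_flow(11, 12, &c);                // miss: c now owns the stale t1
	assert(c.flow_tex == t1);
	cache.engine.release_texture(c.flow_tex);  // "after readback"
}

This also explains the new null checks in clear_queue() and encode_thread_func(): qf.flow_tex may now legitimately be 0.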
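The wholesale unique_lock -> lock_guard swap is a small cleanup: every one of these producer-side critical sections locks once, pushes, notifies, and unlocks at scope exit, so it never needs unique_lock's ability to unlock early or to be passed to condition_variable::wait(). Only the consumer side needs that. A self-contained sketch of the split (the names echo the queue in this file, but the code is otherwise generic):

#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

std::mutex queue_lock;
std::condition_variable queue_changed;
std::deque<int> frame_queue;

// Producer: lock, push, notify. No wait() and no early unlock, so the
// cheaper lock_guard (which cannot be released before scope exit) suffices.
void push_frame(int frame)
{
	std::lock_guard<std::mutex> lock(queue_lock);
	frame_queue.push_back(frame);
	queue_changed.notify_all();
}

// Consumer: condition_variable::wait() must be able to unlock and relock,
// so this side still requires unique_lock.
int pop_frame()
{
	std::unique_lock<std::mutex> lock(queue_lock);
	queue_changed.wait(lock, [] { return !frame_queue.empty(); });
	int frame = frame_queue.front();
	frame_queue.pop_front();
	return frame;
}

int main()
{
	std::thread producer([] {
		for (int i = 0; i < 3; ++i) push_frame(i);
	});
	for (int i = 0; i < 3; ++i) printf("got frame %d\n", pop_frame());
	producer.join();
}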
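The repeated pkt.flags = AV_PKT_FLAG_KEY additions mark every muxed packet as a sync point, which is accurate for (M)JPEG since each frame is independently decodable; players and muxers that honor the flag can then seek to any frame. A minimal sketch of the packet setup, assuming FFmpeg's libavcodec (Mux::add_packet itself is Nageru's own API, not shown):

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdint>
#include <cstdio>
#include <vector>

// Wrap an already-encoded JPEG in an AVPacket, as encode_thread_func() does.
// The packet borrows the vector's buffer, so the vector must outlive it.
AVPacket make_mjpeg_packet(std::vector<uint8_t> &jpeg)
{
	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.stream_index = 0;
	pkt.data = jpeg.data();
	pkt.size = (int)jpeg.size();
	pkt.flags = AV_PKT_FLAG_KEY;  // every (M)JPEG frame is a keyframe
	return pkt;
}

int main()
{
	std::vector<uint8_t> fake_jpeg(1024, 0);  // placeholder for real JPEG data
	AVPacket pkt = make_mjpeg_packet(fake_jpeg);
	printf("packet: %d bytes, key=%d\n", pkt.size, (pkt.flags & AV_PKT_FLAG_KEY) != 0);
}

av_init_packet() was the current API when this patch was written; newer FFmpeg deprecates it in favor of av_packet_alloc().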