diff --git a/futatabi/video_stream.cpp b/futatabi/video_stream.cpp
index 2f8f884..66323b8 100644
--- a/futatabi/video_stream.cpp
+++ b/futatabi/video_stream.cpp
@@ -6,14 +6,14 @@ extern "C" {
 }
 
 #include "chroma_subsampler.h"
-#include "shared/context.h"
 #include "flags.h"
 #include "flow.h"
-#include "shared/httpd.h"
 #include "jpeg_frame_view.h"
 #include "movit/util.h"
-#include "shared/mux.h"
 #include "player.h"
+#include "shared/context.h"
+#include "shared/httpd.h"
+#include "shared/mux.h"
 #include "util.h"
 #include "ycbcr_converter.h"
 
@@ -129,7 +129,8 @@ vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const
 	return move(dest.dest);
 }
 
-VideoStream::VideoStream()
+VideoStream::VideoStream(AVFormatContext *file_avctx)
+	: avctx(file_avctx), output_fast_forward(file_avctx != nullptr)
 {
 	ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
 	ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));
@@ -207,7 +208,8 @@ VideoStream::VideoStream()
 	check_error();
 
 	OperatingPoint op;
-	if (global_flags.interpolation_quality == 1) {
+	if (global_flags.interpolation_quality == 0 ||
+	    global_flags.interpolation_quality == 1) {
 		op = operating_point1;
 	} else if (global_flags.interpolation_quality == 2) {
 		op = operating_point2;
@@ -216,6 +218,7 @@ VideoStream::VideoStream()
 	} else if (global_flags.interpolation_quality == 4) {
 		op = operating_point4;
 	} else {
+		// Quality 0 will be changed to 1 in flags.cpp.
 		assert(false);
 	}
 
@@ -233,39 +236,44 @@ VideoStream::VideoStream()
 	last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height);
 }
 
-VideoStream::~VideoStream() {}
+VideoStream::~VideoStream()
+{
+	if (last_flow_tex != 0) {
+		compute_flow->release_texture(last_flow_tex);
+	}
+}
 
 void VideoStream::start()
 {
-	AVFormatContext *avctx = avformat_alloc_context();
+	if (avctx == nullptr) {
+		avctx = avformat_alloc_context();
 
-	// We use Matroska, because it's pretty much the only mux where FFmpeg
-	// allows writing chroma location to override JFIF's default center placement.
-	// (Note that at the time of writing, however, FFmpeg does not correctly
-	// _read_ this information!)
-	avctx->oformat = av_guess_format("matroska", nullptr, nullptr);
+		// We use Matroska, because it's pretty much the only mux where FFmpeg
+		// allows writing chroma location to override JFIF's default center placement.
+		// (Note that at the time of writing, however, FFmpeg does not correctly
+		// _read_ this information!)
+		avctx->oformat = av_guess_format("matroska", nullptr, nullptr);
 
-	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
-	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
-	avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
-	avctx->pb->ignore_boundary_point = 1;
+		uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+		avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
+		avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
+		avctx->pb->ignore_boundary_point = 1;
 
-	Mux::Codec video_codec = Mux::CODEC_MJPEG;
-
-	avctx->flags = AVFMT_FLAG_CUSTOM_IO;
-
-	string video_extradata;
+		avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+	}
 
 	size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
-	stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr,
-		AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO,
-		COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
+	mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
+		AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
 
 	encode_thread = thread(&VideoStream::encode_thread_func, this);
 }
 
 void VideoStream::stop()
 {
+	should_quit = true;
+	queue_changed.notify_all();
+	clear_queue();
 	encode_thread.join();
 }
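The constructor and start() changes above are what enable writing to a file instead of the stream: if the caller hands in a ready-made AVFormatContext, VideoStream skips its custom-I/O streaming setup and runs in fast-forward. A minimal caller-side sketch; open_export_stream() and the surrounding glue are hypothetical, but avformat_alloc_output_context2() and avio_open() are standard FFmpeg API:

```cpp
extern "C" {
#include <libavformat/avformat.h>
}

// Hypothetical illustration of driving the new file_avctx parameter;
// error handling and ownership details are elided.
VideoStream *open_export_stream(const char *filename)
{
	AVFormatContext *avctx = nullptr;
	avformat_alloc_output_context2(&avctx, nullptr, "matroska", filename);
	avio_open(&avctx->pb, filename, AVIO_FLAG_WRITE);

	// A non-null context flips output_fast_forward on: frames are muxed as
	// fast as they can be rendered instead of being paced in real time.
	VideoStream *stream = new VideoStream(avctx);
	stream->start();
	return stream;
}
```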
@@ -274,7 +282,7 @@ void VideoStream::clear_queue()
 	deque<QueuedFrame> q;
 
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		q = move(frame_queue);
 	}
 
@@ -283,7 +291,9 @@ void VideoStream::clear_queue()
 
 	for (const QueuedFrame &qf : q) {
 		if (qf.type == QueuedFrame::INTERPOLATED ||
 		    qf.type == QueuedFrame::FADED_INTERPOLATED) {
-			compute_flow->release_texture(qf.flow_tex);
+			if (qf.flow_tex != 0) {
+				compute_flow->release_texture(qf.flow_tex);
+			}
 		}
 		if (qf.type == QueuedFrame::INTERPOLATED) {
 			interpolate->release_texture(qf.output_tex);
@@ -313,7 +323,7 @@ void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
 	qf.display_func = move(display_func);
 	qf.queue_spot_holder = move(queue_spot_holder);
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -332,7 +342,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
 	// separate pools around.)
 	BorrowedInterpolatedFrameResources resources;
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		if (interpolate_resources.empty()) {
 			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
 			return;
@@ -381,7 +391,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
 	qf.resources = move(resources);
 	qf.local_pts = local_pts;
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
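The unique_lock → lock_guard swaps in these scheduling functions are safe because none of these scopes unlock early or hand the lock to a condition-variable wait; only the consumer in encode_thread_func() still needs unique_lock. A self-contained sketch of the producer pattern, with QueuedFrame reduced to a stub:

```cpp
#include <condition_variable>
#include <deque>
#include <mutex>
#include <utility>

struct QueuedFrame {};  // Stub; the real struct carries textures, pts, etc.

std::mutex queue_lock;
std::condition_variable queue_changed;
std::deque<QueuedFrame> frame_queue;

void push_frame(QueuedFrame qf)
{
	std::lock_guard<std::mutex> lock(queue_lock);  // Held to end of scope, never released early.
	frame_queue.push_back(std::move(qf));
	queue_changed.notify_all();  // Wakes the encoder thread.
}
```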
@@ -401,7 +411,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	// Get the temporary OpenGL resources we need for doing the interpolation.
 	BorrowedInterpolatedFrameResources resources;
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		if (interpolate_resources.empty()) {
 			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
 			return;
@@ -432,13 +442,30 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	glGenerateTextureMipmap(resources->gray_tex);
 	check_error();
 
-	// Compute the interpolated frame.
-	qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
-	check_error();
+	GLuint flow_tex;
+	if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
+		// Reuse the flow from the previous computation. This frequently happens
+		// if we slow down by more than 2x, so that there are multiple interpolated
+		// frames between each original.
+		flow_tex = last_flow_tex;
+		qf.flow_tex = 0;
+	} else {
+		// Cache miss, so release last_flow_tex.
+		qf.flow_tex = last_flow_tex;
+
+		// Compute the flow.
+		flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+		check_error();
+
+		// Store the flow texture for possible reuse next frame.
+		last_flow_tex = flow_tex;
+		last_frame1 = frame1;
+		last_frame2 = frame2;
+	}
 
 	if (secondary_frame.pts != -1) {
 		// Fade. First kick off the interpolation.
-		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha);
+		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
 		check_error();
 
 		// Now decode the image we are fading against.
@@ -453,7 +480,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 
 		interpolate_no_split->release_texture(qf.output_tex);
 	} else {
-		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha);
+		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
 		check_error();
 
 		// Subsample and split Cb/Cr.
@@ -463,6 +490,9 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
 	// when trying to reuse it for the next frame, we can just as well hold on to it
 	// and release it only when the readback is done.
+	//
+	// TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
+	// _last_ frame, not this one.
 
 	// Read it down (asynchronously) to the CPU.
 	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
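The last_flow_tex logic above is a one-entry cache keyed on the (frame1, frame2) pair, with the twist that the evicted texture travels along in qf.flow_tex so it is released only after the asynchronous readback has drained. The same idea in self-contained form, with a generic value standing in for the GL texture (OneEntryCache is illustrative, not part of the codebase):

```cpp
#include <functional>
#include <optional>
#include <utility>

template <class Key, class Value>
class OneEntryCache {
public:
	// On a hit, *evicted stays empty. On a miss, the previous value is handed
	// back through *evicted so the caller can free it at a safe point (the
	// real code defers the GL release until the readback fence has passed).
	Value get(const Key &key, const std::function<Value(const Key &)> &compute,
	          std::optional<Value> *evicted)
	{
		if (entry && entry->first == key) {
			evicted->reset();
			return entry->second;
		}
		if (entry) {
			*evicted = entry->second;
		}
		entry.emplace(key, compute(key));
		return entry->second;
	}

private:
	std::optional<std::pair<Key, Value>> entry;
};
```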
@@ -487,7 +517,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	check_error();
 	qf.resources = move(resources);
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -502,7 +532,7 @@ void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
 	qf.display_func = move(display_func);
 	qf.queue_spot_holder = move(queue_spot_holder);
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -549,22 +579,30 @@ void VideoStream::encode_thread_func()
 		exit(1);
 	}
 
-	for ( ;; ) {
+	while (!should_quit) {
 		QueuedFrame qf;
 		{
 			unique_lock<mutex> lock(queue_lock);
 
 			// Wait until we have a frame to play.
-			queue_changed.wait(lock, [this]{
-				return !frame_queue.empty();
+			queue_changed.wait(lock, [this] {
+				return !frame_queue.empty() || should_quit;
 			});
+			if (should_quit) {
+				break;
+			}
 			steady_clock::time_point frame_start = frame_queue.front().local_pts;
 
 			// Now sleep until the frame is supposed to start (the usual case),
 			// _or_ clear_queue() happened.
-			bool aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
-				return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
-			});
+			bool aborted;
+			if (output_fast_forward) {
+				aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
+			} else {
+				aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
+					return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
+				});
+			}
 			if (aborted) {
 				// clear_queue() happened, so don't play this frame after all.
 				continue;
@@ -581,7 +619,8 @@ void VideoStream::encode_thread_func()
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)jpeg.data();
 			pkt.size = jpeg.size();
-			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+			pkt.flags = AV_PKT_FLAG_KEY;
+			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 
 			last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
 		} else if (qf.type == QueuedFrame::FADED) {
@@ -597,7 +636,8 @@ void VideoStream::encode_thread_func()
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)jpeg.data();
 			pkt.size = jpeg.size();
-			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+			pkt.flags = AV_PKT_FLAG_KEY;
+			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 
 			last_frame = move(jpeg);
 		} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
 			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
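The reworked wait in encode_thread_func() now has three outcomes: quit, play the frame, or abort because clear_queue() replaced the queue head, and in fast-forward (file export) mode it never sleeps at all. The same logic isolated into a helper — wait_for_frame_start() is a hypothetical name, but the predicate is verbatim from the diff:

```cpp
#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>

using std::chrono::steady_clock;

struct Frame { steady_clock::time_point local_pts; };

std::mutex queue_lock;
std::condition_variable queue_changed;
std::deque<Frame> frame_queue;
bool output_fast_forward = false;

// Returns true ("aborted") if the head frame changed while we were waiting,
// i.e., clear_queue() ran and this frame should not be played after all.
bool wait_for_frame_start(std::unique_lock<std::mutex> &lock,
                          steady_clock::time_point frame_start)
{
	auto head_changed = [frame_start] {
		return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
	};
	if (output_fast_forward) {
		return head_changed();  // File export: never pace against the wall clock.
	}
	return queue_changed.wait_until(lock, frame_start, head_changed);
}
```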
@@ -610,7 +650,9 @@ void VideoStream::encode_thread_func()
 			// Now JPEG encode it, and send it on to the stream.
 			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
 
-			compute_flow->release_texture(qf.flow_tex);
+			if (qf.flow_tex != 0) {
+				compute_flow->release_texture(qf.flow_tex);
+			}
 			if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
 				interpolate->release_texture(qf.output_tex);
 				interpolate->release_texture(qf.cbcr_tex);
@@ -621,7 +663,8 @@ void VideoStream::encode_thread_func()
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)jpeg.data();
 			pkt.size = jpeg.size();
-			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+			pkt.flags = AV_PKT_FLAG_KEY;
+			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 
 			last_frame = move(jpeg);
 		} else if (qf.type == QueuedFrame::REFRESH) {
 			AVPacket pkt;
@@ -629,7 +672,8 @@ void VideoStream::encode_thread_func()
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)last_frame.data();
 			pkt.size = last_frame.size();
-			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+			pkt.flags = AV_PKT_FLAG_KEY;
+			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
 		} else {
 			assert(false);
 		}
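The pkt.flags = AV_PKT_FLAG_KEY additions mark every packet as a keyframe, which is correct for MJPEG since each JPEG decodes independently; presumably this is what lets downstream consumers seek or cut at any frame. The submission pattern the diff converges on, pulled out as a sketch — send_jpeg() is hypothetical, Mux is Futatabi's muxer wrapper from shared/mux.h, and av_init_packet() is assumed to sit in the elided context lines:

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdint>
#include <vector>

#include "shared/mux.h"

void send_jpeg(Mux *mux, const std::vector<uint8_t> &jpeg, int64_t output_pts)
{
	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.stream_index = 0;
	pkt.data = const_cast<uint8_t *>(jpeg.data());
	pkt.size = jpeg.size();
	pkt.flags = AV_PKT_FLAG_KEY;  // Every MJPEG frame is self-contained.
	mux->add_packet(pkt, output_pts, output_pts);
}
```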