X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=futatabi%2Fvideo_stream.cpp;h=da57ebb9d9f0cc882065fa1658bf55bc4d467194;hb=b57d57b88a1d7388c6eacf8cc0867f680123120a;hp=6ee9608816f1940be13c06ba8ca9fc2edacd6afa;hpb=0a3ea4312599886108fbd12e389ed3504c4dae60;p=nageru

diff --git a/futatabi/video_stream.cpp b/futatabi/video_stream.cpp
index 6ee9608..da57ebb 100644
--- a/futatabi/video_stream.cpp
+++ b/futatabi/video_stream.cpp
@@ -6,14 +6,14 @@ extern "C" {
 }
 
 #include "chroma_subsampler.h"
-#include "shared/context.h"
 #include "flags.h"
 #include "flow.h"
-#include "shared/httpd.h"
 #include "jpeg_frame_view.h"
 #include "movit/util.h"
-#include "shared/mux.h"
 #include "player.h"
+#include "shared/context.h"
+#include "shared/httpd.h"
+#include "shared/mux.h"
 #include "util.h"
 #include "ycbcr_converter.h"
 
@@ -28,7 +28,7 @@ extern HTTPD *global_httpd;
 
 struct VectorDestinationManager {
 	jpeg_destination_mgr pub;
-	std::vector<uint8_t> dest;
+	string dest;
 
 	VectorDestinationManager()
 	{
@@ -62,7 +62,7 @@ struct VectorDestinationManager {
 	{
 		dest.resize(bytes_used + 4096);
 		dest.resize(dest.capacity());
-		pub.next_output_byte = dest.data() + bytes_used;
+		pub.next_output_byte = (uint8_t *)dest.data() + bytes_used;
 		pub.free_in_buffer = dest.size() - bytes_used;
 	}
 
@@ -78,7 +78,7 @@ struct VectorDestinationManager {
 };
 static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
 
-vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
+string encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
 {
 	VectorDestinationManager dest;
 
@@ -236,7 +236,36 @@ VideoStream::VideoStream(AVFormatContext *file_avctx)
 	last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height);
 }
 
-VideoStream::~VideoStream() {}
+VideoStream::~VideoStream()
+{
+	if (last_flow_tex != 0) {
+		compute_flow->release_texture(last_flow_tex);
+	}
+
+	for (const unique_ptr<InterpolatedFrameResources> &resource : interpolate_resources) {
+		glUnmapNamedBuffer(resource->pbo);
+		check_error();
+		glDeleteBuffers(1, &resource->pbo);
+		check_error();
+		glDeleteFramebuffers(2, resource->input_fbos);
+		check_error();
+		glDeleteFramebuffers(1, &resource->fade_fbo);
+		check_error();
+		glDeleteTextures(1, &resource->input_tex);
+		check_error();
+		glDeleteTextures(1, &resource->gray_tex);
+		check_error();
+		glDeleteTextures(1, &resource->fade_y_output_tex);
+		check_error();
+		glDeleteTextures(1, &resource->fade_cbcr_output_tex);
+		check_error();
+		glDeleteTextures(1, &resource->cb_tex);
+		check_error();
+		glDeleteTextures(1, &resource->cr_tex);
+		check_error();
+	}
+	assert(interpolate_resources.size() == num_interpolate_slots);
+}
 
 void VideoStream::start()
 {
@@ -259,8 +288,7 @@ void VideoStream::start()
 
 	size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
 	mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
-	                  AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO,
-	                  COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
+	                  AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, Mux::WITH_SUBTITLES));
 
 	encode_thread = thread(&VideoStream::encode_thread_func, this);
 }
@@ -278,7 +306,7 @@ void VideoStream::clear_queue()
 	deque<QueuedFrame> q;
 
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		q = move(frame_queue);
 	}
 
@@ -287,7 +315,9 @@ void VideoStream::clear_queue()
 	for (const QueuedFrame &qf : q) {
 		if (qf.type == QueuedFrame::INTERPOLATED ||
 		    qf.type == QueuedFrame::FADED_INTERPOLATED) {
-			compute_flow->release_texture(qf.flow_tex);
+			if (qf.flow_tex != 0) {
+				compute_flow->release_texture(qf.flow_tex);
+			}
 		}
 		if (qf.type == QueuedFrame::INTERPOLATED) {
 			interpolate->release_texture(qf.output_tex);
@@ -301,23 +331,20 @@ void VideoStream::clear_queue()
 void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
                                           int64_t output_pts, function<void()> &&display_func,
                                           QueueSpotHolder &&queue_spot_holder,
-                                          FrameOnDisk frame)
+                                          FrameOnDisk frame, const string &subtitle)
 {
 	fprintf(stderr, "output_pts=%ld  original      input_pts=%ld\n", output_pts, frame.pts);
 
-	// Preload the file from disk, so that the encoder thread does not get stalled.
-	// TODO: Consider sending it through the queue instead.
-	(void)frame_reader.read_frame(frame);
-
 	QueuedFrame qf;
 	qf.local_pts = local_pts;
 	qf.type = QueuedFrame::ORIGINAL;
 	qf.output_pts = output_pts;
-	qf.frame1 = frame;
 	qf.display_func = move(display_func);
 	qf.queue_spot_holder = move(queue_spot_holder);
+	qf.subtitle = subtitle;
+	qf.encoded_jpeg.reset(new string(frame_reader.read_frame(frame)));
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -326,7 +353,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
                                        function<void()> &&display_func,
                                        QueueSpotHolder &&queue_spot_holder,
                                        FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
-                                       float fade_alpha)
+                                       float fade_alpha, const string &subtitle)
 {
 	fprintf(stderr, "output_pts=%ld  faded         input_pts=%ld,%ld  fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
 
@@ -336,7 +363,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
 	// separate pools around.)
 	BorrowedInterpolatedFrameResources resources;
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		if (interpolate_resources.empty()) {
 			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
 			return;
@@ -359,6 +386,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
 	qf.frame1 = frame1_spec;
 	qf.display_func = move(display_func);
 	qf.queue_spot_holder = move(queue_spot_holder);
+	qf.subtitle = subtitle;
 
 	qf.secondary_frame = frame2_spec;
 
@@ -385,7 +413,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
 	qf.resources = move(resources);
 	qf.local_pts = local_pts;
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -394,7 +422,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
                                               int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
                                               QueueSpotHolder &&queue_spot_holder,
                                               FrameOnDisk frame1, FrameOnDisk frame2,
-                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha)
+                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle)
 {
 	if (secondary_frame.pts != -1) {
 		fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld  alpha=%.3f  secondary_pts=%ld  fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
@@ -405,7 +433,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	// Get the temporary OpenGL resources we need for doing the interpolation.
 	BorrowedInterpolatedFrameResources resources;
 	{
-		unique_lock<mutex> lock(queue_lock);
+		lock_guard<mutex> lock(queue_lock);
 		if (interpolate_resources.empty()) {
 			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
 			return;
@@ -420,6 +448,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	qf.display_decoded_func = move(display_func);
 	qf.queue_spot_holder = move(queue_spot_holder);
 	qf.local_pts = local_pts;
+	qf.subtitle = subtitle;
 
 	check_error();
 
@@ -436,13 +465,30 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	glGenerateTextureMipmap(resources->gray_tex);
 	check_error();
 
-	// Compute the interpolated frame.
-	qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
-	check_error();
+	GLuint flow_tex;
+	if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
+		// Reuse the flow from previous computation. This frequently happens
+		// if we slow down by more than 2x, so that there are multiple interpolated
+		// frames between each original.
+		flow_tex = last_flow_tex;
+		qf.flow_tex = 0;
+	} else {
+		// Cache miss, so release last_flow_tex.
+		qf.flow_tex = last_flow_tex;
+
+		// Compute the flow.
+		flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+		check_error();
+
+		// Store the flow texture for possible reuse next frame.
+		last_flow_tex = flow_tex;
+		last_frame1 = frame1;
+		last_frame2 = frame2;
+	}
 
 	if (secondary_frame.pts != -1) {
 		// Fade. First kick off the interpolation.
-		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha);
+		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
 		check_error();
 
 		// Now decode the image we are fading against.
@@ -457,7 +503,7 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 
 		interpolate_no_split->release_texture(qf.output_tex);
 	} else {
-		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha);
+		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
 		check_error();
 
 		// Subsample and split Cb/Cr.
@@ -467,6 +513,9 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
 	// when trying to reuse it for the next frame, we can just as well hold on to it
 	// and release it only when the readback is done.
+	//
+	// TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
+	// _last_ frame, not this one.
 
 	// Read it down (asynchronously) to the CPU.
 	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
@@ -491,22 +540,23 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
 	check_error();
 
 	qf.resources = move(resources);
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
 
 void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
                                          int64_t output_pts, function<void()> &&display_func,
-                                         QueueSpotHolder &&queue_spot_holder)
+                                         QueueSpotHolder &&queue_spot_holder, const string &subtitle)
 {
 	QueuedFrame qf;
 	qf.type = QueuedFrame::REFRESH;
 	qf.output_pts = output_pts;
 	qf.display_func = move(display_func);
 	qf.queue_spot_holder = move(queue_spot_holder);
+	qf.subtitle = subtitle;
 
-	unique_lock<mutex> lock(queue_lock);
+	lock_guard<mutex> lock(queue_lock);
 	frame_queue.push_back(move(qf));
 	queue_changed.notify_all();
 }
@@ -559,7 +609,7 @@ void VideoStream::encode_thread_func()
 		unique_lock<mutex> lock(queue_lock);
 
 		// Wait until we have a frame to play.
-		queue_changed.wait(lock, [this]{
+		queue_changed.wait(lock, [this] {
 			return !frame_queue.empty() || should_quit;
 		});
 		if (should_quit) {
@@ -573,7 +623,7 @@ void VideoStream::encode_thread_func()
 		if (output_fast_forward) {
 			aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
 		} else {
-			aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
+			aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
 				return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
 			});
 		}
@@ -585,9 +635,23 @@ void VideoStream::encode_thread_func()
 			frame_queue.pop_front();
 		}
 
+		// Hack: We mux the subtitle packet one time unit before the actual frame,
+		// so that Nageru is sure to get it first.
+		if (!qf.subtitle.empty()) {
+			AVPacket pkt;
+			av_init_packet(&pkt);
+			pkt.stream_index = mux->get_subtitle_stream_idx();
+			assert(pkt.stream_index != -1);
+			pkt.data = (uint8_t *)qf.subtitle.data();
+			pkt.size = qf.subtitle.size();
+			pkt.flags = 0;
+			pkt.duration = lrint(TIMEBASE / global_flags.output_framerate);  // Doesn't really matter for Nageru.
+			mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
+		}
+
 		if (qf.type == QueuedFrame::ORIGINAL) {
 			// Send the JPEG frame on, unchanged.
-			string jpeg = frame_reader.read_frame(qf.frame1);
+			string jpeg = move(*qf.encoded_jpeg);
 			AVPacket pkt;
 			av_init_packet(&pkt);
 			pkt.stream_index = 0;
@@ -595,15 +659,14 @@ void VideoStream::encode_thread_func()
 			pkt.size = jpeg.size();
 			pkt.flags = AV_PKT_FLAG_KEY;
 			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
-
-			last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
+			last_frame = move(jpeg);
 		} else if (qf.type == QueuedFrame::FADED) {
 			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 
 			shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
 
 			// Now JPEG encode it, and send it on to the stream.
-			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
+			string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
 
 			AVPacket pkt;
 			av_init_packet(&pkt);
@@ -623,8 +686,10 @@ void VideoStream::encode_thread_func()
 			}
 
 			// Now JPEG encode it, and send it on to the stream.
-			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
-			compute_flow->release_texture(qf.flow_tex);
+			string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
+			if (qf.flow_tex != 0) {
+				compute_flow->release_texture(qf.flow_tex);
+			}
 			if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
 				interpolate->release_texture(qf.output_tex);
 				interpolate->release_texture(qf.cbcr_tex);
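
Two side notes on this change, with illustrative sketches. These are not part of the commit; the helper names in them (FlowCache, same_frame, write_frame_with_subtitle) and the stubbed-out types are made up for illustration.

First, the new last_flow_tex/last_frame1/last_frame2 members amount to a single-entry cache around DISComputeFlow::exec(). A standalone sketch of the pattern, with the GL and Futatabi types reduced to stand-ins:

#include <cstdint>

typedef unsigned int GLuint;  // stand-in for the real GL type

struct FrameOnDisk {  // simplified; the real struct carries more than a pts
	int64_t pts = -1;
};

static bool same_frame(const FrameOnDisk &a, const FrameOnDisk &b)
{
	return a.pts == b.pts;  // assumption: pts identifies a frame uniquely here
}

struct FlowCache {
	GLuint last_flow_tex = 0;
	FrameOnDisk last_frame1, last_frame2;

	// Returns the flow texture for (frame1, frame2). On a miss, the previous
	// texture is handed back through *stale_tex so the caller can release it
	// only after its asynchronous readback is done, instead of stalling the
	// pipeline by freeing it here -- the same handoff the patch does through
	// qf.flow_tex.
	template <class ComputeFn>
	GLuint get(const FrameOnDisk &frame1, const FrameOnDisk &frame2,
	           ComputeFn &&compute, GLuint *stale_tex)
	{
		if (last_flow_tex != 0 && same_frame(frame1, last_frame1) && same_frame(frame2, last_frame2)) {
			// Hit: several interpolated frames between the same two originals
			// (e.g., playback slowed by more than 2x) share one flow computation.
			*stale_tex = 0;
			return last_flow_tex;
		}
		*stale_tex = last_flow_tex;  // to be released by the caller when safe
		last_flow_tex = compute(frame1, frame2);
		last_frame1 = frame1;
		last_frame2 = frame2;
		return last_flow_tex;
	}
};

Second, the subtitle side channel: the metadata is muxed as a packet at output_pts - 1, so a consumer (Nageru) is guaranteed to see it before the video frame it describes. A sketch of the same trick against raw libavformat (Futatabi itself goes through its Mux wrapper; the stream indices and the absence of timebase rescaling and error checking are assumptions, and av_init_packet is used to mirror the patch even though newer FFmpeg deprecates it in favor of av_packet_alloc):

extern "C" {
#include <libavformat/avformat.h>
}
#include <string>

void write_frame_with_subtitle(AVFormatContext *avctx, const std::string &subtitle,
                               const std::string &jpeg, int64_t pts)
{
	if (!subtitle.empty()) {
		AVPacket sub_pkt;
		av_init_packet(&sub_pkt);
		sub_pkt.stream_index = 1;  // the subtitle/data stream
		sub_pkt.data = (uint8_t *)subtitle.data();
		sub_pkt.size = subtitle.size();
		sub_pkt.pts = sub_pkt.dts = pts - 1;  // one time unit early, so it arrives first
		av_interleaved_write_frame(avctx, &sub_pkt);
	}

	AVPacket video_pkt;
	av_init_packet(&video_pkt);
	video_pkt.stream_index = 0;  // the video stream
	video_pkt.data = (uint8_t *)jpeg.data();
	video_pkt.size = jpeg.size();
	video_pkt.flags = AV_PKT_FLAG_KEY;
	video_pkt.pts = video_pkt.dts = pts;
	av_interleaved_write_frame(avctx, &video_pkt);
}

Relatedly, encode_jpeg() switching its return type from vector<uint8_t> to string is what lets the ORIGINAL path do last_frame = move(jpeg) instead of copying the byte range as before.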