X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=video_stream.cpp;h=d425ed190b6fa3187a6f3311c3c980d7ff48c031;hb=3795723be95f2fe82f3c8b8b45b1a905b2c811fd;hp=65f52c094bd70e16d41a09a9afbd0c573964e6e6;hpb=9a5e0639486a26685ea623c7c85e9ae87f8ae656;p=nageru

diff --git a/video_stream.cpp b/video_stream.cpp
index 65f52c0..d425ed1 100644
--- a/video_stream.cpp
+++ b/video_stream.cpp
@@ -5,37 +5,231 @@ extern "C" {
 #include <libavformat/avio.h>
 }
 
+#include "chroma_subsampler.h"
+#include "context.h"
+#include "flags.h"
+#include "flow.h"
 #include "httpd.h"
 #include "jpeg_frame_view.h"
+#include "movit/util.h"
 #include "mux.h"
 #include "player.h"
+#include "util.h"
+#include "ycbcr_converter.h"
+
+#include <epoxy/glx.h>
+#include <jpeglib.h>
+#include <unistd.h>
 
 using namespace std;
+using namespace std::chrono;
 
 extern HTTPD *global_httpd;
 
-namespace {
+struct VectorDestinationManager {
+	jpeg_destination_mgr pub;
+	std::vector<uint8_t> dest;
+
+	VectorDestinationManager()
+	{
+		pub.init_destination = init_destination_thunk;
+		pub.empty_output_buffer = empty_output_buffer_thunk;
+		pub.term_destination = term_destination_thunk;
+	}
+
+	static void init_destination_thunk(j_compress_ptr ptr)
+	{
+		((VectorDestinationManager *)(ptr->dest))->init_destination();
+	}
+
+	inline void init_destination()
+	{
+		make_room(0);
+	}
+
+	static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
+	{
+		return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
+	}
 
-string read_file(const string &filename)
+	inline bool empty_output_buffer()
+	{
+		make_room(dest.size());  // Should ignore pub.free_in_buffer!
+		return true;
+	}
+
+	inline void make_room(size_t bytes_used)
+	{
+		dest.resize(bytes_used + 4096);
+		dest.resize(dest.capacity());
+		pub.next_output_byte = dest.data() + bytes_used;
+		pub.free_in_buffer = dest.size() - bytes_used;
+	}
+
+	static void term_destination_thunk(j_compress_ptr ptr)
+	{
+		((VectorDestinationManager *)(ptr->dest))->term_destination();
+	}
+
+	inline void term_destination()
+	{
+		dest.resize(dest.size() - pub.free_in_buffer);
+	}
+};
+static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
+
+vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
 {
-	FILE *fp = fopen(filename.c_str(), "rb");
-	if (fp == nullptr) {
-		perror(filename.c_str());
-		return "";
+	VectorDestinationManager dest;
+
+	jpeg_compress_struct cinfo;
+	jpeg_error_mgr jerr;
+	cinfo.err = jpeg_std_error(&jerr);
+	jpeg_create_compress(&cinfo);
+
+	cinfo.dest = (jpeg_destination_mgr *)&dest;
+	cinfo.input_components = 3;
+	cinfo.in_color_space = JCS_RGB;
+	jpeg_set_defaults(&cinfo);
+	constexpr int quality = 90;
+	jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);
+
+	cinfo.image_width = width;
+	cinfo.image_height = height;
+	cinfo.raw_data_in = true;
+	jpeg_set_colorspace(&cinfo, JCS_YCbCr);
+	cinfo.comp_info[0].h_samp_factor = 2;
+	cinfo.comp_info[0].v_samp_factor = 1;
+	cinfo.comp_info[1].h_samp_factor = 1;
+	cinfo.comp_info[1].v_samp_factor = 1;
+	cinfo.comp_info[2].h_samp_factor = 1;
+	cinfo.comp_info[2].v_samp_factor = 1;
+	cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
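+
+	// With h_samp_factor 2/1/1 and v_samp_factor all 1, this is 4:2:2 raw input:
+	// each row carries width luma samples but only width/2 Cb and Cr samples,
+	// matching the half-width cb_data/cr_data planes the row pointers below use.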
+	jpeg_start_compress(&cinfo, true);
+
+	JSAMPROW yptr[8], cbptr[8], crptr[8];
+	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+	for (unsigned y = 0; y < height; y += 8) {
+		for (unsigned yy = 0; yy < 8; ++yy) {
+			yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
+			cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
+			crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
+		}
+
+		jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
 	}
-	fseek(fp, 0, SEEK_END);
-	long len = ftell(fp);
-	rewind(fp);
+	jpeg_finish_compress(&cinfo);
+	jpeg_destroy_compress(&cinfo);
 
-	string ret;
-	ret.resize(len);
-	fread(&ret[0], len, 1, fp);
-	fclose(fp);
-	return ret;
+	return move(dest.dest);
 }
 
-}  // namespace
+VideoStream::VideoStream()
+{
+	ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
+	ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));
+
+	GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
+	GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
+	GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];
+
+	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
+	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
+	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
+	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
+	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
+	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
+	check_error();
+
+	constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
+	int levels = find_num_levels(width, height);
+	for (size_t i = 0; i < num_interpolate_slots; ++i) {
+		glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
+		check_error();
+		glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
+		check_error();
+		glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
+		check_error();
+		glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
+		check_error();
+		glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
+		check_error();
+		glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
+		check_error();
+
+		unique_ptr<InterpolatedFrameResources> resource(new InterpolatedFrameResources);
+		resource->owner = this;
+		resource->input_tex = input_tex[i];
+		resource->gray_tex = gray_tex[i];
+		resource->fade_y_output_tex = fade_y_output_tex[i];
+		resource->fade_cbcr_output_tex = fade_cbcr_output_tex[i];
+		resource->cb_tex = cb_tex[i];
+		resource->cr_tex = cr_tex[i];
+		glCreateFramebuffers(2, resource->input_fbos);
+		check_error();
+		glCreateFramebuffers(1, &resource->fade_fbo);
+		check_error();
+
+		glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
+		check_error();
+		glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
+		check_error();
+		glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
+		check_error();
+		glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
+		check_error();
+		glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
+		check_error();
+		glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
+		check_error();
+
+		GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
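+		// Each of these FBOs writes two color attachments in a single pass:
+		// the input FBOs get the RGBA interpolation input plus its grayscale
+		// version, and the fade FBO gets Y' plus interleaved CbCr.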
+		glNamedFramebufferDrawBuffers(resource->input_fbos[0], 2, bufs);
+		check_error();
+		glNamedFramebufferDrawBuffers(resource->input_fbos[1], 2, bufs);
+		check_error();
+		glNamedFramebufferDrawBuffers(resource->fade_fbo, 2, bufs);
+		check_error();
+
+		glCreateBuffers(1, &resource->pbo);
+		check_error();
+		glNamedBufferStorage(resource->pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+		check_error();
+		resource->pbo_contents = glMapNamedBufferRange(resource->pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+		interpolate_resources.push_back(move(resource));
+	}
+
+	check_error();
+
+	OperatingPoint op;
+	if (global_flags.interpolation_quality == 1) {
+		op = operating_point1;
+	} else if (global_flags.interpolation_quality == 2) {
+		op = operating_point2;
+	} else if (global_flags.interpolation_quality == 3) {
+		op = operating_point3;
+	} else if (global_flags.interpolation_quality == 4) {
+		op = operating_point4;
+	} else {
+		assert(false);
+	}
+
+	compute_flow.reset(new DISComputeFlow(width, height, op));
+	interpolate.reset(new Interpolate(op, /*split_ycbcr_output=*/true));
+	interpolate_no_split.reset(new Interpolate(op, /*split_ycbcr_output=*/false));
+	chroma_subsampler.reset(new ChromaSubsampler);
+	check_error();
+
+	// The “last frame” is initially black.
+	unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]);
+	unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]);
+	memset(y.get(), 16, 1280 * 720);
+	memset(cb_or_cr.get(), 128, 640 * 720);
+	last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720);
+}
+
+VideoStream::~VideoStream() {}
 
 void VideoStream::start()
 {
@@ -66,39 +260,373 @@ void VideoStream::stop()
 	encode_thread.join();
 }
 
-void VideoStream::schedule_original_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts)
+void VideoStream::clear_queue()
+{
+	deque<QueuedFrame> q;
+
+	{
+		unique_lock<mutex> lock(queue_lock);
+		q = move(frame_queue);
+	}
+
+	// These are not RAII-ed, unfortunately, so we'll need to clean them ourselves.
+	// Note that release_texture() is thread-safe.
+	for (const QueuedFrame &qf : q) {
+		if (qf.type == QueuedFrame::INTERPOLATED ||
+		    qf.type == QueuedFrame::FADED_INTERPOLATED) {
+			compute_flow->release_texture(qf.flow_tex);
+		}
+		if (qf.type == QueuedFrame::INTERPOLATED) {
+			interpolate->release_texture(qf.output_tex);
+			interpolate->release_texture(qf.cbcr_tex);
+		}
+	}
+
+	// Destroy q outside the mutex; destroying it under the lock would be a double-lock.
+}
+
+void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
+                                          int64_t output_pts, function<void()> &&display_func,
+                                          QueueSpotHolder &&queue_spot_holder,
+                                          FrameOnDisk frame)
+{
+	fprintf(stderr, "output_pts=%ld original input_pts=%ld\n", output_pts, frame.pts);
+
+	// Preload the file from disk, so that the encoder thread does not get stalled.
+	// TODO: Consider sending it through the queue instead.
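+	// The result is discarded on purpose; the read is only there to warm things
+	// up, since the frame is read again in encode_thread_func() when it is
+	// actually sent.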
+	(void)frame_reader.read_frame(frame);
+
+	QueuedFrame qf;
+	qf.local_pts = local_pts;
+	qf.type = QueuedFrame::ORIGINAL;
+	qf.output_pts = output_pts;
+	qf.frame1 = frame;
+	qf.display_func = move(display_func);
+	qf.queue_spot_holder = move(queue_spot_holder);
+
+	unique_lock<mutex> lock(queue_lock);
+	frame_queue.push_back(move(qf));
+	queue_changed.notify_all();
+}
+
+void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64_t output_pts,
+                                       function<void()> &&display_func,
+                                       QueueSpotHolder &&queue_spot_holder,
+                                       FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
+                                       float fade_alpha)
+{
+	fprintf(stderr, "output_pts=%ld faded input_pts=%ld,%ld fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
+
+	// Get the temporary OpenGL resources we need for doing the fade.
+	// (We share these with interpolated frames, which is slightly
+	// overkill, but there's no need to waste resources on keeping
+	// separate pools around.)
+	BorrowedInterpolatedFrameResources resources;
+	{
+		unique_lock<mutex> lock(queue_lock);
+		if (interpolate_resources.empty()) {
+			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
+			return;
+		}
+		resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
+		interpolate_resources.pop_front();
+	}
+
+	bool did_decode;
+
+	shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+	shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+
+	ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
+
+	QueuedFrame qf;
+	qf.local_pts = local_pts;
+	qf.type = QueuedFrame::FADED;
+	qf.output_pts = output_pts;
+	qf.frame1 = frame1_spec;
+	qf.display_func = move(display_func);
+	qf.queue_spot_holder = move(queue_spot_holder);
+
+	qf.secondary_frame = frame2_spec;
+
+	// Subsample and split Cb/Cr.
+	chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+
+	// Read it down (asynchronously) to the CPU.
+	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
+	check_error();
+	glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+	check_error();
+	glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+	check_error();
+	glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+	check_error();
+	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+	// Set a fence we can wait for to make sure the CPU sees the read.
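+	// The PBO is persistently mapped, so the barrier is what makes the
+	// glGetTextureImage() results above visible through pbo_contents; the
+	// encoder thread waits on this fence with glClientWaitSync() before it
+	// reads those bytes.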
+	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+	check_error();
+	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+	check_error();
+	qf.resources = move(resources);
+	qf.local_pts = local_pts;
+
+	unique_lock<mutex> lock(queue_lock);
+	frame_queue.push_back(move(qf));
+	queue_changed.notify_all();
+}
+
+void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts,
+                                              int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
+                                              QueueSpotHolder &&queue_spot_holder,
+                                              FrameOnDisk frame1, FrameOnDisk frame2,
+                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha)
 {
+	if (secondary_frame.pts != -1) {
+		fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f secondary_pts=%ld fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
+	} else {
+		fprintf(stderr, "output_pts=%ld interpolated input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
+	}
+
+	// Get the temporary OpenGL resources we need for doing the interpolation.
+	BorrowedInterpolatedFrameResources resources;
+	{
+		unique_lock<mutex> lock(queue_lock);
+		if (interpolate_resources.empty()) {
+			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
+			return;
+		}
+		resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
+		interpolate_resources.pop_front();
+	}
+
+	QueuedFrame qf;
+	qf.type = (secondary_frame.pts == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
+	qf.output_pts = output_pts;
+	qf.display_decoded_func = move(display_func);
+	qf.queue_spot_holder = move(queue_spot_holder);
+	qf.local_pts = local_pts;
+
+	check_error();
+
+	// Convert frame0 and frame1 to OpenGL textures.
+	for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
+		FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
+		bool did_decode;
+		shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+		ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], 1280, 720);
+	}
+
+	glGenerateTextureMipmap(resources->input_tex);
+	check_error();
+	glGenerateTextureMipmap(resources->gray_tex);
+	check_error();
+
+	// Compute the interpolated frame.
+	qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+	check_error();
+
+	if (secondary_frame.pts != -1) {
+		// Fade. First kick off the interpolation.
+		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
+		check_error();
+
+		// Now decode the image we are fading against.
+		bool did_decode;
+		shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
+
+		// Then fade against it, putting it into the fade Y' and CbCr textures.
+		ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
+
+		// Subsample and split Cb/Cr.
+		chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+
+		interpolate_no_split->release_texture(qf.output_tex);
+	} else {
+		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
+		check_error();
+
+		// Subsample and split Cb/Cr.
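+		// (The interpolated CbCr output is interleaved; subsample_chroma
+		// splits it into the separate half-width Cb and Cr planes that
+		// encode_jpeg() takes.)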
+		chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+	}
+
+	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
+	// when trying to reuse it for the next frame, we can just as well hold on to it
+	// and release it only when the readback is done.
+
+	// Read it down (asynchronously) to the CPU.
+	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
+	check_error();
+	if (secondary_frame.pts != -1) {
+		glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+	} else {
+		glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+	}
+	check_error();
+	glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+	check_error();
+	glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+	check_error();
+	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+	// Set a fence we can wait for to make sure the CPU sees the read.
+	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+	check_error();
+	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+	check_error();
+	qf.resources = move(resources);
+
+	unique_lock<mutex> lock(queue_lock);
+	frame_queue.push_back(move(qf));
+	queue_changed.notify_all();
+}
+
+void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
+                                         int64_t output_pts, function<void()> &&display_func,
+                                         QueueSpotHolder &&queue_spot_holder)
+{
 	QueuedFrame qf;
+	qf.type = QueuedFrame::REFRESH;
 	qf.output_pts = output_pts;
-	qf.stream_idx = stream_idx;
-	qf.input_first_pts = input_pts;
-	frame_queue.push_back(qf);
-	queue_nonempty.notify_all();
+	qf.display_func = move(display_func);
+	qf.queue_spot_holder = move(queue_spot_holder);
+
+	unique_lock<mutex> lock(queue_lock);
+	frame_queue.push_back(move(qf));
+	queue_changed.notify_all();
+}
+
+namespace {
+
+shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
+{
+	size_t chroma_width = width / 2;
+
+	const uint8_t *y = (const uint8_t *)contents;
+	const uint8_t *cb = (const uint8_t *)contents + width * height;
+	const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;
+
+	shared_ptr<Frame> frame(new Frame);
+	frame->y.reset(new uint8_t[width * height]);
+	frame->cb.reset(new uint8_t[chroma_width * height]);
+	frame->cr.reset(new uint8_t[chroma_width * height]);
+	for (unsigned yy = 0; yy < height; ++yy) {
+		memcpy(frame->y.get() + width * yy, y + width * yy, width);
+		memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
+		memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
+	}
+	frame->is_semiplanar = false;
+	frame->width = width;
+	frame->height = height;
+	frame->chroma_subsampling_x = 2;
+	frame->chroma_subsampling_y = 1;
+	frame->pitch_y = width;
+	frame->pitch_chroma = chroma_width;
+	return frame;
 }
 
+}  // namespace
+
 void VideoStream::encode_thread_func()
 {
+	pthread_setname_np(pthread_self(), "VideoStream");
+	QSurface *surface = create_surface();
+	QOpenGLContext *context = create_context(surface);
+	bool ok = make_current(context, surface);
+	if (!ok) {
+		fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
+		exit(1);
+	}
+
 	for ( ;; ) {
 		QueuedFrame qf;
 		{
 			unique_lock<mutex> lock(queue_lock);
-			queue_nonempty.wait(lock, [this]{
+
+			// Wait until we have a frame to play.
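+			// (Two-stage wait: first until the queue is non-empty, then below
+			// until the frame's local_pts, so that clear_queue() can still
+			// cancel a frame that has not started playing yet.)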
+			queue_changed.wait(lock, [this]{
 				return !frame_queue.empty();
 			});
-			qf = frame_queue.front();
+			steady_clock::time_point frame_start = frame_queue.front().local_pts;
+
+			// Now sleep until the frame is supposed to start (the usual case),
+			// _or_ clear_queue() happened.
+			bool aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
+				return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
+			});
+			if (aborted) {
+				// clear_queue() happened, so don't play this frame after all.
+				continue;
+			}
+			qf = move(frame_queue.front());
 			frame_queue.pop_front();
 		}
 
 		if (qf.type == QueuedFrame::ORIGINAL) {
-			string jpeg = read_file(filename_for_frame(qf.stream_idx, qf.input_first_pts));
+			// Send the JPEG frame on, unchanged.
+			string jpeg = frame_reader.read_frame(qf.frame1);
+			AVPacket pkt;
+			av_init_packet(&pkt);
+			pkt.stream_index = 0;
+			pkt.data = (uint8_t *)jpeg.data();
+			pkt.size = jpeg.size();
+			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+
+			last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
+		} else if (qf.type == QueuedFrame::FADED) {
+			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
+
+			shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
+
+			// Now JPEG encode it, and send it on to the stream.
+			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
+
 			AVPacket pkt;
 			av_init_packet(&pkt);
 			pkt.stream_index = 0;
 			pkt.data = (uint8_t *)jpeg.data();
 			pkt.size = jpeg.size();
 			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
-		}
+			last_frame = move(jpeg);
+		} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
+			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
+
+			// Send it on to display.
+			shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
+			if (qf.display_decoded_func != nullptr) {
+				qf.display_decoded_func(frame);
+			}
+
+			// Now JPEG encode it, and send it on to the stream.
+			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
+			compute_flow->release_texture(qf.flow_tex);
+			if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
+				interpolate->release_texture(qf.output_tex);
+				interpolate->release_texture(qf.cbcr_tex);
+			}
+
+			AVPacket pkt;
+			av_init_packet(&pkt);
+			pkt.stream_index = 0;
+			pkt.data = (uint8_t *)jpeg.data();
+			pkt.size = jpeg.size();
+			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+			last_frame = move(jpeg);
+		} else if (qf.type == QueuedFrame::REFRESH) {
+			AVPacket pkt;
+			av_init_packet(&pkt);
+			pkt.stream_index = 0;
+			pkt.data = (uint8_t *)last_frame.data();
+			pkt.size = last_frame.size();
+			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+		} else {
+			assert(false);
+		}
+
+		if (qf.display_func != nullptr) {
+			qf.display_func();
+		}
 	}
 }
 
@@ -126,4 +654,3 @@ int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType ty
 	}
 	return buf_size;
 }
-