check_error();
OperatingPoint op;
- if (global_flags.interpolation_quality == 0) {
- // Allocate something just for simplicity; we won't be using it.
- op = operating_point1;
- } else if (global_flags.interpolation_quality == 1) {
+ if (global_flags.interpolation_quality == 0 ||
+ global_flags.interpolation_quality == 1) {
op = operating_point1;
} else if (global_flags.interpolation_quality == 2) {
op = operating_point2;
} else if (global_flags.interpolation_quality == 4) {
op = operating_point4;
} else {
+ // Quality 0 will be changed to 1 in flags.cpp.
assert(false);
}
last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height);
}
-VideoStream::~VideoStream() {}
+VideoStream::~VideoStream()
+{
+ if (last_flow_tex != 0) {
+ compute_flow->release_texture(last_flow_tex);
+ }
+}
// Set up the MJPEG mux and spawn the encode thread that drains frame_queue.
void VideoStream::start()
{
	size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
	// Note: MJPEG streams are video-only here, so no audio codec parameters
	// are passed; every frame is a keyframe.
	mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
		AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
	encode_thread = thread(&VideoStream::encode_thread_func, this);
}
// Shut down the encode thread and drain any pending work.
void VideoStream::stop()
{
	should_quit = true;
	// Wake up the encode thread if it is blocked waiting on an empty
	// frame_queue, so it can observe should_quit and exit; without this,
	// join() below could hang forever.
	queue_changed.notify_all();
	clear_queue();
	encode_thread.join();
}
for (const QueuedFrame &qf : q) {
if (qf.type == QueuedFrame::INTERPOLATED ||
qf.type == QueuedFrame::FADED_INTERPOLATED) {
- compute_flow->release_texture(qf.flow_tex);
+ if (qf.flow_tex != 0) {
+ compute_flow->release_texture(qf.flow_tex);
+ }
}
if (qf.type == QueuedFrame::INTERPOLATED) {
interpolate->release_texture(qf.output_tex);
glGenerateTextureMipmap(resources->gray_tex);
check_error();
- // Compute the interpolated frame.
- qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
- check_error();
+ GLuint flow_tex;
+ if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
+ // Reuse the flow from previous computation. This frequently happens
+ // if we slow down by more than 2x, so that there are multiple interpolated
+ // frames between each original.
+ flow_tex = last_flow_tex;
+ qf.flow_tex = 0;
+ } else {
+ // Cache miss, so release last_flow_tex.
+ qf.flow_tex = last_flow_tex;
+
+ // Compute the flow.
+ flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+ check_error();
+
+ // Store the flow texture for possible reuse next frame.
+ last_flow_tex = flow_tex;
+ last_frame1 = frame1;
+ last_frame2 = frame2;
+ }
if (secondary_frame.pts != -1) {
// Fade. First kick off the interpolation.
- tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha);
+ tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
check_error();
// Now decode the image we are fading against.
interpolate_no_split->release_texture(qf.output_tex);
} else {
- tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, global_flags.width, global_flags.height, alpha);
+ tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
check_error();
// Subsample and split Cb/Cr.
// We could have released qf.flow_tex here, but to make sure we don't cause a stall
// when trying to reuse it for the next frame, we can just as well hold on to it
// and release it only when the readback is done.
+ //
+ // TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
+ // _last_ frame, not this one.
// Read it down (asynchronously) to the CPU.
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
// Wait until we have a frame to play.
queue_changed.wait(lock, [this]{
- return !frame_queue.empty();
+ return !frame_queue.empty() || should_quit;
});
+ if (should_quit) {
+ break;
+ }
steady_clock::time_point frame_start = frame_queue.front().local_pts;
// Now sleep until the frame is supposed to start (the usual case),
pkt.stream_index = 0;
pkt.data = (uint8_t *)jpeg.data();
pkt.size = jpeg.size();
+ pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
pkt.stream_index = 0;
pkt.data = (uint8_t *)jpeg.data();
pkt.size = jpeg.size();
+ pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
// Now JPEG encode it, and send it on to the stream.
vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
- compute_flow->release_texture(qf.flow_tex);
+ if (qf.flow_tex != 0) {
+ compute_flow->release_texture(qf.flow_tex);
+ }
if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
interpolate->release_texture(qf.output_tex);
interpolate->release_texture(qf.cbcr_tex);
pkt.stream_index = 0;
pkt.data = (uint8_t *)jpeg.data();
pkt.size = jpeg.size();
+ pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
last_frame = move(jpeg);
} else if (qf.type == QueuedFrame::REFRESH) {
pkt.stream_index = 0;
pkt.data = (uint8_t *)last_frame.data();
pkt.size = last_frame.size();
+ pkt.flags = AV_PKT_FLAG_KEY;
mux->add_packet(pkt, qf.output_pts, qf.output_pts);
} else {
assert(false);