Make Futatabi fades apply white balance.
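This file's part of the change: the interpolated-fade path now remembers the Exif data of the newest input frame in QueuedFrame::exif_data, converts it to a neutral color with get_neutral_color(), and hands that to the fade chain; since the white balance is then baked into the faded pixels, the Exif data is cleared so the client does not apply it a second time. Interpolated frames that are not faded keep their Exif data and get it re-embedded as an APP1 marker by encode_jpeg(), so the client can still do the correction there. A condensed sketch of the flow, using only names from the patch below:

        // schedule_interpolated_frame(): keep the white point of the newest input frame.
        if (frame_no == 1) {
                qf.exif_data = frame->exif_data;
        }

        // When fading against a secondary frame, apply that white point in the chain
        // itself, then drop the Exif data so the client does not correct it twice.
        RGBTriplet neutral_color = get_neutral_color(qf.exif_data);
        ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(
                qf.output_tex, neutral_color, global_flags.width, global_flags.height,
                frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);
        qf.exif_data.clear();

        // Non-faded interpolated frames instead pass qf.exif_data on to encode_jpeg(),
        // which writes it into the bitstream as an APP1 (Exif) marker.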
diff --git a/futatabi/video_stream.cpp b/futatabi/video_stream.cpp
index 3df272ac08b5836b6f49c3277bff6de5504c3ef6..d7d9fd04bcc97b0a8f3a56dbb467daf70996f49c 100644
--- a/futatabi/video_stream.cpp
+++ b/futatabi/video_stream.cpp
@@ -6,14 +6,16 @@ extern "C" {
 }
 
 #include "chroma_subsampler.h"
-#include "context.h"
+#include "exif_parser.h"
 #include "flags.h"
 #include "flow.h"
-#include "httpd.h"
 #include "jpeg_frame_view.h"
 #include "movit/util.h"
-#include "shared/mux.h"
 #include "player.h"
+#include "shared/context.h"
+#include "shared/httpd.h"
+#include "shared/shared_defs.h"
+#include "shared/mux.h"
 #include "util.h"
 #include "ycbcr_converter.h"
 
@@ -21,6 +23,7 @@ extern "C" {
 #include <jpeglib.h>
 #include <unistd.h>
 
+using namespace movit;
 using namespace std;
 using namespace std::chrono;
 
@@ -28,7 +31,7 @@ extern HTTPD *global_httpd;
 
 struct VectorDestinationManager {
        jpeg_destination_mgr pub;
-       std::vector<uint8_t> dest;
+       string dest;
 
        VectorDestinationManager()
        {
@@ -62,7 +65,7 @@ struct VectorDestinationManager {
        {
                dest.resize(bytes_used + 4096);
                dest.resize(dest.capacity());
-               pub.next_output_byte = dest.data() + bytes_used;
+               pub.next_output_byte = (uint8_t *)dest.data() + bytes_used;
                pub.free_in_buffer = dest.size() - bytes_used;
        }
 
@@ -78,7 +81,7 @@ struct VectorDestinationManager {
 };
 static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
 
-vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
+string encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height, const string exif_data)
 {
        VectorDestinationManager dest;
 
@@ -107,6 +110,14 @@ vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const
        cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(&cinfo, true);
 
+       // This comment marker is private to FFmpeg. It signals limited Y'CbCr range
+       // (and nothing else).
+       jpeg_write_marker(&cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
+
+       if (!exif_data.empty()) {
+               jpeg_write_marker(&cinfo, JPEG_APP0 + 1, (const JOCTET *)exif_data.data(), exif_data.size());
+       }
+
        JSAMPROW yptr[8], cbptr[8], crptr[8];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
        for (unsigned y = 0; y < height; y += 8) {
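Both markers above are written verbatim right after jpeg_start_compress(), before any scanlines, so a produced frame can be sanity-checked with a plain byte search. A minimal sketch (y, cb, cr, width, height and exif_data stand in for whatever the caller has; the usual "Exif\0\0" payload header is assumed to be prepared where exif_data is built, outside this file):

        // Marker payloads are stored verbatim, so they show up as contiguous byte runs.
        string jpeg = encode_jpeg(y, cb, cr, width, height, exif_data);
        assert(jpeg.find("CS=ITU601") != string::npos);
        if (!exif_data.empty()) {
                assert(jpeg.find(exif_data) != string::npos);
        }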
@@ -125,7 +136,8 @@ vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const
        return move(dest.dest);
 }
 
-VideoStream::VideoStream()
+VideoStream::VideoStream(AVFormatContext *file_avctx)
+       : avctx(file_avctx), output_fast_forward(file_avctx != nullptr)
 {
        ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
        ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));
@@ -142,7 +154,7 @@ VideoStream::VideoStream()
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
        check_error();
 
-       constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
+       size_t width = global_flags.width, height = global_flags.height;
        int levels = find_num_levels(width, height);
        for (size_t i = 0; i < num_interpolate_slots; ++i) {
                glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
@@ -203,7 +215,8 @@ VideoStream::VideoStream()
        check_error();
 
        OperatingPoint op;
-       if (global_flags.interpolation_quality == 1) {
+       if (global_flags.interpolation_quality == 0 ||
+           global_flags.interpolation_quality == 1) {
                op = operating_point1;
        } else if (global_flags.interpolation_quality == 2) {
                op = operating_point2;
@@ -212,6 +225,7 @@ VideoStream::VideoStream()
        } else if (global_flags.interpolation_quality == 4) {
                op = operating_point4;
        } else {
+               // Quality 0 will be changed to 1 in flags.cpp.
                assert(false);
        }
 
@@ -222,42 +236,90 @@ VideoStream::VideoStream()
        check_error();
 
        // The “last frame” is initially black.
-       unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]);
-       unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]);
-       memset(y.get(), 16, 1280 * 720);
-       memset(cb_or_cr.get(), 128, 640 * 720);
-       last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720);
+       unique_ptr<uint8_t[]> y(new uint8_t[global_flags.width * global_flags.height]);
+       unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[(global_flags.width / 2) * global_flags.height]);
+       memset(y.get(), 16, global_flags.width * global_flags.height);
+       memset(cb_or_cr.get(), 128, (global_flags.width / 2) * global_flags.height);
+       last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");
+
+       if (file_avctx != nullptr) {
+               with_subtitles = Mux::WITHOUT_SUBTITLES;
+       } else {
+               with_subtitles = Mux::WITH_SUBTITLES;
+       }
 }
 
-VideoStream::~VideoStream() {}
+VideoStream::~VideoStream()
+{
+       if (last_flow_tex != 0) {
+               compute_flow->release_texture(last_flow_tex);
+       }
+
+       for (const unique_ptr<InterpolatedFrameResources> &resource : interpolate_resources) {
+               glUnmapNamedBuffer(resource->pbo);
+               check_error();
+               glDeleteBuffers(1, &resource->pbo);
+               check_error();
+               glDeleteFramebuffers(2, resource->input_fbos);
+               check_error();
+               glDeleteFramebuffers(1, &resource->fade_fbo);
+               check_error();
+               glDeleteTextures(1, &resource->input_tex);
+               check_error();
+               glDeleteTextures(1, &resource->gray_tex);
+               check_error();
+               glDeleteTextures(1, &resource->fade_y_output_tex);
+               check_error();
+               glDeleteTextures(1, &resource->fade_cbcr_output_tex);
+               check_error();
+               glDeleteTextures(1, &resource->cb_tex);
+               check_error();
+               glDeleteTextures(1, &resource->cr_tex);
+               check_error();
+       }
+       assert(interpolate_resources.size() == num_interpolate_slots);
+}
 
 void VideoStream::start()
 {
-       AVFormatContext *avctx = avformat_alloc_context();
-       avctx->oformat = av_guess_format("nut", nullptr, nullptr);
+       if (avctx == nullptr) {
+               avctx = avformat_alloc_context();
 
-       uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
-       avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
-       avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
-       avctx->pb->ignore_boundary_point = 1;
+               // We use Matroska, because it's pretty much the only mux where FFmpeg
+               // allows writing chroma location to override JFIF's default center placement.
+               // (Note that at the time of writing, however, FFmpeg does not correctly
+               // _read_ this information!)
+               avctx->oformat = av_guess_format("matroska", nullptr, nullptr);
 
-       Mux::Codec video_codec = Mux::CODEC_MJPEG;
+               uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+               avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
+               avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
+               avctx->pb->ignore_boundary_point = 1;
 
-       avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+               avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+       }
 
-       string video_extradata;
+       AVCodecParameters *audio_codecpar = avcodec_parameters_alloc();
 
-       constexpr int width = 1280, height = 720;  // Doesn't matter for MJPEG.
-       stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr,
-               AVCOL_SPC_BT709, Mux::WITHOUT_AUDIO,
-               COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));
+       audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+       audio_codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
+       audio_codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+       audio_codecpar->channels = 2;
+       audio_codecpar->sample_rate = OUTPUT_FREQUENCY;
 
+       size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
+       mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", audio_codecpar,
+                         AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, with_subtitles));
 
+       avcodec_parameters_free(&audio_codecpar);
        encode_thread = thread(&VideoStream::encode_thread_func, this);
 }
 
 void VideoStream::stop()
 {
+       should_quit = true;
+       queue_changed.notify_all();
+       clear_queue();
        encode_thread.join();
 }
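The new file_avctx argument is what flips the stream between live streaming and file export: with a non-null context, start() skips the custom-I/O setup, encode_thread_func() runs without real-time waits (output_fast_forward), and the subtitle track is left out. A hypothetical export-style caller might look roughly like this (the filename and the lack of error handling are illustrative only, not from the patch):

        AVFormatContext *file_avctx = nullptr;
        avformat_alloc_output_context2(&file_avctx, nullptr, "matroska", "/tmp/export.mkv");
        avio_open2(&file_avctx->pb, "/tmp/export.mkv", AVIO_FLAG_WRITE, nullptr, nullptr);

        VideoStream video_stream(file_avctx);  // file mode: fast-forward, no subtitles
        video_stream.start();
        // ... schedule_original_frame() / schedule_interpolated_frame() as usual ...
        video_stream.stop();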
 
@@ -266,7 +328,7 @@ void VideoStream::clear_queue()
        deque<QueuedFrame> q;
 
        {
-               unique_lock<mutex> lock(queue_lock);
+               lock_guard<mutex> lock(queue_lock);
                q = move(frame_queue);
        }
 
@@ -275,7 +337,9 @@ void VideoStream::clear_queue()
        for (const QueuedFrame &qf : q) {
                if (qf.type == QueuedFrame::INTERPOLATED ||
                    qf.type == QueuedFrame::FADED_INTERPOLATED) {
-                       compute_flow->release_texture(qf.flow_tex);
+                       if (qf.flow_tex != 0) {
+                               compute_flow->release_texture(qf.flow_tex);
+                       }
                }
                if (qf.type == QueuedFrame::INTERPOLATED) {
                        interpolate->release_texture(qf.output_tex);
@@ -289,23 +353,22 @@ void VideoStream::clear_queue()
 void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
                                           int64_t output_pts, function<void()> &&display_func,
                                           QueueSpotHolder &&queue_spot_holder,
-                                          FrameOnDisk frame)
+                                          FrameOnDisk frame, const string &subtitle, bool include_audio)
 {
-       fprintf(stderr, "output_pts=%ld  original      input_pts=%ld\n", output_pts, frame.pts);
-
-       // Preload the file from disk, so that the encoder thread does not get stalled.
-       // TODO: Consider sending it through the queue instead.
-       (void)frame_reader.read_frame(frame);
+       fprintf(stderr, "output_pts=%" PRId64 "  original      input_pts=%" PRId64 "\n", output_pts, frame.pts);
 
        QueuedFrame qf;
        qf.local_pts = local_pts;
        qf.type = QueuedFrame::ORIGINAL;
        qf.output_pts = output_pts;
-       qf.frame1 = frame;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
+       qf.subtitle = subtitle;
+       FrameReader::Frame read_frame = frame_reader.read_frame(frame, /*read_video=*/true, include_audio);
+       qf.encoded_jpeg.reset(new string(move(read_frame.video)));
+       qf.audio = move(read_frame.audio);
 
-       unique_lock<mutex> lock(queue_lock);
+       lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
 }
@@ -314,9 +377,9 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
                                        function<void()> &&display_func,
                                        QueueSpotHolder &&queue_spot_holder,
                                        FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
-                                       float fade_alpha)
+                                       float fade_alpha, const string &subtitle)
 {
-       fprintf(stderr, "output_pts=%ld  faded         input_pts=%ld,%ld  fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
+       fprintf(stderr, "output_pts=%" PRId64 "  faded         input_pts=%" PRId64 ",%" PRId64 "  fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);
 
        // Get the temporary OpenGL resources we need for doing the fade.
        // (We share these with interpolated frames, which is slightly
@@ -324,7 +387,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
        // separate pools around.)
        BorrowedInterpolatedFrameResources resources;
        {
-               unique_lock<mutex> lock(queue_lock);
+               lock_guard<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        return;
@@ -338,7 +401,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
        shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
        shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
 
-       ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
+       ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);
 
        QueuedFrame qf;
        qf.local_pts = local_pts;
@@ -347,21 +410,22 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
        qf.frame1 = frame1_spec;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
+       qf.subtitle = subtitle;
 
        qf.secondary_frame = frame2_spec;
 
        // Subsample and split Cb/Cr.
-       chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+       chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
 
        // Read it down (asynchronously) to the CPU.
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
        check_error();
-       glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+       glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        check_error();
-       glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+       glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
        check_error();
-       glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+       glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
 
@@ -373,7 +437,7 @@ void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64
        qf.resources = move(resources);
        qf.local_pts = local_pts;
 
-       unique_lock<mutex> lock(queue_lock);
+       lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
 }
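The readback above lays the planes back to back in the PBO as planar 4:2:2: a full-resolution Y plane, then Cb and Cr at half horizontal width. The byte counts handed to glGetTextureImage() are generous upper bounds on the space left in the buffer rather than exact plane sizes, which is why the Cr call subtracts the Cb plane from the running total. The offsets themselves, restated (just the arithmetic above, not a helper from the patch):

        size_t y_size  = width * height;        // Y  plane at offset 0
        size_t cb_size = (width / 2) * height;  // Cb plane at offset y_size
        size_t cr_size = (width / 2) * height;  // Cr plane at offset y_size + cb_size
        // Total payload: y_size + cb_size + cr_size == width * height * 2 bytes.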
@@ -382,18 +446,19 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
                                               int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
                                               QueueSpotHolder &&queue_spot_holder,
                                               FrameOnDisk frame1, FrameOnDisk frame2,
-                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha)
+                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle,
+                                              bool play_audio)
 {
        if (secondary_frame.pts != -1) {
-               fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f  secondary_pts=%ld  fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
+               fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f  secondary_pts=%" PRId64 "  fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
        } else {
-               fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
+               fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
        }
 
        // Get the temporary OpenGL resources we need for doing the interpolation.
        BorrowedInterpolatedFrameResources resources;
        {
-               unique_lock<mutex> lock(queue_lock);
+               lock_guard<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        return;
@@ -408,6 +473,11 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
        qf.display_decoded_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.local_pts = local_pts;
+       qf.subtitle = subtitle;
+
+       if (play_audio) {
+               qf.audio = frame_reader.read_frame(frame1, /*read_video=*/false, /*read_audio=*/true).audio;
+       }
 
        check_error();
 
@@ -416,7 +486,10 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
                FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
                bool did_decode;
                shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
-               ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], 1280, 720);
+               ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], global_flags.width, global_flags.height);
+               if (frame_no == 1) {
+                       qf.exif_data = frame->exif_data;  // Use the white point from the last frame.
+               }
        }
 
        glGenerateTextureMipmap(resources->input_tex);
@@ -424,13 +497,30 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
        glGenerateTextureMipmap(resources->gray_tex);
        check_error();
 
-       // Compute the interpolated frame.
-       qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
-       check_error();
+       GLuint flow_tex;
+       if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
+               // Reuse the flow from previous computation. This frequently happens
+               // if we slow down by more than 2x, so that there are multiple interpolated
+               // frames between each original.
+               flow_tex = last_flow_tex;
+               qf.flow_tex = 0;
+       } else {
+               // Cache miss, so release last_flow_tex.
+               qf.flow_tex = last_flow_tex;
+
+               // Compute the flow.
+               flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
+               check_error();
+
+               // Store the flow texture for possible reuse next frame.
+               last_flow_tex = flow_tex;
+               last_frame1 = frame1;
+               last_frame2 = frame2;
+       }
 
        if (secondary_frame.pts != -1) {
                // Fade. First kick off the interpolation.
-               tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
+               tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
                check_error();
 
                // Now decode the image we are fading against.
@@ -438,37 +528,44 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
                shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
 
                // Then fade against it, putting it into the fade Y' and CbCr textures.
-               ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);
+               RGBTriplet neutral_color = get_neutral_color(qf.exif_data);
+               ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, neutral_color, global_flags.width, global_flags.height, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);
 
                // Subsample and split Cb/Cr.
-               chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+               chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
 
                interpolate_no_split->release_texture(qf.output_tex);
+
+               // We already applied the white balance, so don't have the client redo it.
+               qf.exif_data.clear();
        } else {
-               tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
+               tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
                check_error();
 
                // Subsample and split Cb/Cr.
-               chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
+               chroma_subsampler->subsample_chroma(qf.cbcr_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
        }
 
        // We could have released qf.flow_tex here, but to make sure we don't cause a stall
        // when trying to reuse it for the next frame, we can just as well hold on to it
        // and release it only when the readback is done.
+       //
+       // TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
+       // _last_ frame, not this one.
 
        // Read it down (asynchronously) to the CPU.
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
        check_error();
        if (secondary_frame.pts != -1) {
-               glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+               glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        } else {
-               glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+               glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        }
        check_error();
-       glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+       glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
        check_error();
-       glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+       glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
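The flow cache above also shifts who is responsible for releasing what: on a cache hit the queued frame merely borrows last_flow_tex and owns nothing (qf.flow_tex = 0); on a miss it takes over the previous flow texture and releases it only once its own readback has finished, which is what the updated TODO refers to. A compressed restatement (cache_hit is shorthand, not a variable in the patch):

        if (cache_hit) {                      // same (frame1, frame2) pair as last time
                flow_tex = last_flow_tex;     // borrowed; nothing to release for this frame
                qf.flow_tex = 0;
        } else {
                qf.flow_tex = last_flow_tex;  // the previous flow, released after readback
                flow_tex = compute_flow->exec(resources->gray_tex,
                                              DISComputeFlow::FORWARD_AND_BACKWARD,
                                              DISComputeFlow::DO_NOT_RESIZE_FLOW);
                last_flow_tex = flow_tex;     // kept alive for possible reuse next frame
        }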
 
@@ -479,22 +576,37 @@ void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts
        check_error();
        qf.resources = move(resources);
 
-       unique_lock<mutex> lock(queue_lock);
+       lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
 }
 
 void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
                                          int64_t output_pts, function<void()> &&display_func,
-                                         QueueSpotHolder &&queue_spot_holder)
+                                         QueueSpotHolder &&queue_spot_holder, const string &subtitle)
 {
        QueuedFrame qf;
        qf.type = QueuedFrame::REFRESH;
        qf.output_pts = output_pts;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
+       qf.subtitle = subtitle;
+
+       lock_guard<mutex> lock(queue_lock);
+       frame_queue.push_back(move(qf));
+       queue_changed.notify_all();
+}
+
+void VideoStream::schedule_silence(steady_clock::time_point local_pts, int64_t output_pts,
+                                   int64_t length_pts, QueueSpotHolder &&queue_spot_holder)
+{
+       QueuedFrame qf;
+       qf.type = QueuedFrame::SILENCE;
+       qf.output_pts = output_pts;
+       qf.queue_spot_holder = move(queue_spot_holder);
+       qf.silence_length_pts = length_pts;
 
-       unique_lock<mutex> lock(queue_lock);
+       lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
 }
@@ -538,25 +650,33 @@ void VideoStream::encode_thread_func()
        bool ok = make_current(context, surface);
        if (!ok) {
                fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
-               exit(1);
+               abort();
        }
 
-       for ( ;; ) {
+       while (!should_quit) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(queue_lock);
 
                        // Wait until we have a frame to play.
-                       queue_changed.wait(lock, [this]{
-                               return !frame_queue.empty();
+                       queue_changed.wait(lock, [this] {
+                               return !frame_queue.empty() || should_quit;
                        });
+                       if (should_quit) {
+                               break;
+                       }
                        steady_clock::time_point frame_start = frame_queue.front().local_pts;
 
                        // Now sleep until the frame is supposed to start (the usual case),
                        // _or_ clear_queue() happened.
-                       bool aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
-                               return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
-                       });
+                       bool aborted;
+                       if (output_fast_forward) {
+                               aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
+                       } else {
+                               aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
+                                       return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
+                               });
+                       }
                        if (aborted) {
                                // clear_queue() happened, so don't play this frame after all.
                                continue;
@@ -565,44 +685,66 @@ void VideoStream::encode_thread_func()
                        frame_queue.pop_front();
                }
 
+               // Hack: We mux the subtitle packet one time unit before the actual frame,
+               // so that Nageru is sure to get it first.
+               if (!qf.subtitle.empty() && with_subtitles == Mux::WITH_SUBTITLES) {
+                       AVPacket pkt;
+                       av_init_packet(&pkt);
+                       pkt.stream_index = mux->get_subtitle_stream_idx();
+                       assert(pkt.stream_index != -1);
+                       pkt.data = (uint8_t *)qf.subtitle.data();
+                       pkt.size = qf.subtitle.size();
+                       pkt.flags = 0;
+                       pkt.duration = lrint(TIMEBASE / global_flags.output_framerate);  // Doesn't really matter for Nageru.
+                       mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
+               }
+
                if (qf.type == QueuedFrame::ORIGINAL) {
                        // Send the JPEG frame on, unchanged.
-                       string jpeg = frame_reader.read_frame(qf.frame1);
+                       string jpeg = move(*qf.encoded_jpeg);
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
-                       stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+                       pkt.flags = AV_PKT_FLAG_KEY;
+                       mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+                       last_frame = move(jpeg);
 
-                       last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
+                       add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::FADED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 
-                       shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
+                       shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
+                       assert(frame->exif_data.empty());
 
                        // Now JPEG encode it, and send it on to the stream.
-                       vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
+                       string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");
 
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
-                       stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+                       pkt.flags = AV_PKT_FLAG_KEY;
+                       mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);
+
+                       add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 
                        // Send it on to display.
-                       shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
+                       shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
                        if (qf.display_decoded_func != nullptr) {
                                qf.display_decoded_func(frame);
                        }
 
                        // Now JPEG encode it, and send it on to the stream.
-                       vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
-                       compute_flow->release_texture(qf.flow_tex);
+                       string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height, move(qf.exif_data));
+                       if (qf.flow_tex != 0) {
+                               compute_flow->release_texture(qf.flow_tex);
+                       }
                        if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
                                interpolate->release_texture(qf.output_tex);
                                interpolate->release_texture(qf.cbcr_tex);
@@ -613,15 +755,23 @@ void VideoStream::encode_thread_func()
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
-                       stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+                       pkt.flags = AV_PKT_FLAG_KEY;
+                       mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);
+
+                       add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::REFRESH) {
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)last_frame.data();
                        pkt.size = last_frame.size();
-                       stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+                       pkt.flags = AV_PKT_FLAG_KEY;
+                       mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+
+                       add_audio_or_silence(qf);  // Definitely silence.
+               } else if (qf.type == QueuedFrame::SILENCE) {
+                       add_silence(qf.output_pts, qf.silence_length_pts);
                } else {
                        assert(false);
                }
@@ -649,9 +799,44 @@ int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType ty
 
        if (type == AVIO_DATA_MARKER_HEADER) {
                stream_mux_header.append((char *)buf, buf_size);
-               global_httpd->set_header(stream_mux_header);
+               global_httpd->set_header(HTTPD::MAIN_STREAM, stream_mux_header);
        } else {
-               global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
+               global_httpd->add_data(HTTPD::MAIN_STREAM, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
        }
        return buf_size;
 }
+
+void VideoStream::add_silence(int64_t pts, int64_t length_pts)
+{
+       // At 59.94, this will never quite add up (even discounting refresh frames,
+       // which have unpredictable length), but hopefully, the player in the other
+       // end should be able to stretch silence easily enough.
+       long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2;
+       uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t));
+
+       AVPacket pkt;
+       av_init_packet(&pkt);
+       pkt.stream_index = 1;
+       pkt.data = zero;
+       pkt.size = num_samples * sizeof(int32_t);
+       pkt.flags = AV_PKT_FLAG_KEY;
+       mux->add_packet(pkt, pts, pts);
+
+       free(zero);
+}
+
+void VideoStream::add_audio_or_silence(const QueuedFrame &qf)
+{
+       if (qf.audio.empty()) {
+               int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate);
+               add_silence(qf.output_pts, frame_length);
+       } else {
+               AVPacket pkt;
+               av_init_packet(&pkt);
+               pkt.stream_index = 1;
+               pkt.data = (uint8_t *)qf.audio.data();
+               pkt.size = qf.audio.size();
+               pkt.flags = AV_PKT_FLAG_KEY;
+               mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+       }
+}
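To make the drift mentioned in add_silence() concrete: assuming the usual 48 kHz OUTPUT_FREQUENCY and a 59.94 fps output, one frame corresponds to 48000 / 59.94 ≈ 800.8 samples per channel, so lrint() emits 801 samples for every frame and the silence runs about 0.2 samples per frame ahead of real time, which is why it never quite adds up. The same per-frame length is what add_audio_or_silence() falls back to when a frame carries no audio. In numbers (48000 and 59.94 are assumptions; TIMEBASE cancels out):

        int64_t frame_length_pts = lrint(double(TIMEBASE) / 59.94);
        long samples_per_channel = lrint(frame_length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE));
        // samples_per_channel == 801 (the true length is about 800.8 samples), so each
        // stereo S32LE silence packet is 801 * 2 * sizeof(int32_t) == 6408 bytes.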