Release Nageru 1.7.2.
index 093e63e7214d28296a17fb92d6eebf97effd9154..7a3f437d6ec745048c57be3283005cbf94597937 100644
--- a/mixer.cpp
+++ b/mixer.cpp
@@ -14,7 +14,6 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <sys/resource.h>
 #include <algorithm>
 #include <chrono>
 #include <condition_variable>
 #include "DeckLinkAPI.h"
 #include "LinuxCOM.h"
 #include "alsa_output.h"
+#include "basic_stats.h"
 #include "bmusb/bmusb.h"
 #include "bmusb/fake_capture.h"
+#ifdef HAVE_CEF
+#include "cef_capture.h"
+#endif
 #include "chroma_subsampler.h"
 #include "context.h"
 #include "decklink_capture.h"
 #include "decklink_output.h"
 #include "defs.h"
 #include "disk_space_estimator.h"
+#include "ffmpeg_capture.h"
 #include "flags.h"
 #include "input_mapping.h"
+#include "metrics.h"
 #include "pbo_frame_allocator.h"
 #include "ref_counted_gl_sync.h"
 #include "resampling_queue.h"
 #include "timebase.h"
+#include "timecode_renderer.h"
+#include "v210_converter.h"
 #include "video_encoder.h"
 
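+// Status is #defined by the X11 headers (pulled in indirectly above) and
+// collides with the protobuf headers below, so get rid of it first.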
+#undef Status
+#include <google/protobuf/util/json_util.h>
+#include "json.pb.h"
+
 class IDeckLink;
 class QOpenGLContext;
 
@@ -57,7 +68,6 @@ using namespace std::placeholders;
 using namespace bmusb;
 
 Mixer *global_mixer = nullptr;
-bool uses_mlock = false;
 
 namespace {
 
@@ -76,33 +86,217 @@ void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced
        }
 }
 
-}  // namespace
-
-void QueueLengthPolicy::update_policy(unsigned queue_length)
+void ensure_texture_resolution(PBOFrameAllocator::Userdata *userdata, unsigned field, unsigned width, unsigned height, unsigned cbcr_width, unsigned cbcr_height, unsigned v210_width)
 {
-       if (queue_length == 0) {  // Starvation.
-               if (been_at_safe_point_since_last_starvation && safe_queue_length < 6) {
-                       ++safe_queue_length;
-                       fprintf(stderr, "Card %u: Starvation, increasing safe limit to %u frame(s)\n",
-                               card_index, safe_queue_length);
+       bool first;
+       switch (userdata->pixel_format) {
+       case PixelFormat_10BitYCbCr:
+               first = userdata->tex_v210[field] == 0 || userdata->tex_444[field] == 0;
+               break;
+       case PixelFormat_8BitYCbCr:
+               first = userdata->tex_y[field] == 0 || userdata->tex_cbcr[field] == 0;
+               break;
+       case PixelFormat_8BitBGRA:
+               first = userdata->tex_rgba[field] == 0;
+               break;
+       case PixelFormat_8BitYCbCrPlanar:
+               first = userdata->tex_y[field] == 0 || userdata->tex_cb[field] == 0 || userdata->tex_cr[field] == 0;
+               break;
+       default:
+               assert(false);
+       }
+
+       if (first ||
+           width != userdata->last_width[field] ||
+           height != userdata->last_height[field] ||
+           cbcr_width != userdata->last_cbcr_width[field] ||
+           cbcr_height != userdata->last_cbcr_height[field]) {
+               // We changed resolution since last use of this texture, so we need to create
+               // a new object. Note that since each card has its own PBOFrameAllocator,
+               // we don't need to worry about these flip-flopping between resolutions.
+               switch (userdata->pixel_format) {
+               case PixelFormat_10BitYCbCr:
+                       glBindTexture(GL_TEXTURE_2D, userdata->tex_444[field]);
+                       check_error();
+                       glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB10_A2, width, height, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, nullptr);
+                       check_error();
+                       break;
+               case PixelFormat_8BitYCbCr: {
+                       glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
+                       check_error();
+                       glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr);
+                       check_error();
+                       glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
+                       check_error();
+                       glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
+                       check_error();
+                       break;
                }
-               frames_with_at_least_one = 0;
-               been_at_safe_point_since_last_starvation = false;
-               return;
+               case PixelFormat_8BitYCbCrPlanar: {
+                       glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
+                       check_error();
+                       glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
+                       check_error();
+                       glBindTexture(GL_TEXTURE_2D, userdata->tex_cb[field]);
+                       check_error();
+                       glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, cbcr_width, cbcr_height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
+                       check_error();
+                       glBindTexture(GL_TEXTURE_2D, userdata->tex_cr[field]);
+                       check_error();
+                       glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, cbcr_width, cbcr_height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
+                       check_error();
+                       break;
+               }
+               case PixelFormat_8BitBGRA:
+                       glBindTexture(GL_TEXTURE_2D, userdata->tex_rgba[field]);
+                       check_error();
+                       if (global_flags.can_disable_srgb_decoder) {  // See the comments in tweaked_inputs.h.
+                               glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
+                       } else {
+                               glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
+                       }
+                       check_error();
+                       break;
+               default:
+                       assert(false);
+               }
+               userdata->last_width[field] = width;
+               userdata->last_height[field] = height;
+               userdata->last_cbcr_width[field] = cbcr_width;
+               userdata->last_cbcr_height[field] = cbcr_height;
        }
-       if (queue_length >= 1) {
-               if (queue_length >= safe_queue_length) {
-                       been_at_safe_point_since_last_starvation = true;
+       if (global_flags.ten_bit_input &&
+           (first || v210_width != userdata->last_v210_width[field])) {
+               // Same as above; we need to recreate the texture.
+               glBindTexture(GL_TEXTURE_2D, userdata->tex_v210[field]);
+               check_error();
+               glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB10_A2, v210_width, height, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, nullptr);
+               check_error();
+               userdata->last_v210_width[field] = v210_width;
+       }
+}
+
+void upload_texture(GLuint tex, GLuint width, GLuint height, GLuint stride, bool interlaced_stride, GLenum format, GLenum type, GLintptr offset)
+{
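+       // With interlaced input, the two fields are interleaved line by line in
+       // the source buffer, so the range to flush covers twice the stride per
+       // row; the actual skipping of the other field's lines happens through
+       // GL_UNPACK_ROW_LENGTH below.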
+       if (interlaced_stride) {
+               stride *= 2;
+       }
+       if (global_flags.flush_pbos) {
+               glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, offset, stride * height);
+               check_error();
+       }
+
+       glBindTexture(GL_TEXTURE_2D, tex);
+       check_error();
+       if (interlaced_stride) {
+               glPixelStorei(GL_UNPACK_ROW_LENGTH, width * 2);
+               check_error();
+       } else {
+               glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+               check_error();
+       }
+
+       glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, BUFFER_OFFSET(offset));
+       check_error();
+       glBindTexture(GL_TEXTURE_2D, 0);
+       check_error();
+       glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+       check_error();
+}
+
+}  // namespace
+
+void JitterHistory::register_metrics(const vector<pair<string, string>> &labels)
+{
+       global_metrics.add("input_underestimated_jitter_frames", labels, &metric_input_underestimated_jitter_frames);
+       global_metrics.add("input_estimated_max_jitter_seconds", labels, &metric_input_estimated_max_jitter_seconds, Metrics::TYPE_GAUGE);
+}
+
+void JitterHistory::unregister_metrics(const vector<pair<string, string>> &labels)
+{
+       global_metrics.remove("input_underestimated_jitter_frames", labels);
+       global_metrics.remove("input_estimated_max_jitter_seconds", labels);
+}
+
+void JitterHistory::frame_arrived(steady_clock::time_point now, int64_t frame_duration, size_t dropped_frames)
+{
+       if (expected_timestamp > steady_clock::time_point::min()) {
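+               // <frame_duration> is in TIMEBASE units (1/TIMEBASE of a second),
+               // so scale it to nanoseconds, and skip past any frames the driver
+               // reported as dropped.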
+               expected_timestamp += dropped_frames * nanoseconds(frame_duration * 1000000000 / TIMEBASE);
+               double jitter_seconds = fabs(duration<double>(expected_timestamp - now).count());
+               history.push_back(orders.insert(jitter_seconds));
+               if (jitter_seconds > estimate_max_jitter()) {
+                       ++metric_input_underestimated_jitter_frames;
                }
-               if (++frames_with_at_least_one >= 1000 && safe_queue_length > 1) {
-                       --safe_queue_length;
-                       fprintf(stderr, "Card %u: Spare frames for more than 1000 frames, reducing safe limit to %u frame(s)\n",
-                               card_index, safe_queue_length);
-                       frames_with_at_least_one = 0;
+
+               metric_input_estimated_max_jitter_seconds = estimate_max_jitter();
+
+               if (history.size() > history_length) {
+                       orders.erase(history.front());
+                       history.pop_front();
                }
+               assert(history.size() <= history_length);
+       }
+       expected_timestamp = now + nanoseconds(frame_duration * 1000000000 / TIMEBASE);
+}
+
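+// Estimates the worst-case jitter as a quantile of the recorded jitter history,
+// scaled by a safety factor. <orders> is kept sorted, so this is an order
+// statistic; we walk in from whichever end of the container is closer.
+// (<percentile> and <multiplier> are class constants, not shown in this diff.)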
+double JitterHistory::estimate_max_jitter() const
+{
+       if (orders.empty()) {
+               return 0.0;
+       }
+       size_t elem_idx = lrint((orders.size() - 1) * percentile);
+       if (percentile <= 0.5) {
+               return *next(orders.begin(), elem_idx) * multiplier;
+       } else {
+               return *prev(orders.end(), orders.size() - elem_idx) * multiplier;
+       }
+}
+
+void QueueLengthPolicy::register_metrics(const vector<pair<string, string>> &labels)
+{
+       global_metrics.add("input_queue_safe_length_frames", labels, &metric_input_queue_safe_length_frames, Metrics::TYPE_GAUGE);
+}
+
+void QueueLengthPolicy::unregister_metrics(const vector<pair<string, string>> &labels)
+{
+       global_metrics.remove("input_queue_safe_length_frames", labels);
+}
+
+void QueueLengthPolicy::update_policy(steady_clock::time_point now,
+                                      steady_clock::time_point expected_next_frame,
+                                      int64_t input_frame_duration,
+                                      int64_t master_frame_duration,
+                                      double max_input_card_jitter_seconds,
+                                      double max_master_card_jitter_seconds)
+{
+       double input_frame_duration_seconds = input_frame_duration / double(TIMEBASE);
+       double master_frame_duration_seconds = master_frame_duration / double(TIMEBASE);
+
+       // Figure out when we can expect the next frame for this card, assuming
+       // worst-case jitter (i.e., the frame is maximally late).
+       double seconds_until_next_frame = max(duration<double>(expected_next_frame - now).count() + max_input_card_jitter_seconds, 0.0);
+
+       // How many times is the master card expected to tick in that time?
+       // We assume the master clock has worst-case jitter but no rate
+       // discrepancy, i.e., it ticks as early as possible every time, but not
+       // cumulatively.
+       double frames_needed = (seconds_until_next_frame + max_master_card_jitter_seconds) / master_frame_duration_seconds;
+
+       // As a special case, if the master card ticks faster than the input card,
+       // we expect the queue to drain by itself even without dropping. But if
+       // the difference is small (e.g., a 60 Hz master and a 59.94 Hz input),
+       // it would go slowly enough that the effect wouldn't really be appreciable.
+       // We account for this by looking at the situation five frames ahead,
+       // assuming everything else is the same.
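+       // (For instance, with a 25 Hz input and a 50 Hz master, this allows
+       // 5 * (0.040 - 0.020) / 0.020 = 5 extra frames in the queue.)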
+       double frames_allowed;
+       if (master_frame_duration < input_frame_duration) {
+               frames_allowed = frames_needed + 5 * (input_frame_duration_seconds - master_frame_duration_seconds) / master_frame_duration_seconds;
        } else {
-               frames_with_at_least_one = 0;
+               frames_allowed = frames_needed;
        }
+
+       safe_queue_length = max<int>(floor(frames_allowed), 0);
+       metric_input_queue_safe_length_frames = safe_queue_length;
 }
 
 Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
@@ -110,18 +304,22 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
          num_cards(num_cards),
          mixer_surface(create_surface(format)),
          h264_encoder_surface(create_surface(format)),
-         decklink_output_surface(create_surface(format)),
-         audio_mixer(num_cards)
+         decklink_output_surface(create_surface(format))
 {
+       memcpy(ycbcr_interpretation, global_flags.ycbcr_interpretation, sizeof(ycbcr_interpretation));
        CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
        check_error();
 
+       // This should nearly always be true.
+       global_flags.can_disable_srgb_decoder =
+               epoxy_has_gl_extension("GL_EXT_texture_sRGB_decode") &&
+               epoxy_has_gl_extension("GL_ARB_sampler_objects");
+
        // Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision
        // will be halved when sampling them, and we need to compensate here.
        movit_texel_subpixel_precision /= 2.0;
 
        resource_pool.reset(new ResourcePool);
-       theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), num_cards));
        for (unsigned i = 0; i < NUM_OUTPUTS; ++i) {
                output_channel[i].parent = this;
                output_channel[i].channel = i;
@@ -131,10 +329,27 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
        inout_format.color_space = COLORSPACE_sRGB;
        inout_format.gamma_curve = GAMMA_sRGB;
 
-       // Display chain; shows the live output produced by the main chain (its RGBA version).
+       // Matches the 4:2:0 format created by the main chain.
+       YCbCrFormat ycbcr_format;
+       ycbcr_format.chroma_subsampling_x = 2;
+       ycbcr_format.chroma_subsampling_y = 2;
+       if (global_flags.ycbcr_rec709_coefficients) {
+               ycbcr_format.luma_coefficients = YCBCR_REC_709;
+       } else {
+               ycbcr_format.luma_coefficients = YCBCR_REC_601;
+       }
+       ycbcr_format.full_range = false;
+       ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
+       ycbcr_format.cb_x_position = 0.0f;
+       ycbcr_format.cr_x_position = 0.0f;
+       ycbcr_format.cb_y_position = 0.5f;
+       ycbcr_format.cr_y_position = 0.5f;
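+       // (A cb/cr x position of 0.0 means left-sited chroma, as in MPEG-style 4:2:0.)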
+
+       // Display chain; shows the live output produced by the main chain (or rather, a copy of it).
        display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get()));
        check_error();
-       display_input = new FlatInput(inout_format, FORMAT_RGB, GL_UNSIGNED_BYTE, global_flags.width, global_flags.height);  // FIXME: GL_UNSIGNED_BYTE is really wrong.
+       GLenum type = global_flags.x264_bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
+       display_input = new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, YCBCR_INPUT_SPLIT_Y_AND_CBCR, type);
        display_chain->add_input(display_input);
        display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
        display_chain->set_dither_bits(0);  // Don't bother.
@@ -142,8 +357,22 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 
        video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, global_flags.width, global_flags.height, &httpd, global_disk_space_estimator));
 
+       // Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
+       theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), num_cards));
+
+       // Must be instantiated after the theme, as the theme decides the number of FFmpeg inputs.
+       std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
+       audio_mixer.reset(new AudioMixer(num_cards, video_inputs.size()));
+
+       httpd.add_endpoint("/channels", bind(&Mixer::get_channels_json, this), HTTPD::ALLOW_ALL_ORIGINS);
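+       // Channels 0 and 1 are the live and preview outputs, so per-channel
+       // color endpoints are only registered for the theme's own channels,
+       // which start at index 2.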
+       for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) {
+               char url[256];
+               snprintf(url, sizeof(url), "/channels/%d/color", channel_idx);
+               httpd.add_endpoint(url, bind(&Mixer::get_channel_color_http, this, unsigned(channel_idx)), HTTPD::ALLOW_ALL_ORIGINS);
+       }
+
        // Start listening for clients only once VideoEncoder has written its header, if any.
-       httpd.start(9095);
+       httpd.start(global_flags.http_port);
 
        // First try initializing the PCI devices, then the USB devices, and then
        // fill up with fake cards until we have the desired number of cards.
@@ -161,8 +390,11 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
 
                                DeckLinkCapture *capture = new DeckLinkCapture(decklink, card_index);
                                DeckLinkOutput *output = new DeckLinkOutput(resource_pool.get(), decklink_output_surface, global_flags.width, global_flags.height, card_index);
-                               output->set_device(decklink);
-                               configure_card(card_index, capture, /*is_fake_capture=*/false, output);
+                               if (!output->set_device(decklink)) {
+                                       delete output;
+                                       output = nullptr;
+                               }
+                               configure_card(card_index, capture, CardType::LIVE_CARD, output);
                                ++num_pci_devices;
                        }
                        decklink_iterator->Release();
@@ -176,29 +408,84 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
        for (unsigned usb_card_index = 0; usb_card_index < num_usb_devices && card_index < num_cards; ++usb_card_index, ++card_index) {
                BMUSBCapture *capture = new BMUSBCapture(usb_card_index);
                capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, card_index));
-               configure_card(card_index, capture, /*is_fake_capture=*/false, /*output=*/nullptr);
+               configure_card(card_index, capture, CardType::LIVE_CARD, /*output=*/nullptr);
        }
        fprintf(stderr, "Found %u USB card(s).\n", num_usb_devices);
 
        unsigned num_fake_cards = 0;
        for ( ; card_index < num_cards; ++card_index, ++num_fake_cards) {
                FakeCapture *capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
-               configure_card(card_index, capture, /*is_fake_capture=*/true, /*output=*/nullptr);
+               configure_card(card_index, capture, CardType::FAKE_CAPTURE, /*output=*/nullptr);
        }
 
        if (num_fake_cards > 0) {
                fprintf(stderr, "Initialized %u fake cards.\n", num_fake_cards);
        }
 
+       // Initialize all video inputs the theme asked for. Note that these are
+       // all put _after_ the regular cards, which stop at <num_cards> - 1.
+       for (unsigned video_card_index = 0; video_card_index < video_inputs.size(); ++card_index, ++video_card_index) {
+               if (card_index >= MAX_VIDEO_CARDS) {
+                       fprintf(stderr, "ERROR: Not enough card slots available for the videos the theme requested.\n");
+                       exit(1);
+               }
+               configure_card(card_index, video_inputs[video_card_index], CardType::FFMPEG_INPUT, /*output=*/nullptr);
+               video_inputs[video_card_index]->set_card_index(card_index);
+       }
+       num_video_inputs = video_inputs.size();
+
+#ifdef HAVE_CEF
+       // Same, for HTML inputs.
+       std::vector<CEFCapture *> html_inputs = theme->get_html_inputs();
+       for (unsigned html_card_index = 0; html_card_index < html_inputs.size(); ++card_index, ++html_card_index) {
+               if (card_index >= MAX_VIDEO_CARDS) {
+                       fprintf(stderr, "ERROR: Not enough card slots available for the HTML inputs the theme requested.\n");
+                       exit(1);
+               }
+               configure_card(card_index, html_inputs[html_card_index], CardType::CEF_INPUT, /*output=*/nullptr);
+               html_inputs[html_card_index]->set_card_index(card_index);
+       }
+       num_html_inputs = html_inputs.size();
+#endif
+
        BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1));
        BMUSBCapture::start_bm_thread();
 
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
                cards[card_index].queue_length_policy.reset(card_index);
        }
 
        chroma_subsampler.reset(new ChromaSubsampler(resource_pool.get()));
 
+       if (global_flags.ten_bit_input) {
+               if (!v210Converter::has_hardware_support()) {
+                       fprintf(stderr, "ERROR: --ten-bit-input requires support for OpenGL compute shaders\n");
+                       fprintf(stderr, "       (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n");
+                       exit(1);
+               }
+               v210_converter.reset(new v210Converter());
+
+               // These are all the widths listed in the Blackmagic SDK documentation
+               // (section 2.7.3, “Display Modes”).
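+               // Precompiling them up front avoids a compute shader compile (and
+               // the resulting hiccup) the first time a frame with a given width arrives.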
+               v210_converter->precompile_shader(720);
+               v210_converter->precompile_shader(1280);
+               v210_converter->precompile_shader(1920);
+               v210_converter->precompile_shader(2048);
+               v210_converter->precompile_shader(3840);
+               v210_converter->precompile_shader(4096);
+       }
+       if (global_flags.ten_bit_output) {
+               if (!v210Converter::has_hardware_support()) {
+                       fprintf(stderr, "ERROR: --ten-bit-output requires support for OpenGL compute shaders\n");
+                       fprintf(stderr, "       (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n");
+                       exit(1);
+               }
+       }
+
+       timecode_renderer.reset(new TimecodeRenderer(resource_pool.get(), global_flags.width, global_flags.height));
+       display_timecode_in_stream = global_flags.display_timecode_in_stream;
+       display_timecode_on_stdout = global_flags.display_timecode_on_stdout;
+
        if (global_flags.enable_alsa_output) {
                alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
        }
@@ -206,13 +493,16 @@ Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
                desired_output_card_index = global_flags.output_card;
                set_output_card_internal(global_flags.output_card);
        }
+
+       output_jitter_history.register_metrics({{ "card", "output" }});
 }
 
 Mixer::~Mixer()
 {
+       httpd.stop();
        BMUSBCapture::stop_bm_thread();
 
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
                {
                        unique_lock<mutex> lock(card_mutex);
                        cards[card_index].should_quit = true;  // Unblock thread.
@@ -228,7 +518,7 @@ Mixer::~Mixer()
        video_encoder.reset(nullptr);
 }
 
-void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, bool is_fake_capture, DeckLinkOutput *output)
+void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardType card_type, DeckLinkOutput *output)
 {
        printf("Configuring card %d...\n", card_index);
 
@@ -237,13 +527,28 @@ void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, bool
                card->capture->stop_dequeue_thread();
        }
        card->capture.reset(capture);
-       card->is_fake_capture = is_fake_capture;
+       card->is_fake_capture = (card_type == CardType::FAKE_CAPTURE);
+       card->is_cef_capture = (card_type == CardType::CEF_INPUT);
+       card->may_have_dropped_last_frame = false;
+       card->type = card_type;
        if (card->output.get() != output) {
                card->output.reset(output);
        }
+
+       PixelFormat pixel_format;
+       if (card_type == CardType::FFMPEG_INPUT) {
+               pixel_format = capture->get_current_pixel_format();
+       } else if (card_type == CardType::CEF_INPUT) {
+               pixel_format = PixelFormat_8BitBGRA;
+       } else if (global_flags.ten_bit_input) {
+               pixel_format = PixelFormat_10BitYCbCr;
+       } else {
+               pixel_format = PixelFormat_8BitYCbCr;
+       }
+
        card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
        if (card->frame_allocator == nullptr) {
-               card->frame_allocator.reset(new PBOFrameAllocator(8 << 20, global_flags.width, global_flags.height));  // 8 MB.
+               card->frame_allocator.reset(new PBOFrameAllocator(pixel_format, 8 << 20, global_flags.width, global_flags.height));  // 8 MB.
        }
        card->capture->set_video_frame_allocator(card->frame_allocator.get());
        if (card->surface == nullptr) {
@@ -251,14 +556,83 @@ void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, bool
        }
        while (!card->new_frames.empty()) card->new_frames.pop_front();
        card->last_timecode = -1;
+       card->capture->set_pixel_format(pixel_format);
        card->capture->configure_card();
 
        // NOTE: start_bm_capture() happens in thread_func().
 
-       DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
-       audio_mixer.reset_resampler(device);
-       audio_mixer.set_display_name(device, card->capture->get_description());
-       audio_mixer.trigger_state_changed_callback();
+       DeviceSpec device;
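+       // FFmpeg inputs are placed after the regular capture cards (see the
+       // constructor), so their device index is offset by num_cards.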
+       if (card_type == CardType::FFMPEG_INPUT) {
+               device = DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_cards};
+       } else {
+               device = DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
+       }
+       audio_mixer->reset_resampler(device);
+       audio_mixer->set_display_name(device, card->capture->get_description());
+       audio_mixer->trigger_state_changed_callback();
+
+       // Unregister old metrics, if any.
+       if (!card->labels.empty()) {
+               const vector<pair<string, string>> &labels = card->labels;
+               card->jitter_history.unregister_metrics(labels);
+               card->queue_length_policy.unregister_metrics(labels);
+               global_metrics.remove("input_received_frames", labels);
+               global_metrics.remove("input_dropped_frames_jitter", labels);
+               global_metrics.remove("input_dropped_frames_error", labels);
+               global_metrics.remove("input_dropped_frames_resets", labels);
+               global_metrics.remove("input_queue_length_frames", labels);
+               global_metrics.remove("input_queue_duped_frames", labels);
+
+               global_metrics.remove("input_has_signal_bool", labels);
+               global_metrics.remove("input_is_connected_bool", labels);
+               global_metrics.remove("input_interlaced_bool", labels);
+               global_metrics.remove("input_width_pixels", labels);
+               global_metrics.remove("input_height_pixels", labels);
+               global_metrics.remove("input_frame_rate_nom", labels);
+               global_metrics.remove("input_frame_rate_den", labels);
+               global_metrics.remove("input_sample_rate_hz", labels);
+       }
+
+       // Register metrics.
+       vector<pair<string, string>> labels;
+       char card_name[64];
+       snprintf(card_name, sizeof(card_name), "%d", card_index);
+       labels.emplace_back("card", card_name);
+
+       switch (card_type) {
+       case CardType::LIVE_CARD:
+               labels.emplace_back("cardtype", "live");
+               break;
+       case CardType::FAKE_CAPTURE:
+               labels.emplace_back("cardtype", "fake");
+               break;
+       case CardType::FFMPEG_INPUT:
+               labels.emplace_back("cardtype", "ffmpeg");
+               break;
+       case CardType::CEF_INPUT:
+               labels.emplace_back("cardtype", "cef");
+               break;
+       default:
+               assert(false);
+       }
+       card->jitter_history.register_metrics(labels);
+       card->queue_length_policy.register_metrics(labels);
+       global_metrics.add("input_received_frames", labels, &card->metric_input_received_frames);
+       global_metrics.add("input_dropped_frames_jitter", labels, &card->metric_input_dropped_frames_jitter);
+       global_metrics.add("input_dropped_frames_error", labels, &card->metric_input_dropped_frames_error);
+       global_metrics.add("input_dropped_frames_resets", labels, &card->metric_input_resets);
+       global_metrics.add("input_queue_length_frames", labels, &card->metric_input_queue_length_frames, Metrics::TYPE_GAUGE);
+       global_metrics.add("input_queue_duped_frames", labels, &card->metric_input_duped_frames);
+
+       global_metrics.add("input_has_signal_bool", labels, &card->metric_input_has_signal_bool, Metrics::TYPE_GAUGE);
+       global_metrics.add("input_is_connected_bool", labels, &card->metric_input_is_connected_bool, Metrics::TYPE_GAUGE);
+       global_metrics.add("input_interlaced_bool", labels, &card->metric_input_interlaced_bool, Metrics::TYPE_GAUGE);
+       global_metrics.add("input_width_pixels", labels, &card->metric_input_width_pixels, Metrics::TYPE_GAUGE);
+       global_metrics.add("input_height_pixels", labels, &card->metric_input_height_pixels, Metrics::TYPE_GAUGE);
+       global_metrics.add("input_frame_rate_nom", labels, &card->metric_input_frame_rate_nom, Metrics::TYPE_GAUGE);
+       global_metrics.add("input_frame_rate_den", labels, &card->metric_input_frame_rate_den, Metrics::TYPE_GAUGE);
+       global_metrics.add("input_sample_rate_hz", labels, &card->metric_input_sample_rate_hz, Metrics::TYPE_GAUGE);
+       card->labels = labels;
 }
 
 void Mixer::set_output_card_internal(int card_index)
@@ -275,17 +649,17 @@ void Mixer::set_output_card_internal(int card_index)
                // Stop the fake card that we put into place.
                // This needs to _not_ happen under the mutex, to avoid deadlock
                // (delivering the last frame needs to take the mutex).
-               bmusb::CaptureInterface *fake_capture = old_card->capture.get();
+               CaptureInterface *fake_capture = old_card->capture.get();
                lock.unlock();
                fake_capture->stop_dequeue_thread();
                lock.lock();
-               old_card->capture = move(old_card->parked_capture);
+               old_card->capture = move(old_card->parked_capture);  // TODO: reset the metrics
                old_card->is_fake_capture = false;
                old_card->capture->start_bm_capture();
        }
        if (card_index != -1) {
                CaptureCard *card = &cards[card_index];
-               bmusb::CaptureInterface *capture = card->capture.get();
+               CaptureInterface *capture = card->capture.get();
                // TODO: DeckLinkCapture::stop_dequeue_thread can actually take
                // several seconds to complete (blocking on DisableVideoInput);
                // see if we can maybe do it asynchronously.
@@ -293,14 +667,15 @@ void Mixer::set_output_card_internal(int card_index)
                capture->stop_dequeue_thread();
                lock.lock();
                card->parked_capture = move(card->capture);
-               bmusb::CaptureInterface *fake_capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
-               configure_card(card_index, fake_capture, /*is_fake_capture=*/true, card->output.release());
+               CaptureInterface *fake_capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
+               configure_card(card_index, fake_capture, CardType::FAKE_CAPTURE, card->output.release());
                card->queue_length_policy.reset(card_index);
                card->capture->start_bm_capture();
                desired_output_video_mode = output_video_mode = card->output->pick_video_mode(desired_output_video_mode);
                card->output->start_output(desired_output_video_mode, pts_int);
        }
        output_card_index = card_index;
+       output_jitter_history.clear();
 }
 
 namespace {
@@ -321,9 +696,24 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                      FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
                     FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
 {
-       DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
+       DeviceSpec device;
+       if (card_index >= num_cards) {
+               device = DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_cards};
+       } else {
+               device = DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
+       }
        CaptureCard *card = &cards[card_index];
 
+       ++card->metric_input_received_frames;
+       card->metric_input_has_signal_bool = video_format.has_signal;
+       card->metric_input_is_connected_bool = video_format.is_connected;
+       card->metric_input_interlaced_bool = video_format.interlaced;
+       card->metric_input_width_pixels = video_format.width;
+       card->metric_input_height_pixels = video_format.height;
+       card->metric_input_frame_rate_nom = video_format.frame_rate_nom;
+       card->metric_input_frame_rate_den = video_format.frame_rate_den;
+       card->metric_input_sample_rate_hz = audio_format.sample_rate;
+
        if (is_mode_scanning[card_index]) {
                if (video_format.has_signal) {
                        // Found a stable signal, so stop scanning.
@@ -346,9 +736,9 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
        assert(frame_length > 0);
 
        size_t num_samples = (audio_frame.len > audio_offset) ? (audio_frame.len - audio_offset) / audio_format.num_channels / (audio_format.bits_per_sample / 8) : 0;
-       if (num_samples > OUTPUT_FREQUENCY / 10) {
-               printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x)\n",
-                       card_index, int(audio_frame.len), int(audio_offset),
+       if (num_samples > OUTPUT_FREQUENCY / 10 && card->type != CardType::FFMPEG_INPUT) {
+               printf("%s: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x]\n",
+                       spec_to_string(device).c_str(), int(audio_frame.len), int(audio_offset),
                        timecode, int(video_frame.len), int(video_offset), video_format.id);
                if (video_frame.owner) {
                        video_frame.owner->release_frame(video_frame);
@@ -369,22 +759,26 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
        const int silence_samples = OUTPUT_FREQUENCY * video_format.frame_rate_den / video_format.frame_rate_nom;
 
        if (dropped_frames > MAX_FPS * 2) {
-               fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
-                       card_index, card->last_timecode, timecode);
-               audio_mixer.reset_resampler(device);
+               fprintf(stderr, "%s lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
+                       spec_to_string(device).c_str(), card->last_timecode, timecode);
+               audio_mixer->reset_resampler(device);
                dropped_frames = 0;
+               ++card->metric_input_resets;
        } else if (dropped_frames > 0) {
                // Insert silence as needed.
-               fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
-                       card_index, dropped_frames, timecode);
+               fprintf(stderr, "%s dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
+                       spec_to_string(device).c_str(), dropped_frames, timecode);
+               card->metric_input_dropped_frames_error += dropped_frames;
 
                bool success;
                do {
-                       success = audio_mixer.add_silence(device, silence_samples, dropped_frames, frame_length);
+                       success = audio_mixer->add_silence(device, silence_samples, dropped_frames, frame_length);
                } while (!success);
        }
 
-       audio_mixer.add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length, audio_frame.received_timestamp);
+       if (num_samples > 0) {
+               audio_mixer->add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length, audio_frame.received_timestamp);
+       }
 
        // Done with the audio, so release it.
        if (audio_frame.owner) {
@@ -393,12 +787,33 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
 
        card->last_timecode = timecode;
 
-       size_t expected_length = video_format.width * (video_format.height + video_format.extra_lines_top + video_format.extra_lines_bottom) * 2;
+       PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;
+
+       size_t cbcr_width, cbcr_height, cbcr_offset, y_offset;
+       size_t expected_length = video_format.stride * (video_format.height + video_format.extra_lines_top + video_format.extra_lines_bottom);
+       if (userdata != nullptr && userdata->pixel_format == PixelFormat_8BitYCbCrPlanar) {
+               // The calculation above is wrong for planar Y'CbCr, so just override it.
+               assert(card->type == CardType::FFMPEG_INPUT);
+               assert(video_offset == 0);
+               expected_length = video_frame.len;
+
+               userdata->ycbcr_format = (static_cast<FFmpegCapture *>(card->capture.get()))->get_current_frame_ycbcr_format();
+               cbcr_width = video_format.width / userdata->ycbcr_format.chroma_subsampling_x;
+               cbcr_height = video_format.height / userdata->ycbcr_format.chroma_subsampling_y;
+               cbcr_offset = video_format.width * video_format.height;
+               y_offset = 0;
+       } else {
+               // All the other Y'CbCr formats are 4:2:2.
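+               // The frame buffer is split in two halves: the interleaved CbCr
+               // data in the first half and the Y data in the second, which is
+               // where these offsets come from.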
+               cbcr_width = video_format.width / 2;
+               cbcr_height = video_format.height;
+               cbcr_offset = video_offset / 2;
+               y_offset = video_frame.size / 2 + video_offset / 2;
+       }
        if (video_frame.len - video_offset == 0 ||
            video_frame.len - video_offset != expected_length) {
                if (video_frame.len != 0) {
-                       printf("Card %d: Dropping video frame with wrong length (%ld; expected %ld)\n",
-                               card_index, video_frame.len - video_offset, expected_length);
+                       printf("%s: Dropping video frame with wrong length (%ld; expected %ld)\n",
+                               spec_to_string(device).c_str(), video_frame.len - video_offset, expected_length);
                }
                if (video_frame.owner) {
                        video_frame.owner->release_frame(video_frame);
@@ -413,14 +828,14 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                        new_frame.length = frame_length;
                        new_frame.interlaced = false;
                        new_frame.dropped_frames = dropped_frames;
+                       new_frame.received_timestamp = video_frame.received_timestamp;
                        card->new_frames.push_back(move(new_frame));
-                       card->new_frames_changed.notify_all();
+                       card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
                }
+               card->new_frames_changed.notify_all();
                return;
        }
 
-       PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;
-
        unsigned num_fields = video_format.interlaced ? 2 : 1;
        steady_clock::time_point frame_upload_start;
        bool interlaced_stride = false;
@@ -429,6 +844,7 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                // a deinterlacer to actually get this right.
                assert(video_format.height % 2 == 0);
                video_format.height /= 2;
+               cbcr_height /= 2;
                assert(frame_length % 2 == 0);
                frame_length /= 2;
                num_fields = 2;
@@ -445,10 +861,6 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
        RefCountedFrame frame(video_frame);
 
        // Upload the textures.
-       size_t cbcr_width = video_format.width / 2;
-       size_t cbcr_offset = video_offset / 2;
-       size_t y_offset = video_frame.size / 2 + video_offset / 2;
-
        for (unsigned field = 0; field < num_fields; ++field) {
                // Put the actual texture upload in a lambda that is executed in the main thread.
                // It is entirely possible to do this in the same thread (and it might even be
@@ -458,7 +870,7 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                // Note that this means we must hold on to the actual frame data in <userdata>
                // until the upload command is run, but we hold on to <frame> much longer than that
                // (in fact, all the way until we no longer use the texture in rendering).
-               auto upload_func = [field, video_format, y_offset, cbcr_offset, cbcr_width, interlaced_stride, userdata]() {
+               auto upload_func = [this, field, video_format, y_offset, video_offset, cbcr_offset, cbcr_width, cbcr_height, interlaced_stride, userdata]() {
                        unsigned field_start_line;
                        if (field == 1) {
                                field_start_line = video_format.second_field_start;
@@ -466,68 +878,59 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                                field_start_line = video_format.extra_lines_top;
                        }
 
-                       if (userdata->tex_y[field] == 0 ||
-                           userdata->tex_cbcr[field] == 0 ||
-                           video_format.width != userdata->last_width[field] ||
-                           video_format.height != userdata->last_height[field]) {
-                               // We changed resolution since last use of this texture, so we need to create
-                               // a new object. Note that this each card has its own PBOFrameAllocator,
-                               // we don't need to worry about these flip-flopping between resolutions.
-                               glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
-                               check_error();
-                               glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, video_format.height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr);
-                               check_error();
-                               glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
-                               check_error();
-                               glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, video_format.width, video_format.height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
-                               check_error();
-                               userdata->last_width[field] = video_format.width;
-                               userdata->last_height[field] = video_format.height;
-                       }
+                       // For anything not FRAME_FORMAT_YCBCR_10BIT, v210_width will be nonsensical but not used.
+                       size_t v210_width = video_format.stride / sizeof(uint32_t);
+                       ensure_texture_resolution(userdata, field, video_format.width, video_format.height, cbcr_width, cbcr_height, v210_width);
 
-                       GLuint pbo = userdata->pbo;
+                       glBindBuffer(GL_PIXEL_UNPACK_BUFFER, userdata->pbo);
                        check_error();
-                       glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
-                       check_error();
-
-                       size_t field_y_start = y_offset + video_format.width * field_start_line;
-                       size_t field_cbcr_start = cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t);
 
-                       if (global_flags.flush_pbos) {
-                               glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, field_y_start, video_format.width * video_format.height);
-                               check_error();
-                               glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, field_cbcr_start, cbcr_width * video_format.height * sizeof(uint16_t));
-                               check_error();
+                       switch (userdata->pixel_format) {
+                       case PixelFormat_10BitYCbCr: {
+                               size_t field_start = video_offset + video_format.stride * field_start_line;
+                               upload_texture(userdata->tex_v210[field], v210_width, video_format.height, video_format.stride, interlaced_stride, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, field_start);
+                               v210_converter->convert(userdata->tex_v210[field], userdata->tex_444[field], video_format.width, video_format.height);
+                               break;
                        }
+                       case PixelFormat_8BitYCbCr: {
+                               size_t field_y_start = y_offset + video_format.width * field_start_line;
+                               size_t field_cbcr_start = cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t);
 
-                       glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
-                       check_error();
-                       if (interlaced_stride) {
-                               glPixelStorei(GL_UNPACK_ROW_LENGTH, cbcr_width * 2);
-                               check_error();
-                       } else {
-                               glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
-                               check_error();
+                               // Make up our own strides, since we are interleaving.
+                               upload_texture(userdata->tex_y[field], video_format.width, video_format.height, video_format.width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_y_start);
+                               upload_texture(userdata->tex_cbcr[field], cbcr_width, cbcr_height, cbcr_width * sizeof(uint16_t), interlaced_stride, GL_RG, GL_UNSIGNED_BYTE, field_cbcr_start);
+                               break;
                        }
-                       glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, video_format.height, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(field_cbcr_start));
-                       check_error();
-                       glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
-                       check_error();
-                       if (interlaced_stride) {
-                               glPixelStorei(GL_UNPACK_ROW_LENGTH, video_format.width * 2);
+                       case PixelFormat_8BitYCbCrPlanar: {
+                               assert(field_start_line == 0);  // We don't really support interlaced here.
+                               size_t field_y_start = y_offset;
+                               size_t field_cb_start = cbcr_offset;
+                               size_t field_cr_start = cbcr_offset + cbcr_width * cbcr_height;
+
+                               // Make up our own strides, since we are interleaving.
+                               upload_texture(userdata->tex_y[field], video_format.width, video_format.height, video_format.width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_y_start);
+                               upload_texture(userdata->tex_cb[field], cbcr_width, cbcr_height, cbcr_width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_cb_start);
+                               upload_texture(userdata->tex_cr[field], cbcr_width, cbcr_height, cbcr_width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_cr_start);
+                               break;
+                       }
+                       case PixelFormat_8BitBGRA: {
+                               size_t field_start = video_offset + video_format.stride * field_start_line;
+                               upload_texture(userdata->tex_rgba[field], video_format.width, video_format.height, video_format.stride, interlaced_stride, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, field_start);
+                               // These could be asked to deliver mipmaps at any time.
+                               glBindTexture(GL_TEXTURE_2D, userdata->tex_rgba[field]);
                                check_error();
-                       } else {
-                               glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+                               glGenerateMipmap(GL_TEXTURE_2D);
+                               check_error();
+                               glBindTexture(GL_TEXTURE_2D, 0);
                                check_error();
+                               break;
                        }
-                       glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, video_format.width, video_format.height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(field_y_start));
-                       check_error();
-                       glBindTexture(GL_TEXTURE_2D, 0);
-                       check_error();
+                       default:
+                               assert(false);
+                       }
+
                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
                        check_error();
-                       glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
-                       check_error();
                };
 
                if (field == 1) {
@@ -553,8 +956,10 @@ void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                        new_frame.dropped_frames = dropped_frames;
                        new_frame.received_timestamp = video_frame.received_timestamp;  // Ignore the audio timestamp.
                        card->new_frames.push_back(move(new_frame));
-                       card->new_frames_changed.notify_all();
+                       card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
+                       card->may_have_dropped_last_frame = false;
                }
+               card->new_frames_changed.notify_all();
        }
 }
 
@@ -582,16 +987,13 @@ void Mixer::thread_func()
 
        // Start the actual capture. (We don't want to do it before we're actually ready
        // to process output frames.)
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
                if (int(card_index) != output_card_index) {
                        cards[card_index].capture->start_bm_capture();
                }
        }
 
-       steady_clock::time_point start, now;
-       start = steady_clock::now();
-
-       int frame = 0;
+       BasicStats basic_stats(/*verbose=*/true, /*use_opengl=*/true);
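+       // BasicStats now does the periodic statistics printouts (frame rate,
+       // memory usage, including the mlockall() accounting that used to live
+       // inline here).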
        int stats_dropped_frames = 0;
 
        while (!should_quit) {
@@ -617,7 +1019,7 @@ void Mixer::thread_func()
                } else {
                        master_card_is_output = false;
                        master_card_index = theme->map_signal(master_clock_channel);
-                       assert(master_card_index < num_cards);
+                       assert(master_card_index < num_cards + num_video_inputs);
                }
 
                OutputFrameInfo output_frame_info = get_one_frame_from_each_card(master_card_index, master_card_is_output, new_frames, has_new_frame);
@@ -626,7 +1028,7 @@ void Mixer::thread_func()
 
                handle_hotplugged_cards();
 
-               for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+               for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
                        if (card_index == master_card_index || !has_new_frame[card_index]) {
                                continue;
                        }
@@ -647,7 +1049,7 @@ void Mixer::thread_func()
                        continue;
                }
 
-               for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+               for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
                        if (!has_new_frame[card_index] || new_frames[card_index].frame->len == 0)
                                continue;
 
@@ -665,52 +1067,18 @@ void Mixer::thread_func()
 
                int64_t frame_duration = output_frame_info.frame_duration;
                render_one_frame(frame_duration);
-               ++frame;
-               pts_int += frame_duration;
-
-               now = steady_clock::now();
-               double elapsed = duration<double>(now - start).count();
-               if (frame % 100 == 0) {
-                       printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)",
-                               frame, stats_dropped_frames, elapsed, frame / elapsed,
-                               1e3 * elapsed / frame);
-               //      chain->print_phase_timing();
-
-                       // Check our memory usage, to see if we are close to our mlockall()
-                       // limit (if at all set).
-                       rusage used;
-                       if (getrusage(RUSAGE_SELF, &used) == -1) {
-                               perror("getrusage(RUSAGE_SELF)");
-                               assert(false);
-                       }
-
-                       if (uses_mlock) {
-                               rlimit limit;
-                               if (getrlimit(RLIMIT_MEMLOCK, &limit) == -1) {
-                                       perror("getrlimit(RLIMIT_MEMLOCK)");
-                                       assert(false);
-                               }
-
-                               if (limit.rlim_cur == 0) {
-                                       printf(", using %ld MB memory (locked)",
-                                               long(used.ru_maxrss / 1024));
-                               } else {
-                                       printf(", using %ld / %ld MB lockable memory (%.1f%%)",
-                                               long(used.ru_maxrss / 1024),
-                                               long(limit.rlim_cur / 1048576),
-                                               float(100.0 * (used.ru_maxrss * 1024.0) / limit.rlim_cur));
-                               }
-                       } else {
-                               printf(", using %ld MB memory (not locked)",
-                                       long(used.ru_maxrss / 1024));
-                       }
-
-                       printf("\n");
+               {
+                       lock_guard<mutex> lock(frame_num_mutex);
+                       ++frame_num;
                }
+               frame_num_updated.notify_all();
+               pts_int += frame_duration;
 
+               basic_stats.update(frame_num, stats_dropped_frames);
+               // if (frame_num % 100 == 0) chain->print_phase_timing();
 
                if (should_cut.exchange(false)) {  // Test and clear.
-                       video_encoder->do_cut(frame);
+                       video_encoder->do_cut(frame_num);
                }
 
 #if 0
@@ -739,7 +1107,7 @@ bool Mixer::input_card_is_master_clock(unsigned card_index, unsigned master_card
        return (card_index == master_card_index);
 }
 
-void Mixer::trim_queue(CaptureCard *card, unsigned card_index)
+void Mixer::trim_queue(CaptureCard *card, size_t safe_queue_length)
 {
        // Count the number of frames in the queue, including any frames
        // we dropped. It's hard to know exactly how we should deal with
@@ -751,18 +1119,17 @@ void Mixer::trim_queue(CaptureCard *card, unsigned card_index)
        for (const CaptureCard::NewFrame &frame : card->new_frames) {
                queue_length += frame.dropped_frames + 1;
        }
-       card->queue_length_policy.update_policy(queue_length);
 
        // If needed, drop frames until the queue is below the safe limit.
        // We prefer to drop from the head, because all else being equal,
        // we'd like more recent frames (less latency).
        unsigned dropped_frames = 0;
-       while (queue_length > card->queue_length_policy.get_safe_queue_length()) {
+       while (queue_length > safe_queue_length) {
                assert(!card->new_frames.empty());
                assert(queue_length > card->new_frames.front().dropped_frames);
                queue_length -= card->new_frames.front().dropped_frames;
 
-               if (queue_length <= card->queue_length_policy.get_safe_queue_length()) {
+               if (queue_length <= safe_queue_length) {
                        // No need to drop anything.
                        break;
                }
@@ -771,14 +1138,41 @@ void Mixer::trim_queue(CaptureCard *card, unsigned card_index)
                card->new_frames_changed.notify_all();
                --queue_length;
                ++dropped_frames;
+
+               if (queue_length == 0 && card->is_cef_capture) {
+                       card->may_have_dropped_last_frame = true;
+               }
        }
 
+       card->metric_input_dropped_frames_jitter += dropped_frames;
+       card->metric_input_queue_length_frames = queue_length;
+
+#if 0
        if (dropped_frames > 0) {
                fprintf(stderr, "Card %u dropped %u frame(s) to keep latency down.\n",
                        card_index, dropped_frames);
        }
+#endif
 }
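
For orientation: trim_queue() now takes the safe length from the caller (computed per frame from jitter statistics further down) instead of consulting a per-card policy object itself. A minimal standalone sketch of the same head-drop accounting, with a hypothetical QueuedFrame type standing in for CaptureCard::NewFrame:

	#include <cassert>
	#include <cstddef>
	#include <deque>

	struct QueuedFrame {
		unsigned dropped_frames;  // frames lost upstream just before this one
	};

	// Drop frames from the head (oldest first) until the effective queue length,
	// which also counts upstream drops, is no larger than safe_queue_length.
	// Returns how many frames we threw away ourselves.
	unsigned trim_to_safe_length(std::deque<QueuedFrame> *q, size_t safe_queue_length)
	{
		size_t queue_length = 0;
		for (const QueuedFrame &frame : *q) {
			queue_length += frame.dropped_frames + 1;
		}

		unsigned dropped_frames = 0;
		while (queue_length > safe_queue_length) {
			assert(!q->empty());
			queue_length -= q->front().dropped_frames;  // Forgetting the upstream drops...
			if (queue_length <= safe_queue_length) {
				break;  // ...was enough; keep the frame itself.
			}
			q->pop_front();  // Otherwise, drop the actual frame, too.
			--queue_length;
			++dropped_frames;
		}
		return dropped_frames;
	}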
 
+pair<string, string> Mixer::get_channels_json()
+{
+       Channels ret;
+       for (int channel_idx = 2; channel_idx < theme->get_num_channels() + 2; ++channel_idx) {
+               Channel *channel = ret.add_channel();
+               channel->set_index(channel_idx);
+               channel->set_name(theme->get_channel_name(channel_idx));
+               channel->set_color(theme->get_channel_color(channel_idx));
+       }
+       string contents;
+       google::protobuf::util::MessageToJsonString(ret, &contents);  // Ignore any errors.
+       return make_pair(contents, "text/json");
+}
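
Assuming a theme that exposes two channels at indexes 2 and 3, a GET against this endpoint would yield something like the following (names and colors are made up here; MessageToJsonString emits compact JSON without whitespace by default):

	{"channel":[{"index":2,"name":"Camera 1","color":"#ff8400"},{"index":3,"name":"Side-by-side","color":"#00a000"}]}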
+
+pair<string, string> Mixer::get_channel_color_http(unsigned channel_idx)
+{
+       return make_pair(theme->get_channel_color(channel_idx), "text/plain");
+}
 
 Mixer::OutputFrameInfo Mixer::get_one_frame_from_each_card(unsigned master_card_index, bool master_card_is_output, CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS], bool has_new_frame[MAX_VIDEO_CARDS])
 {
@@ -804,26 +1198,25 @@ start:
                // and then restart.
                assert(cards[master_card_index].capture->get_disconnected());
                handle_hotplugged_cards();
+               lock.unlock();
                goto start;
        }
 
-       if (!master_card_is_output) {
-               output_frame_info.frame_timestamp =
-                       cards[master_card_index].new_frames.front().received_timestamp;
-       }
-
-       for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
                CaptureCard *card = &cards[card_index];
-               if (input_card_is_master_clock(card_index, master_card_index)) {
-                       // We don't use the queue length policy for the master card,
-                       // but we will if it stops being the master. Thus, clear out
-                       // the policy in case we switch in the future.
-                       card->queue_length_policy.reset(card_index);
-                       assert(!card->new_frames.empty());
+               if (card->new_frames.empty()) {  // Starvation.
+                       ++card->metric_input_duped_frames;
+#ifdef HAVE_CEF
+                       if (card->is_cef_capture && card->may_have_dropped_last_frame) {
+                               // Unlike other sources, CEF is not guaranteed to send us a steady
+                               // stream of frames, so we'll have to ask it to repaint the frame
+                               // we dropped. (may_have_dropped_last_frame is set whenever we
+                               // trim the queue completely away, and cleared when we actually
+                               // get a new frame.)
+                               ((CEFCapture *)card->capture.get())->request_new_frame();
+                       }
+#endif
                } else {
-                       trim_queue(card, card_index);
-               }
-               if (!card->new_frames.empty()) {
                        new_frames[card_index] = move(card->new_frames.front());
                        has_new_frame[card_index] = true;
                        card->new_frames.pop_front();
@@ -832,10 +1225,32 @@ start:
        }
 
        if (!master_card_is_output) {
+               output_frame_info.frame_timestamp = new_frames[master_card_index].received_timestamp;
                output_frame_info.dropped_frames = new_frames[master_card_index].dropped_frames;
                output_frame_info.frame_duration = new_frames[master_card_index].length;
        }
 
+       if (!output_frame_info.is_preroll) {
+               output_jitter_history.frame_arrived(output_frame_info.frame_timestamp, output_frame_info.frame_duration, output_frame_info.dropped_frames);
+       }
+
+       for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
+               CaptureCard *card = &cards[card_index];
+               if (has_new_frame[card_index] &&
+                   !input_card_is_master_clock(card_index, master_card_index) &&
+                   !output_frame_info.is_preroll) {
+                       card->queue_length_policy.update_policy(
+                               output_frame_info.frame_timestamp,
+                               card->jitter_history.get_expected_next_frame(),
+                               new_frames[master_card_index].length,
+                               output_frame_info.frame_duration,
+                               card->jitter_history.estimate_max_jitter(),
+                               output_jitter_history.estimate_max_jitter());
+                       trim_queue(card, min<int>(global_flags.max_input_queue_frames,
+                                                 card->queue_length_policy.get_safe_queue_length()));
+               }
+       }
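
Only the call site of update_policy() is visible here; the policy itself lives elsewhere. Purely as an illustration of how jitter estimates can bound a queue (this is not Nageru's actual formula), one could require the buffer to absorb the worst observed input jitter plus the worst observed output jitter:

	#include <algorithm>
	#include <cmath>
	#include <cstddef>
	#include <cstdint>

	// Illustrative only: keep enough frames queued to ride out the worst input
	// jitter plus the worst output jitter, rounded up to whole input frames.
	// At least one frame is always kept so there is something to display.
	size_t illustrative_safe_queue_length(double max_input_jitter_seconds,
	                                      double max_output_jitter_seconds,
	                                      int64_t input_frame_duration, int64_t timebase)
	{
		const double frame_seconds = double(input_frame_duration) / timebase;
		const double needed = (max_input_jitter_seconds + max_output_jitter_seconds) / frame_seconds;
		return std::max<size_t>(1, size_t(std::ceil(needed)));
	}

The real policy also sees the expected arrival time of the next frame and both frame durations, so it can be considerably smarter; whatever it decides is capped by the min() against --max-input-queue-frames above.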
+
        // This might get off by a fractional sample when changing master card
        // between ones with different frame rates, but that's fine.
        int num_samples_times_timebase = OUTPUT_FREQUENCY * output_frame_info.frame_duration + fractional_samples;
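
The fractional_samples accumulator is a classic remainder carry: it keeps the total number of audio samples in exact lockstep with the total video duration, even when one frame does not correspond to a whole number of samples. With OUTPUT_FREQUENCY = 48000 and TIMEBASE = 1200000 (both from defs.h), a 59.94 fps frame lasts 20020 ticks, i.e. 800.8 samples; the integer divide and modulo (presumably in the unchanged lines just below this hunk) hand out 800 samples on the first frame and 801 on each of the next four, averaging exactly 800.8. A self-contained sketch:

	#include <cstdint>
	#include <cstdio>

	int64_t fractional_samples = 0;  // remainder, in units of 1/TIMEBASE of a sample

	int num_samples_for_frame(int64_t frame_duration)  // duration in TIMEBASE ticks
	{
		const int64_t OUTPUT_FREQUENCY = 48000, TIMEBASE = 1200000;
		int64_t num_samples_times_timebase = OUTPUT_FREQUENCY * frame_duration + fractional_samples;
		fractional_samples = num_samples_times_timebase % TIMEBASE;  // carry to the next frame
		return int(num_samples_times_timebase / TIMEBASE);
	}

	int main()
	{
		for (int i = 0; i < 5; ++i) {
			printf("%d\n", num_samples_for_frame(20020));  // prints 800, 801, 801, 801, 801
		}
	}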
@@ -854,7 +1269,7 @@ void Mixer::handle_hotplugged_cards()
                if (card->capture->get_disconnected()) {
                        fprintf(stderr, "Card %u went away, replacing with a fake card.\n", card_index);
                        FakeCapture *capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
-                       configure_card(card_index, capture, /*is_fake_capture=*/true, /*output=*/nullptr);
+                       configure_card(card_index, capture, CardType::FAKE_CAPTURE, /*output=*/nullptr);
                        card->queue_length_policy.reset(card_index);
                        card->capture->start_bm_capture();
                }
@@ -884,7 +1299,7 @@ void Mixer::handle_hotplugged_cards()
                        fprintf(stderr, "New card plugged in, choosing slot %d.\n", free_card_index);
                        CaptureCard *card = &cards[free_card_index];
                        BMUSBCapture *capture = new BMUSBCapture(free_card_index, new_dev);
-                       configure_card(free_card_index, capture, /*is_fake_capture=*/false, /*output=*/nullptr);
+                       configure_card(free_card_index, capture, CardType::LIVE_CARD, /*output=*/nullptr);
                        card->queue_length_policy.reset(free_card_index);
                        capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, free_card_index));
                        capture->start_bm_capture();
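
Both hotplug paths above use the same configure-in-place idiom, with the new CardType enum replacing the old is_fake_capture bool. The invariant worth noting is that a slot is never left empty: a vanished card is replaced by a fake one in the same slot, so card indexes handed to the theme stay valid across unplug and replug. A distilled sketch with hypothetical types:

	#include <memory>

	struct Capture {
		virtual ~Capture() {}
		virtual void start() = 0;
	};

	struct FakeSignal : Capture {  // stands in for FakeCapture
		explicit FakeSignal(unsigned /*card_index*/) {}
		void start() override {}
	};

	struct Slot {
		std::unique_ptr<Capture> capture;
	};

	// Replace in place rather than remove: downstream code never has to handle
	// a "hole", and the index keeps denoting the same logical input.
	void on_card_gone(Slot *slot, unsigned card_index)
	{
		slot->capture.reset(new FakeSignal(card_index));
		slot->capture->start();
	}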
@@ -929,62 +1344,145 @@ void Mixer::schedule_audio_resampling_tasks(unsigned dropped_frames, int num_sam
 
 void Mixer::render_one_frame(int64_t duration)
 {
+       // Determine the time code for this frame before we start rendering.
+       string timecode_text = timecode_renderer->get_timecode_text(double(pts_int) / TIMEBASE, frame_num);
+       if (display_timecode_on_stdout) {
+               printf("Timecode: '%s'\n", timecode_text.c_str());
+       }
+
+       // Update Y'CbCr settings for all cards.
+       {
+               unique_lock<mutex> lock(card_mutex);
+               for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
+                       YCbCrInterpretation *interpretation = &ycbcr_interpretation[card_index];
+                       input_state.ycbcr_coefficients_auto[card_index] = interpretation->ycbcr_coefficients_auto;
+                       input_state.ycbcr_coefficients[card_index] = interpretation->ycbcr_coefficients;
+                       input_state.full_range[card_index] = interpretation->full_range;
+               }
+       }
+
        // Get the main chain from the theme, and set its state immediately.
        Theme::Chain theme_main_chain = theme->get_chain(0, pts(), global_flags.width, global_flags.height, input_state);
        EffectChain *chain = theme_main_chain.chain;
        theme_main_chain.setup_chain();
        //theme_main_chain.chain->enable_phase_timing(true);
 
-       GLuint y_tex, cbcr_tex;
-       bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
+       // If HDMI/SDI output is active and the user has requested auto mode,
+       // its mode overrides the existing Y'CbCr setting for the chain.
+       YCbCrLumaCoefficients ycbcr_output_coefficients;
+       if (global_flags.ycbcr_auto_coefficients && output_card_index != -1) {
+               ycbcr_output_coefficients = cards[output_card_index].output->preferred_ycbcr_coefficients();
+       } else {
+               ycbcr_output_coefficients = global_flags.ycbcr_rec709_coefficients ? YCBCR_REC_709 : YCBCR_REC_601;
+       }
+
+       // TODO: Reduce the duplication against theme.cpp.
+       YCbCrFormat output_ycbcr_format;
+       output_ycbcr_format.chroma_subsampling_x = 1;
+       output_ycbcr_format.chroma_subsampling_y = 1;
+       output_ycbcr_format.luma_coefficients = ycbcr_output_coefficients;
+       output_ycbcr_format.full_range = false;
+       output_ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
+       chain->change_ycbcr_output_format(output_ycbcr_format);
+
+       // Render main chain. If we're using zerocopy Quick Sync encoding
+       // (the default case), we take an extra copy of the created outputs,
+       // so that we can display them on screen later (this costs less memory
+       // bandwidth than writing out and reading back an RGBA texture, even at 16-bit).
+       // Ideally, we'd like to avoid taking copies and just use the main textures
+       // for display as well, but they're just views into VA-API memory and must be
+       // unmapped during encoding, so we can't use them for display, unfortunately.
+       GLuint y_tex, cbcr_full_tex, cbcr_tex;
+       GLuint y_copy_tex, cbcr_copy_tex = 0;
+       GLuint y_display_tex, cbcr_display_tex;
+       GLenum y_type = (global_flags.x264_bit_depth > 8) ? GL_R16 : GL_R8;
+       GLenum cbcr_type = (global_flags.x264_bit_depth > 8) ? GL_RG16 : GL_RG8;
+       const bool is_zerocopy = video_encoder->is_zerocopy();
+       if (is_zerocopy) {
+               cbcr_full_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width, global_flags.height);
+               y_copy_tex = resource_pool->create_2d_texture(y_type, global_flags.width, global_flags.height);
+               cbcr_copy_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width / 2, global_flags.height / 2);
+
+               y_display_tex = y_copy_tex;
+               cbcr_display_tex = cbcr_copy_tex;
+
+               // y_tex and cbcr_tex will be given by VideoEncoder.
+       } else {
+               cbcr_full_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width, global_flags.height);
+               y_tex = resource_pool->create_2d_texture(y_type, global_flags.width, global_flags.height);
+               cbcr_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width / 2, global_flags.height / 2);
+
+               y_display_tex = y_tex;
+               cbcr_display_tex = cbcr_tex;
+       }
+
+       const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
+       bool got_frame = video_encoder->begin_frame(pts_int + av_delay, duration, ycbcr_output_coefficients, theme_main_chain.input_frames, &y_tex, &cbcr_tex);
        assert(got_frame);
 
-       // Render main chain.
-       GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, global_flags.width, global_flags.height);
-       GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, global_flags.width, global_flags.height);  // Saves texture bandwidth, although dithering gets messed up.
-       GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex);
+       GLuint fbo;
+       if (is_zerocopy) {
+               fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, y_copy_tex);
+       } else {
+               fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex);
+       }
        check_error();
        chain->render_to_fbo(fbo, global_flags.width, global_flags.height);
+
+       if (display_timecode_in_stream) {
+               // Render the timecode on top.
+               timecode_renderer->render_timecode(fbo, timecode_text);
+       }
+
        resource_pool->release_fbo(fbo);
 
-       chroma_subsampler->subsample_chroma(cbcr_full_tex, global_flags.width, global_flags.height, cbcr_tex);
+       if (is_zerocopy) {
+               chroma_subsampler->subsample_chroma(cbcr_full_tex, global_flags.width, global_flags.height, cbcr_tex, cbcr_copy_tex);
+       } else {
+               chroma_subsampler->subsample_chroma(cbcr_full_tex, global_flags.width, global_flags.height, cbcr_tex);
+       }
        if (output_card_index != -1) {
-               cards[output_card_index].output->send_frame(y_tex, cbcr_full_tex, theme_main_chain.input_frames, pts_int, duration);
+               cards[output_card_index].output->send_frame(y_tex, cbcr_full_tex, ycbcr_output_coefficients, theme_main_chain.input_frames, pts_int, duration);
        }
        resource_pool->release_2d_texture(cbcr_full_tex);
 
-       // Set the right state for rgba_tex.
+       // Set the right state for the Y' and CbCr textures we use for display.
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
-       glBindTexture(GL_TEXTURE_2D, rgba_tex);
+       glBindTexture(GL_TEXTURE_2D, y_display_tex);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
 
-       const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
-       RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);
+       glBindTexture(GL_TEXTURE_2D, cbcr_display_tex);
+       glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+       glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+       glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+       RefCountedGLsync fence = video_encoder->end_frame();
 
-       // The live frame just shows the RGBA texture we just rendered.
-       // It owns rgba_tex now.
+       // The live frame recombines the Y'CbCr texture copies back into RGB and displays them.
+       // It now owns y_display_tex and cbcr_display_tex (whichever actual textures those are).
        DisplayFrame live_frame;
        live_frame.chain = display_chain.get();
-       live_frame.setup_chain = [this, rgba_tex]{
-               display_input->set_texture_num(rgba_tex);
+       live_frame.setup_chain = [this, y_display_tex, cbcr_display_tex]{
+               display_input->set_texture_num(0, y_display_tex);
+               display_input->set_texture_num(1, cbcr_display_tex);
        };
        live_frame.ready_fence = fence;
        live_frame.input_frames = {};
-       live_frame.temp_textures = { rgba_tex };
-       output_channel[OUTPUT_LIVE].output_frame(live_frame);
+       live_frame.temp_textures = { y_display_tex, cbcr_display_tex };
+       output_channel[OUTPUT_LIVE].output_frame(move(live_frame));
 
        // Set up preview and any additional channels.
        for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
                DisplayFrame display_frame;
                Theme::Chain chain = theme->get_chain(i, pts(), global_flags.width, global_flags.height, input_state);  // FIXME: dimensions
-               display_frame.chain = chain.chain;
-               display_frame.setup_chain = chain.setup_chain;
+               display_frame.chain = move(chain.chain);
+               display_frame.setup_chain = move(chain.setup_chain);
                display_frame.ready_fence = fence;
-               display_frame.input_frames = chain.input_frames;
+               display_frame.input_frames = move(chain.input_frames);
                display_frame.temp_textures = {};
-               output_channel[i].output_frame(display_frame);
+               output_channel[i].output_frame(move(display_frame));
        }
 }
 
@@ -1007,7 +1505,7 @@ void Mixer::audio_thread_func()
 
                ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy =
                        task.adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE;
-               vector<float> samples_out = audio_mixer.get_output(
+               vector<float> samples_out = audio_mixer->get_output(
                        task.frame_timestamp,
                        task.num_samples,
                        rate_adjustment_policy);
@@ -1058,6 +1556,18 @@ void Mixer::channel_clicked(int preview_num)
        theme->channel_clicked(preview_num);
 }
 
+YCbCrInterpretation Mixer::get_input_ycbcr_interpretation(unsigned card_index) const
+{
+       unique_lock<mutex> lock(card_mutex);
+       return ycbcr_interpretation[card_index];
+}
+
+void Mixer::set_input_ycbcr_interpretation(unsigned card_index, const YCbCrInterpretation &interpretation)
+{
+       unique_lock<mutex> lock(card_mutex);
+       ycbcr_interpretation[card_index] = interpretation;
+}
+
 void Mixer::start_mode_scanning(unsigned card_index)
 {
        assert(card_index < num_cards);
@@ -1075,13 +1585,32 @@ void Mixer::start_mode_scanning(unsigned card_index)
        last_mode_scan_change[card_index] = steady_clock::now();
 }
 
-map<uint32_t, bmusb::VideoMode> Mixer::get_available_output_video_modes() const
+map<uint32_t, VideoMode> Mixer::get_available_output_video_modes() const
 {
        assert(desired_output_card_index != -1);
        unique_lock<mutex> lock(card_mutex);
        return cards[desired_output_card_index].output->get_available_video_modes();
 }
 
+string Mixer::get_ffmpeg_filename(unsigned card_index) const
+{
+       assert(card_index >= num_cards && card_index < num_cards + num_video_inputs);
+       return ((FFmpegCapture *)(cards[card_index].capture.get()))->get_filename();
+}
+
+void Mixer::set_ffmpeg_filename(unsigned card_index, const string &filename)
+{
+       assert(card_index >= num_cards && card_index < num_cards + num_video_inputs);
+       ((FFmpegCapture *)(cards[card_index].capture.get()))->change_filename(filename);
+}
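
Both helpers assert that card_index actually denotes an FFmpeg input; those occupy the indexes directly after the physical capture cards. A hypothetical runtime switch, assuming a setup with exactly one physical card so that the first video input sits at index 1:

	#include <cstdio>

	void switch_video_input_example()
	{
		const unsigned first_video_input = 1;  // assumption: num_cards == 1
		printf("Was playing: %s\n",
		       global_mixer->get_ffmpeg_filename(first_video_input).c_str());
		global_mixer->set_ffmpeg_filename(first_video_input, "/tmp/new_clip.mp4");
	}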
+
+void Mixer::wait_for_next_frame()
+{
+       unique_lock<mutex> lock(frame_num_mutex);
+       unsigned old_frame_num = frame_num;
+       frame_num_updated.wait_for(lock, seconds(1),  // Timeout is just in case.
+               [old_frame_num, this]{ return this->frame_num > old_frame_num; });
+}
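
wait_for_next_frame() pairs with the frame_num increment in thread_func() further up: bump under the mutex, notify outside it, and always wait with a predicate. The pattern in isolation:

	#include <chrono>
	#include <condition_variable>
	#include <mutex>

	std::mutex frame_num_mutex;
	std::condition_variable frame_num_updated;
	unsigned frame_num = 0;  // guarded by frame_num_mutex

	void producer_tick()  // called once per rendered frame
	{
		{
			std::lock_guard<std::mutex> lock(frame_num_mutex);
			++frame_num;
		}
		frame_num_updated.notify_all();  // outside the lock; waiters re-check the predicate
	}

	void wait_for_next_frame()
	{
		std::unique_lock<std::mutex> lock(frame_num_mutex);
		unsigned old_frame_num = frame_num;
		// The predicate guards against spurious wakeups; the timeout is only a
		// safety net in case the mixer stops producing frames entirely.
		frame_num_updated.wait_for(lock, std::chrono::seconds(1),
			[&]{ return frame_num > old_frame_num; });
	}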
+
 Mixer::OutputChannel::~OutputChannel()
 {
        if (has_current_frame) {
@@ -1092,7 +1621,7 @@ Mixer::OutputChannel::~OutputChannel()
        }
 }
 
-void Mixer::OutputChannel::output_frame(DisplayFrame frame)
+void Mixer::OutputChannel::output_frame(DisplayFrame &&frame)
 {
        // Store this frame for display. Remove the ready frame if any
        // (it was seemingly never used).
@@ -1101,12 +1630,14 @@ void Mixer::OutputChannel::output_frame(DisplayFrame frame)
                if (has_ready_frame) {
                        parent->release_display_frame(&ready_frame);
                }
-               ready_frame = frame;
+               ready_frame = move(frame);
                has_ready_frame = true;
-       }
 
-       if (new_frame_ready_callback) {
-               new_frame_ready_callback();
+               // Call the callbacks under the mutex (they should be short),
+               // so that we don't race against a callback removal.
+               for (const auto &key_and_callback : new_frame_ready_callbacks) {
+                       key_and_callback.second();
+               }
        }
 
        // Reduce the number of callbacks by filtering duplicates. The reason
@@ -1162,7 +1693,7 @@ bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame)
        }
        if (has_ready_frame) {
                assert(!has_current_frame);
-               current_frame = ready_frame;
+               current_frame = move(ready_frame);
                ready_frame.ready_fence.reset();  // Drop the refcount.
                ready_frame.input_frames.clear();  // Drop the refcounts.
                has_current_frame = true;
@@ -1173,9 +1704,16 @@ bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame)
        return true;
 }
 
-void Mixer::OutputChannel::set_frame_ready_callback(Mixer::new_frame_ready_callback_t callback)
+void Mixer::OutputChannel::add_frame_ready_callback(void *key, Mixer::new_frame_ready_callback_t callback)
 {
-       new_frame_ready_callback = callback;
+       unique_lock<mutex> lock(frame_mutex);
+       new_frame_ready_callbacks[key] = callback;
+}
+
+void Mixer::OutputChannel::remove_frame_ready_callback(void *key)
+{
+       unique_lock<mutex> lock(frame_mutex);
+       new_frame_ready_callbacks.erase(key);
 }
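
Keyed registration lets several consumers watch the same channel without clobbering each other, and makes deregistration unambiguous. Per the comment in output_frame(), callbacks run under frame_mutex and should therefore only schedule work. A self-contained sketch of the same idea (FrameNotifier is a stand-in for OutputChannel, not the real class):

	#include <functional>
	#include <map>
	#include <mutex>

	class FrameNotifier {
	public:
		void add_frame_ready_callback(void *key, std::function<void()> callback)
		{
			std::lock_guard<std::mutex> lock(mu);
			callbacks[key] = std::move(callback);
		}
		void remove_frame_ready_callback(void *key)
		{
			std::lock_guard<std::mutex> lock(mu);
			callbacks.erase(key);
		}
		void frame_ready()  // called by the producer; callbacks must be short
		{
			std::lock_guard<std::mutex> lock(mu);
			for (const auto &key_and_callback : callbacks) {
				key_and_callback.second();
			}
		}

	private:
		std::mutex mu;
		std::map<void *, std::function<void()>> callbacks;
	};

	// A consumer keys the registration by its own address, so its destructor can
	// always remove exactly the callback it added and never leave one dangling.
	class PreviewConsumer {
	public:
		explicit PreviewConsumer(FrameNotifier *notifier) : notifier(notifier)
		{
			notifier->add_frame_ready_callback(this, [this]{ /* schedule a repaint */ });
		}
		~PreviewConsumer() { notifier->remove_frame_ready_callback(this); }

	private:
		FrameNotifier *notifier;
	};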
 
 void Mixer::OutputChannel::set_transition_names_updated_callback(Mixer::transition_names_updated_callback_t callback)