[nageru] / mixer.cpp
#undef Success

#include "mixer.h"

#include <assert.h>
#include <epoxy/egl.h>
#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <movit/flat_input.h>
#include <movit/image_format.h>
#include <movit/init.h>
#include <movit/resource_pool.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include <ratio>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "DeckLinkAPI.h"
#include "LinuxCOM.h"
#include "alsa_output.h"
#include "basic_stats.h"
#include "bmusb/bmusb.h"
#include "bmusb/fake_capture.h"
#include "chroma_subsampler.h"
#include "context.h"
#include "decklink_capture.h"
#include "decklink_output.h"
#include "defs.h"
#include "disk_space_estimator.h"
#include "ffmpeg_capture.h"
#include "flags.h"
#include "input_mapping.h"
#include "metrics.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
#include "resampling_queue.h"
#include "timebase.h"
#include "timecode_renderer.h"
#include "v210_converter.h"
#include "video_encoder.h"

class IDeckLink;
class QOpenGLContext;

using namespace movit;
using namespace std;
using namespace std::chrono;
using namespace std::placeholders;
using namespace bmusb;

Mixer *global_mixer = nullptr;

namespace {

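// Insert a newly arrived frame (or field) into the per-card frame history
// that the theme uses for deinterlacing. For interlaced input, the history
// is shifted one step and the new field goes in front; for progressive
// input, the entire history is filled with the same frame, so anything
// reading several history slots still sees consistent data.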
void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced, unsigned card_index, InputState *input_state)
{
        if (interlaced) {
                for (unsigned frame_num = FRAME_HISTORY_LENGTH; frame_num --> 1; ) {  // :-)
                        input_state->buffered_frames[card_index][frame_num] =
                                input_state->buffered_frames[card_index][frame_num - 1];
                }
                input_state->buffered_frames[card_index][0] = { frame, field_num };
        } else {
                for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
                        input_state->buffered_frames[card_index][frame_num] = { frame, field_num };
                }
        }
}

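// (Re)allocate the GL textures backing this frame if they have not been
// created yet, or if the resolution changed since they were last used.
// Which textures exist depends on the pixel format: v210 needs a raw v210
// texture plus an unpacked 4:4:4 one, 8-bit 4:2:2 needs a Y texture plus an
// interleaved CbCr texture, planar needs separate Y/Cb/Cr textures, and
// BGRA needs a single RGBA texture.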
void ensure_texture_resolution(PBOFrameAllocator::Userdata *userdata, unsigned field, unsigned width, unsigned height, unsigned cbcr_width, unsigned cbcr_height, unsigned v210_width)
{
        bool first;
        switch (userdata->pixel_format) {
        case PixelFormat_10BitYCbCr:
                first = userdata->tex_v210[field] == 0 || userdata->tex_444[field] == 0;
                break;
        case PixelFormat_8BitYCbCr:
                first = userdata->tex_y[field] == 0 || userdata->tex_cbcr[field] == 0;
                break;
        case PixelFormat_8BitBGRA:
                first = userdata->tex_rgba[field] == 0;
                break;
        case PixelFormat_8BitYCbCrPlanar:
                first = userdata->tex_y[field] == 0 || userdata->tex_cb[field] == 0 || userdata->tex_cr[field] == 0;
                break;
        default:
                assert(false);
        }

        if (first ||
            width != userdata->last_width[field] ||
            height != userdata->last_height[field] ||
            cbcr_width != userdata->last_cbcr_width[field] ||
            cbcr_height != userdata->last_cbcr_height[field]) {
                // We changed resolution since last use of this texture, so we need to create
                // a new object. Note that since each card has its own PBOFrameAllocator,
                // we don't need to worry about these flip-flopping between resolutions.
                switch (userdata->pixel_format) {
                case PixelFormat_10BitYCbCr:
                        glBindTexture(GL_TEXTURE_2D, userdata->tex_444[field]);
                        check_error();
                        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB10_A2, width, height, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, nullptr);
                        check_error();
                        break;
                case PixelFormat_8BitYCbCr: {
                        glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
                        check_error();
                        glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr);
                        check_error();
                        glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
                        check_error();
                        glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
                        check_error();
                        break;
                }
                case PixelFormat_8BitYCbCrPlanar: {
                        glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
                        check_error();
                        glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
                        check_error();
                        glBindTexture(GL_TEXTURE_2D, userdata->tex_cb[field]);
                        check_error();
                        glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, cbcr_width, cbcr_height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
                        check_error();
                        glBindTexture(GL_TEXTURE_2D, userdata->tex_cr[field]);
                        check_error();
                        glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, cbcr_width, cbcr_height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
                        check_error();
                        break;
                }
                case PixelFormat_8BitBGRA:
                        glBindTexture(GL_TEXTURE_2D, userdata->tex_rgba[field]);
                        check_error();
                        if (global_flags.can_disable_srgb_decoder) {  // See the comments in tweaked_inputs.h.
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
                        } else {
                                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
                        }
                        check_error();
                        break;
                }
                userdata->last_width[field] = width;
                userdata->last_height[field] = height;
                userdata->last_cbcr_width[field] = cbcr_width;
                userdata->last_cbcr_height[field] = cbcr_height;
        }
        if (global_flags.ten_bit_input &&
            (first || v210_width != userdata->last_v210_width[field])) {
                // Same as above; we need to recreate the texture.
                glBindTexture(GL_TEXTURE_2D, userdata->tex_v210[field]);
                check_error();
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB10_A2, v210_width, height, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, nullptr);
                check_error();
                userdata->last_v210_width[field] = v210_width;
        }
}

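// Upload texture data from the mapped PBO (bound as GL_PIXEL_UNPACK_BUFFER
// by the caller) into <tex>. For interlaced content, each field is uploaded
// as its own half-height frame; doubling the stride and setting
// GL_UNPACK_ROW_LENGTH accordingly makes OpenGL skip every other line, so a
// field can be read straight out of the interleaved buffer.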
void upload_texture(GLuint tex, GLuint width, GLuint height, GLuint stride, bool interlaced_stride, GLenum format, GLenum type, GLintptr offset)
{
        if (interlaced_stride) {
                stride *= 2;
        }
        if (global_flags.flush_pbos) {
                glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, offset, stride * height);
                check_error();
        }

        glBindTexture(GL_TEXTURE_2D, tex);
        check_error();
        if (interlaced_stride) {
                glPixelStorei(GL_UNPACK_ROW_LENGTH, width * 2);
                check_error();
        } else {
                glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
                check_error();
        }

        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, BUFFER_OFFSET(offset));
        check_error();
        glBindTexture(GL_TEXTURE_2D, 0);
        check_error();
        glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
        check_error();
}

}  // namespace

void JitterHistory::register_metrics(const vector<pair<string, string>> &labels)
{
        global_metrics.add("input_underestimated_jitter_frames", labels, &metric_input_underestimated_jitter_frames);
        global_metrics.add("input_estimated_max_jitter_seconds", labels, &metric_input_estimated_max_jitter_seconds, Metrics::TYPE_GAUGE);
}

void JitterHistory::unregister_metrics(const vector<pair<string, string>> &labels)
{
        global_metrics.remove("input_underestimated_jitter_frames", labels);
        global_metrics.remove("input_estimated_max_jitter_seconds", labels);
}

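// Register a frame arrival and update the jitter history. The jitter sample
// is the absolute difference between when we expected the frame (the
// previous arrival plus the nominal frame duration, adjusted for any frames
// the driver reported as dropped) and when it actually arrived. Samples are
// kept in a sliding window of <history_length> frames.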
void JitterHistory::frame_arrived(steady_clock::time_point now, int64_t frame_duration, size_t dropped_frames)
{
        if (expected_timestamp > steady_clock::time_point::min()) {
                expected_timestamp += dropped_frames * nanoseconds(frame_duration * 1000000000 / TIMEBASE);
                double jitter_seconds = fabs(duration<double>(expected_timestamp - now).count());
                history.push_back(orders.insert(jitter_seconds));
                if (jitter_seconds > estimate_max_jitter()) {
                        ++metric_input_underestimated_jitter_frames;
                }

                metric_input_estimated_max_jitter_seconds = estimate_max_jitter();

                if (history.size() > history_length) {
                        orders.erase(history.front());
                        history.pop_front();
                }
                assert(history.size() <= history_length);
        }
        expected_timestamp = now + nanoseconds(frame_duration * 1000000000 / TIMEBASE);
}

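// Estimate the worst-case jitter as a fixed percentile of the observed
// samples, scaled by a safety multiplier. The wanted element is found by
// stepping in from whichever end of the sorted multiset is nearer: from the
// front for percentiles at or below the median, from the back otherwise.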
double JitterHistory::estimate_max_jitter() const
{
        if (orders.empty()) {
                return 0.0;
        }
        size_t elem_idx = lrint((orders.size() - 1) * percentile);
        if (percentile <= 0.5) {
                return *next(orders.begin(), elem_idx) * multiplier;
        } else {
                return *prev(orders.end(), orders.size() - elem_idx) * multiplier;
        }
}

void QueueLengthPolicy::register_metrics(const vector<pair<string, string>> &labels)
{
        global_metrics.add("input_queue_safe_length_frames", labels, &metric_input_queue_safe_length_frames, Metrics::TYPE_GAUGE);
}

void QueueLengthPolicy::unregister_metrics(const vector<pair<string, string>> &labels)
{
        global_metrics.remove("input_queue_safe_length_frames", labels);
}

void QueueLengthPolicy::update_policy(steady_clock::time_point now,
                                      steady_clock::time_point expected_next_frame,
                                      int64_t input_frame_duration,
                                      int64_t master_frame_duration,
                                      double max_input_card_jitter_seconds,
                                      double max_master_card_jitter_seconds)
{
        double input_frame_duration_seconds = input_frame_duration / double(TIMEBASE);
        double master_frame_duration_seconds = master_frame_duration / double(TIMEBASE);

        // Figure out when we can expect the next frame for this card, assuming
        // worst-case jitter (ie., the frame is maximally late).
        double seconds_until_next_frame = max(duration<double>(expected_next_frame - now).count() + max_input_card_jitter_seconds, 0.0);

        // How many times is the master card expected to tick in that time?
        // We assume the master clock has worst-case jitter but not any rate
        // discrepancy, ie., it ticks as early as possible every time, but not
        // cumulatively.
        double frames_needed = (seconds_until_next_frame + max_master_card_jitter_seconds) / master_frame_duration_seconds;

        // As a special case, if the master card ticks faster than the input card,
        // we expect the queue to drain by itself even without dropping. But if
        // the difference is small (e.g. 60 Hz master and 59.94 input), it would
        // go slowly enough that the effect wouldn't really be appreciable.
        // We account for this by looking at the situation five frames ahead,
        // assuming everything else is the same.
        double frames_allowed;
        if (master_frame_duration < input_frame_duration) {
                frames_allowed = frames_needed + 5 * (input_frame_duration_seconds - master_frame_duration_seconds) / master_frame_duration_seconds;
        } else {
                frames_allowed = frames_needed;
        }

        safe_queue_length = max<int>(floor(frames_allowed), 0);
        metric_input_queue_safe_length_frames = safe_queue_length;
}

Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
        : httpd(),
          num_cards(num_cards),
          mixer_surface(create_surface(format)),
          h264_encoder_surface(create_surface(format)),
          decklink_output_surface(create_surface(format)),
          audio_mixer(num_cards)
{
        memcpy(ycbcr_interpretation, global_flags.ycbcr_interpretation, sizeof(ycbcr_interpretation));
        CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
        check_error();

        // This should nearly always be true.
        global_flags.can_disable_srgb_decoder =
                epoxy_has_gl_extension("GL_EXT_texture_sRGB_decode") &&
                epoxy_has_gl_extension("GL_ARB_sampler_objects");

        // Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision
        // will be halved when sampling them, and we need to compensate here.
        movit_texel_subpixel_precision /= 2.0;

        resource_pool.reset(new ResourcePool);
        for (unsigned i = 0; i < NUM_OUTPUTS; ++i) {
                output_channel[i].parent = this;
                output_channel[i].channel = i;
        }

        ImageFormat inout_format;
        inout_format.color_space = COLORSPACE_sRGB;
        inout_format.gamma_curve = GAMMA_sRGB;

        // Matches the 4:2:0 format created by the main chain.
        YCbCrFormat ycbcr_format;
        ycbcr_format.chroma_subsampling_x = 2;
        ycbcr_format.chroma_subsampling_y = 2;
        if (global_flags.ycbcr_rec709_coefficients) {
                ycbcr_format.luma_coefficients = YCBCR_REC_709;
        } else {
                ycbcr_format.luma_coefficients = YCBCR_REC_601;
        }
        ycbcr_format.full_range = false;
        ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
        ycbcr_format.cb_x_position = 0.0f;
        ycbcr_format.cr_x_position = 0.0f;
        ycbcr_format.cb_y_position = 0.5f;
        ycbcr_format.cr_y_position = 0.5f;

        // Display chain; shows the live output produced by the main chain (or rather, a copy of it).
        display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get()));
        check_error();
        GLenum type = global_flags.x264_bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
        display_input = new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, YCBCR_INPUT_SPLIT_Y_AND_CBCR, type);
        display_chain->add_input(display_input);
        display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
        display_chain->set_dither_bits(0);  // Don't bother.
        display_chain->finalize();

        video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, global_flags.width, global_flags.height, &httpd, global_disk_space_estimator));

        // Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
        theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), num_cards));

        // Start listening for clients only once VideoEncoder has written its header, if any.
        httpd.start(9095);

        // First try initializing the PCI devices, then the USB devices, and then
        // fill up with fake cards until we have the desired number of cards.
        unsigned num_pci_devices = 0;
        unsigned card_index = 0;

        {
                IDeckLinkIterator *decklink_iterator = CreateDeckLinkIteratorInstance();
                if (decklink_iterator != nullptr) {
                        for ( ; card_index < num_cards; ++card_index) {
                                IDeckLink *decklink;
                                if (decklink_iterator->Next(&decklink) != S_OK) {
                                        break;
                                }

                                DeckLinkCapture *capture = new DeckLinkCapture(decklink, card_index);
                                DeckLinkOutput *output = new DeckLinkOutput(resource_pool.get(), decklink_output_surface, global_flags.width, global_flags.height, card_index);
                                output->set_device(decklink);
                                configure_card(card_index, capture, CardType::LIVE_CARD, output);
                                ++num_pci_devices;
                        }
                        decklink_iterator->Release();
                        fprintf(stderr, "Found %u DeckLink PCI card(s).\n", num_pci_devices);
                } else {
                        fprintf(stderr, "DeckLink drivers not found. Probing for USB cards only.\n");
                }
        }

        unsigned num_usb_devices = BMUSBCapture::num_cards();
        for (unsigned usb_card_index = 0; usb_card_index < num_usb_devices && card_index < num_cards; ++usb_card_index, ++card_index) {
                BMUSBCapture *capture = new BMUSBCapture(usb_card_index);
                capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, card_index));
                configure_card(card_index, capture, CardType::LIVE_CARD, /*output=*/nullptr);
        }
        fprintf(stderr, "Found %u USB card(s).\n", num_usb_devices);

        unsigned num_fake_cards = 0;
        for ( ; card_index < num_cards; ++card_index, ++num_fake_cards) {
                FakeCapture *capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
                configure_card(card_index, capture, CardType::FAKE_CAPTURE, /*output=*/nullptr);
        }

        if (num_fake_cards > 0) {
                fprintf(stderr, "Initialized %u fake cards.\n", num_fake_cards);
        }

        // Initialize all video inputs the theme asked for. Note that these are
        // all put _after_ the regular cards, which stop at <num_cards> - 1.
        std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
        for (unsigned video_card_index = 0; video_card_index < video_inputs.size(); ++card_index, ++video_card_index) {
                if (card_index >= MAX_VIDEO_CARDS) {
                        fprintf(stderr, "ERROR: Not enough card slots available for the videos the theme requested.\n");
                        exit(1);
                }
                configure_card(card_index, video_inputs[video_card_index], CardType::FFMPEG_INPUT, /*output=*/nullptr);
                video_inputs[video_card_index]->set_card_index(card_index);
        }
        num_video_inputs = video_inputs.size();

        BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1));
        BMUSBCapture::start_bm_thread();

        for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                cards[card_index].queue_length_policy.reset(card_index);
        }

        chroma_subsampler.reset(new ChromaSubsampler(resource_pool.get()));

        if (global_flags.ten_bit_input) {
                if (!v210Converter::has_hardware_support()) {
                        fprintf(stderr, "ERROR: --ten-bit-input requires support for OpenGL compute shaders\n");
                        fprintf(stderr, "       (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n");
                        exit(1);
                }
                v210_converter.reset(new v210Converter());

                // These are all the widths listed in the Blackmagic SDK documentation
                // (section 2.7.3, “Display Modes”).
                v210_converter->precompile_shader(720);
                v210_converter->precompile_shader(1280);
                v210_converter->precompile_shader(1920);
                v210_converter->precompile_shader(2048);
                v210_converter->precompile_shader(3840);
                v210_converter->precompile_shader(4096);
        }
        if (global_flags.ten_bit_output) {
                if (!v210Converter::has_hardware_support()) {
                        fprintf(stderr, "ERROR: --ten-bit-output requires support for OpenGL compute shaders\n");
                        fprintf(stderr, "       (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n");
                        exit(1);
                }
        }

        timecode_renderer.reset(new TimecodeRenderer(resource_pool.get(), global_flags.width, global_flags.height));
        display_timecode_in_stream = global_flags.display_timecode_in_stream;
        display_timecode_on_stdout = global_flags.display_timecode_on_stdout;

        if (global_flags.enable_alsa_output) {
                alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
        }
        if (global_flags.output_card != -1) {
                desired_output_card_index = global_flags.output_card;
                set_output_card_internal(global_flags.output_card);
        }

        output_jitter_history.register_metrics({{ "card", "output" }});
}

Mixer::~Mixer()
{
        BMUSBCapture::stop_bm_thread();

        for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                {
                        unique_lock<mutex> lock(card_mutex);
                        cards[card_index].should_quit = true;  // Unblock thread.
                        cards[card_index].new_frames_changed.notify_all();
                }
                cards[card_index].capture->stop_dequeue_thread();
                if (cards[card_index].output) {
                        cards[card_index].output->end_output();
                        cards[card_index].output.reset();
                }
        }

        video_encoder.reset(nullptr);
}

void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardType card_type, DeckLinkOutput *output)
{
        printf("Configuring card %d...\n", card_index);

        CaptureCard *card = &cards[card_index];
        if (card->capture != nullptr) {
                card->capture->stop_dequeue_thread();
        }
        card->capture.reset(capture);
        card->is_fake_capture = (card_type == CardType::FAKE_CAPTURE);
        card->type = card_type;
        if (card->output.get() != output) {
                card->output.reset(output);
        }

        PixelFormat pixel_format;
        if (card_type == CardType::FFMPEG_INPUT) {
                pixel_format = capture->get_current_pixel_format();
        } else if (global_flags.ten_bit_input) {
                pixel_format = PixelFormat_10BitYCbCr;
        } else {
                pixel_format = PixelFormat_8BitYCbCr;
        }

        card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
        if (card->frame_allocator == nullptr) {
                card->frame_allocator.reset(new PBOFrameAllocator(pixel_format, 8 << 20, global_flags.width, global_flags.height));  // 8 MB.
        }
        card->capture->set_video_frame_allocator(card->frame_allocator.get());
        if (card->surface == nullptr) {
                card->surface = create_surface_with_same_format(mixer_surface);
        }
        while (!card->new_frames.empty()) card->new_frames.pop_front();
        card->last_timecode = -1;
        card->capture->set_pixel_format(pixel_format);
        card->capture->configure_card();

        // NOTE: start_bm_capture() happens in thread_func().

        DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
        audio_mixer.reset_resampler(device);
        audio_mixer.set_display_name(device, card->capture->get_description());
        audio_mixer.trigger_state_changed_callback();

        // Unregister old metrics, if any.
        if (!card->labels.empty()) {
                const vector<pair<string, string>> &labels = card->labels;
                card->jitter_history.unregister_metrics(labels);
                card->queue_length_policy.unregister_metrics(labels);
                global_metrics.remove("input_received_frames", labels);
                global_metrics.remove("input_dropped_frames_jitter", labels);
                global_metrics.remove("input_dropped_frames_error", labels);
                global_metrics.remove("input_dropped_frames_resets", labels);
                global_metrics.remove("input_queue_length_frames", labels);
                global_metrics.remove("input_queue_duped_frames", labels);

                global_metrics.remove("input_has_signal_bool", labels);
                global_metrics.remove("input_is_connected_bool", labels);
                global_metrics.remove("input_interlaced_bool", labels);
                global_metrics.remove("input_width_pixels", labels);
                global_metrics.remove("input_height_pixels", labels);
                global_metrics.remove("input_frame_rate_nom", labels);
                global_metrics.remove("input_frame_rate_den", labels);
                global_metrics.remove("input_sample_rate_hz", labels);
        }

        // Register metrics.
        vector<pair<string, string>> labels;
        char card_name[64];
        snprintf(card_name, sizeof(card_name), "%d", card_index);
        labels.emplace_back("card", card_name);

        switch (card_type) {
        case CardType::LIVE_CARD:
                labels.emplace_back("cardtype", "live");
                break;
        case CardType::FAKE_CAPTURE:
                labels.emplace_back("cardtype", "fake");
                break;
        case CardType::FFMPEG_INPUT:
                labels.emplace_back("cardtype", "ffmpeg");
                break;
        default:
                assert(false);
        }
        card->jitter_history.register_metrics(labels);
        card->queue_length_policy.register_metrics(labels);
        global_metrics.add("input_received_frames", labels, &card->metric_input_received_frames);
        global_metrics.add("input_dropped_frames_jitter", labels, &card->metric_input_dropped_frames_jitter);
        global_metrics.add("input_dropped_frames_error", labels, &card->metric_input_dropped_frames_error);
        global_metrics.add("input_dropped_frames_resets", labels, &card->metric_input_resets);
        global_metrics.add("input_queue_length_frames", labels, &card->metric_input_queue_length_frames, Metrics::TYPE_GAUGE);
        global_metrics.add("input_queue_duped_frames", labels, &card->metric_input_duped_frames);

        global_metrics.add("input_has_signal_bool", labels, &card->metric_input_has_signal_bool, Metrics::TYPE_GAUGE);
        global_metrics.add("input_is_connected_bool", labels, &card->metric_input_is_connected_bool, Metrics::TYPE_GAUGE);
        global_metrics.add("input_interlaced_bool", labels, &card->metric_input_interlaced_bool, Metrics::TYPE_GAUGE);
        global_metrics.add("input_width_pixels", labels, &card->metric_input_width_pixels, Metrics::TYPE_GAUGE);
        global_metrics.add("input_height_pixels", labels, &card->metric_input_height_pixels, Metrics::TYPE_GAUGE);
        global_metrics.add("input_frame_rate_nom", labels, &card->metric_input_frame_rate_nom, Metrics::TYPE_GAUGE);
        global_metrics.add("input_frame_rate_den", labels, &card->metric_input_frame_rate_den, Metrics::TYPE_GAUGE);
        global_metrics.add("input_sample_rate_hz", labels, &card->metric_input_sample_rate_hz, Metrics::TYPE_GAUGE);
        card->labels = labels;
}

void Mixer::set_output_card_internal(int card_index)
{
        // We don't really need to take card_mutex, since we're in the mixer
        // thread and don't mess with any queues (which is the only thing that happens
        // from other threads), but it's probably the safest in the long run.
        unique_lock<mutex> lock(card_mutex);
        if (output_card_index != -1) {
                // Switch the old card from output to input.
                CaptureCard *old_card = &cards[output_card_index];
                old_card->output->end_output();

                // Stop the fake card that we put into place.
                // This needs to _not_ happen under the mutex, to avoid deadlock
                // (delivering the last frame needs to take the mutex).
                CaptureInterface *fake_capture = old_card->capture.get();
                lock.unlock();
                fake_capture->stop_dequeue_thread();
                lock.lock();
                old_card->capture = move(old_card->parked_capture);  // TODO: reset the metrics
                old_card->is_fake_capture = false;
                old_card->capture->start_bm_capture();
        }
        if (card_index != -1) {
                CaptureCard *card = &cards[card_index];
                CaptureInterface *capture = card->capture.get();
                // TODO: DeckLinkCapture::stop_dequeue_thread can actually take
                // several seconds to complete (blocking on DisableVideoInput);
                // see if we can maybe do it asynchronously.
                lock.unlock();
                capture->stop_dequeue_thread();
                lock.lock();
                card->parked_capture = move(card->capture);
                CaptureInterface *fake_capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
                configure_card(card_index, fake_capture, CardType::FAKE_CAPTURE, card->output.release());
                card->queue_length_policy.reset(card_index);
                card->capture->start_bm_capture();
                desired_output_video_mode = output_video_mode = card->output->pick_video_mode(desired_output_video_mode);
                card->output->start_output(desired_output_video_mode, pts_int);
        }
        output_card_index = card_index;
        output_jitter_history.clear();
}

namespace {

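// The capture timecode is only 16 bits, so it wraps around roughly every 18
// minutes at 60 fps. Reconstruct a monotonically increasing timecode by
// assuming at most one wraparound since <last>; e.g., with last = 0x2fffe
// and current_wrapped = 0x0001, the result is 0x30001.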
int unwrap_timecode(uint16_t current_wrapped, int last)
{
        uint16_t last_wrapped = last & 0xffff;
        if (current_wrapped > last_wrapped) {
                return (last & ~0xffff) | current_wrapped;
        } else {
                return 0x10000 + ((last & ~0xffff) | current_wrapped);
        }
}

}  // namespace

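// Callback from the capture thread for each incoming frame. Validates the
// audio and video lengths, inserts silence for any frames the driver
// dropped, hands the audio to the audio mixer, and queues the video (as one
// frame, or two fields for interlaced input) on the card, with the actual
// GL upload deferred to a lambda that runs on the mixer thread.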
void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                     FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
                     FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
{
        DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
        CaptureCard *card = &cards[card_index];

        ++card->metric_input_received_frames;
        card->metric_input_has_signal_bool = video_format.has_signal;
        card->metric_input_is_connected_bool = video_format.is_connected;
        card->metric_input_interlaced_bool = video_format.interlaced;
        card->metric_input_width_pixels = video_format.width;
        card->metric_input_height_pixels = video_format.height;
        card->metric_input_frame_rate_nom = video_format.frame_rate_nom;
        card->metric_input_frame_rate_den = video_format.frame_rate_den;
        card->metric_input_sample_rate_hz = audio_format.sample_rate;

        if (is_mode_scanning[card_index]) {
                if (video_format.has_signal) {
                        // Found a stable signal, so stop scanning.
                        is_mode_scanning[card_index] = false;
                } else {
                        static constexpr double switch_time_s = 0.1;  // Should be enough time for the signal to stabilize.
                        steady_clock::time_point now = steady_clock::now();
                        double sec_since_last_switch = duration<double>(steady_clock::now() - last_mode_scan_change[card_index]).count();
                        if (sec_since_last_switch > switch_time_s) {
                                // It isn't this mode; try the next one.
                                mode_scanlist_index[card_index]++;
                                mode_scanlist_index[card_index] %= mode_scanlist[card_index].size();
                                cards[card_index].capture->set_video_mode(mode_scanlist[card_index][mode_scanlist_index[card_index]]);
                                last_mode_scan_change[card_index] = now;
                        }
                }
        }

        int64_t frame_length = int64_t(TIMEBASE) * video_format.frame_rate_den / video_format.frame_rate_nom;
        assert(frame_length > 0);

        size_t num_samples = (audio_frame.len > audio_offset) ? (audio_frame.len - audio_offset) / audio_format.num_channels / (audio_format.bits_per_sample / 8) : 0;
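        // Sanity-check the audio length: more than roughly a tenth of a second
        // of samples (OUTPUT_FREQUENCY / 10) attached to a single video frame
        // is implausible for any supported frame rate, so treat the frame as
        // corrupted and drop it.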
        if (num_samples > OUTPUT_FREQUENCY / 10) {
                printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x]\n",
                        card_index, int(audio_frame.len), int(audio_offset),
                        timecode, int(video_frame.len), int(video_offset), video_format.id);
                if (video_frame.owner) {
                        video_frame.owner->release_frame(video_frame);
                }
                if (audio_frame.owner) {
                        audio_frame.owner->release_frame(audio_frame);
                }
                return;
        }

        int dropped_frames = 0;
        if (card->last_timecode != -1) {
                dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
        }

        // Number of samples per frame if we need to insert silence.
        // (Could be nonintegral, but resampling will save us then.)
        const int silence_samples = OUTPUT_FREQUENCY * video_format.frame_rate_den / video_format.frame_rate_nom;

        if (dropped_frames > MAX_FPS * 2) {
                fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
                        card_index, card->last_timecode, timecode);
                audio_mixer.reset_resampler(device);
                dropped_frames = 0;
                ++card->metric_input_resets;
        } else if (dropped_frames > 0) {
                // Insert silence as needed.
                fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
                        card_index, dropped_frames, timecode);
                card->metric_input_dropped_frames_error += dropped_frames;

                bool success;
                do {
                        success = audio_mixer.add_silence(device, silence_samples, dropped_frames, frame_length);
                } while (!success);
        }

        if (num_samples > 0) {
                audio_mixer.add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length, audio_frame.received_timestamp);
        }

        // Done with the audio, so release it.
        if (audio_frame.owner) {
                audio_frame.owner->release_frame(audio_frame);
        }

        card->last_timecode = timecode;

        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;

        size_t cbcr_width, cbcr_height, cbcr_offset, y_offset;
        size_t expected_length = video_format.stride * (video_format.height + video_format.extra_lines_top + video_format.extra_lines_bottom);
        if (userdata != nullptr && userdata->pixel_format == PixelFormat_8BitYCbCrPlanar) {
                // The calculation above is wrong for planar Y'CbCr, so just override it.
                assert(card->type == CardType::FFMPEG_INPUT);
                assert(video_offset == 0);
                expected_length = video_frame.len;

                userdata->ycbcr_format = (static_cast<FFmpegCapture *>(card->capture.get()))->get_current_frame_ycbcr_format();
                cbcr_width = video_format.width / userdata->ycbcr_format.chroma_subsampling_x;
                cbcr_height = video_format.height / userdata->ycbcr_format.chroma_subsampling_y;
                cbcr_offset = video_format.width * video_format.height;
                y_offset = 0;
        } else {
                // All the other Y'CbCr formats are 4:2:2.
                cbcr_width = video_format.width / 2;
                cbcr_height = video_format.height;
                cbcr_offset = video_offset / 2;
                y_offset = video_frame.size / 2 + video_offset / 2;
        }
        if (video_frame.len - video_offset == 0 ||
            video_frame.len - video_offset != expected_length) {
                if (video_frame.len != 0) {
                        printf("Card %d: Dropping video frame with wrong length (%ld; expected %ld)\n",
                                card_index, video_frame.len - video_offset, expected_length);
                }
                if (video_frame.owner) {
                        video_frame.owner->release_frame(video_frame);
                }

                // Still send on the information that we _had_ a frame, even though it's corrupted,
                // so that pts can go up accordingly.
                {
                        unique_lock<mutex> lock(card_mutex);
                        CaptureCard::NewFrame new_frame;
                        new_frame.frame = RefCountedFrame(FrameAllocator::Frame());
                        new_frame.length = frame_length;
                        new_frame.interlaced = false;
                        new_frame.dropped_frames = dropped_frames;
                        new_frame.received_timestamp = video_frame.received_timestamp;
                        card->new_frames.push_back(move(new_frame));
                        card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
                }
                card->new_frames_changed.notify_all();
                return;
        }

        unsigned num_fields = video_format.interlaced ? 2 : 1;
        steady_clock::time_point frame_upload_start;
        bool interlaced_stride = false;
        if (video_format.interlaced) {
                // Send the two fields along as separate frames; the other side will need to add
                // a deinterlacer to actually get this right.
                assert(video_format.height % 2 == 0);
                video_format.height /= 2;
                cbcr_height /= 2;
                assert(frame_length % 2 == 0);
                frame_length /= 2;
                num_fields = 2;
                if (video_format.second_field_start == 1) {
                        interlaced_stride = true;
                }
                frame_upload_start = steady_clock::now();
        }
        userdata->last_interlaced = video_format.interlaced;
        userdata->last_has_signal = video_format.has_signal;
        userdata->last_is_connected = video_format.is_connected;
        userdata->last_frame_rate_nom = video_format.frame_rate_nom;
        userdata->last_frame_rate_den = video_format.frame_rate_den;
        RefCountedFrame frame(video_frame);

        // Upload the textures.
        for (unsigned field = 0; field < num_fields; ++field) {
                // Put the actual texture upload in a lambda that is executed in the main thread.
                // It is entirely possible to do this in the same thread (and it might even be
                // faster, depending on the GPU and driver), but it appears to be triggering
                // driver bugs very easily.
                //
                // Note that this means we must hold on to the actual frame data in <userdata>
                // until the upload command is run, but we hold on to <frame> much longer than that
                // (in fact, all the way until we no longer use the texture in rendering).
                auto upload_func = [this, field, video_format, y_offset, video_offset, cbcr_offset, cbcr_width, cbcr_height, interlaced_stride, userdata]() {
                        unsigned field_start_line;
                        if (field == 1) {
                                field_start_line = video_format.second_field_start;
                        } else {
                                field_start_line = video_format.extra_lines_top;
                        }

                        // For anything not FRAME_FORMAT_YCBCR_10BIT, v210_width will be nonsensical but not used.
                        size_t v210_width = video_format.stride / sizeof(uint32_t);
                        ensure_texture_resolution(userdata, field, video_format.width, video_format.height, cbcr_width, cbcr_height, v210_width);

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, userdata->pbo);
                        check_error();

                        switch (userdata->pixel_format) {
                        case PixelFormat_10BitYCbCr: {
                                size_t field_start = video_offset + video_format.stride * field_start_line;
                                upload_texture(userdata->tex_v210[field], v210_width, video_format.height, video_format.stride, interlaced_stride, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, field_start);
                                v210_converter->convert(userdata->tex_v210[field], userdata->tex_444[field], video_format.width, video_format.height);
                                break;
                        }
                        case PixelFormat_8BitYCbCr: {
                                size_t field_y_start = y_offset + video_format.width * field_start_line;
                                size_t field_cbcr_start = cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t);

                                // Make up our own strides, since we are interleaving.
                                upload_texture(userdata->tex_y[field], video_format.width, video_format.height, video_format.width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_y_start);
                                upload_texture(userdata->tex_cbcr[field], cbcr_width, cbcr_height, cbcr_width * sizeof(uint16_t), interlaced_stride, GL_RG, GL_UNSIGNED_BYTE, field_cbcr_start);
                                break;
                        }
                        case PixelFormat_8BitYCbCrPlanar: {
                                assert(field_start_line == 0);  // We don't really support interlaced here.
                                size_t field_y_start = y_offset;
                                size_t field_cb_start = cbcr_offset;
                                size_t field_cr_start = cbcr_offset + cbcr_width * cbcr_height;

                                // Make up our own strides, since the planes are tightly packed.
                                upload_texture(userdata->tex_y[field], video_format.width, video_format.height, video_format.width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_y_start);
                                upload_texture(userdata->tex_cb[field], cbcr_width, cbcr_height, cbcr_width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_cb_start);
                                upload_texture(userdata->tex_cr[field], cbcr_width, cbcr_height, cbcr_width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_cr_start);
                                break;
                        }
                        case PixelFormat_8BitBGRA: {
                                size_t field_start = video_offset + video_format.stride * field_start_line;
                                upload_texture(userdata->tex_rgba[field], video_format.width, video_format.height, video_format.stride, interlaced_stride, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, field_start);
                                // These could be asked to deliver mipmaps at any time.
                                glBindTexture(GL_TEXTURE_2D, userdata->tex_rgba[field]);
                                check_error();
                                glGenerateMipmap(GL_TEXTURE_2D);
                                check_error();
                                glBindTexture(GL_TEXTURE_2D, 0);
                                check_error();
                                break;
                        }
                        default:
                                assert(false);
                        }

                        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
                        check_error();
                };

                if (field == 1) {
                        // Don't upload the second field as fast as we can; wait until
                        // the field time has approximately passed. (Otherwise, we could
                        // get timing jitter against the other sources, and possibly also
                        // against the video display, although the latter is not as critical.)
                        // This requires our system clock to be reasonably close to the
                        // video clock, but that's not an unreasonable assumption.
                        steady_clock::time_point second_field_start = frame_upload_start +
                                nanoseconds(frame_length * 1000000000 / TIMEBASE);
                        this_thread::sleep_until(second_field_start);
                }

                {
                        unique_lock<mutex> lock(card_mutex);
                        CaptureCard::NewFrame new_frame;
                        new_frame.frame = frame;
                        new_frame.length = frame_length;
                        new_frame.field = field;
                        new_frame.interlaced = video_format.interlaced;
                        new_frame.upload_func = upload_func;
                        new_frame.dropped_frames = dropped_frames;
                        new_frame.received_timestamp = video_frame.received_timestamp;  // Ignore the audio timestamp.
                        card->new_frames.push_back(move(new_frame));
                        card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
                }
                card->new_frames_changed.notify_all();
        }
}

void Mixer::bm_hotplug_add(libusb_device *dev)
{
        lock_guard<mutex> lock(hotplug_mutex);
        hotplugged_cards.push_back(dev);
}

void Mixer::bm_hotplug_remove(unsigned card_index)
{
        cards[card_index].new_frames_changed.notify_all();
}

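// The main mixer loop. Runs on its own thread with the mixer OpenGL context
// current: waits for the master clock (the output card if one is set,
// otherwise the master input card) to tick, pulls one frame from each card,
// runs any pending texture uploads, renders one output frame, and advances
// the pts accordingly.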
void Mixer::thread_func()
{
        pthread_setname_np(pthread_self(), "Mixer_OpenGL");

        eglBindAPI(EGL_OPENGL_API);
        QOpenGLContext *context = create_context(mixer_surface);
        if (!make_current(context, mixer_surface)) {
                printf("oops\n");
                exit(1);
        }

        // Start the actual capture. (We don't want to do it before we're actually ready
        // to process output frames.)
        for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                if (int(card_index) != output_card_index) {
                        cards[card_index].capture->start_bm_capture();
                }
        }

        BasicStats basic_stats(/*verbose=*/true);
        int stats_dropped_frames = 0;

        while (!should_quit) {
                if (desired_output_card_index != output_card_index) {
                        set_output_card_internal(desired_output_card_index);
                }
                if (output_card_index != -1 &&
                    desired_output_video_mode != output_video_mode) {
                        DeckLinkOutput *output = cards[output_card_index].output.get();
                        output->end_output();
                        desired_output_video_mode = output_video_mode = output->pick_video_mode(desired_output_video_mode);
                        output->start_output(desired_output_video_mode, pts_int);
                }

                CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS];
                bool has_new_frame[MAX_VIDEO_CARDS] = { false };

                bool master_card_is_output;
                unsigned master_card_index;
                if (output_card_index != -1) {
                        master_card_is_output = true;
                        master_card_index = output_card_index;
                } else {
                        master_card_is_output = false;
                        master_card_index = theme->map_signal(master_clock_channel);
                        assert(master_card_index < num_cards);
                }

                OutputFrameInfo output_frame_info = get_one_frame_from_each_card(master_card_index, master_card_is_output, new_frames, has_new_frame);
                schedule_audio_resampling_tasks(output_frame_info.dropped_frames, output_frame_info.num_samples, output_frame_info.frame_duration, output_frame_info.is_preroll, output_frame_info.frame_timestamp);
                stats_dropped_frames += output_frame_info.dropped_frames;

                handle_hotplugged_cards();

                for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                        if (card_index == master_card_index || !has_new_frame[card_index]) {
                                continue;
                        }
                        if (new_frames[card_index].frame->len == 0) {
                                ++new_frames[card_index].dropped_frames;
                        }
                        if (new_frames[card_index].dropped_frames > 0) {
                                printf("Card %u dropped %d frames before this\n",
                                        card_index, int(new_frames[card_index].dropped_frames));
                        }
                }

                // If the first card is reporting a corrupted or otherwise dropped frame,
                // just increase the pts (skipping over this frame) and don't try to compute anything new.
                if (!master_card_is_output && new_frames[master_card_index].frame->len == 0) {
                        ++stats_dropped_frames;
                        pts_int += new_frames[master_card_index].length;
                        continue;
                }

                for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
                        if (!has_new_frame[card_index] || new_frames[card_index].frame->len == 0)
                                continue;

                        CaptureCard::NewFrame *new_frame = &new_frames[card_index];
                        assert(new_frame->frame != nullptr);
                        insert_new_frame(new_frame->frame, new_frame->field, new_frame->interlaced, card_index, &input_state);
                        check_error();

                        // The new texture might need uploading before use.
                        if (new_frame->upload_func) {
                                new_frame->upload_func();
                                new_frame->upload_func = nullptr;
                        }
                }

                int64_t frame_duration = output_frame_info.frame_duration;
                render_one_frame(frame_duration);
                ++frame_num;
                pts_int += frame_duration;

                basic_stats.update(frame_num, stats_dropped_frames);
                // if (frame_num % 100 == 0) chain->print_phase_timing();

                if (should_cut.exchange(false)) {  // Test and clear.
                        video_encoder->do_cut(frame_num);
                }

#if 0
                // Reset every 10,000 frames, so that local variations in frame times
                // (especially for the first few frames, when the shaders are
                // compiled etc.) don't make it hard to measure for the entire
                // remaining duration of the program.
                if (frame == 10000) {
                        frame = 0;
                        start = now;
                }
#endif
1036                 check_error();
1037         }
1038
1039         resource_pool->clean_context();
1040 }
1041
1042 bool Mixer::input_card_is_master_clock(unsigned card_index, unsigned master_card_index) const
1043 {
1044         if (output_card_index != -1) {
1045                 // The output card (i.e., cards[output_card_index].output) is the master clock,
1046                 // so no input card (i.e., cards[card_index].capture) is.
1047                 return false;
1048         }
1049         return (card_index == master_card_index);
1050 }
1051
1052 void Mixer::trim_queue(CaptureCard *card, size_t safe_queue_length)
1053 {
1054         // Count the number of frames in the queue, including any frames
1055         // we dropped. It's hard to know exactly how we should deal with
1056         // dropped (corrupted) input frames; they don't help our goal of
1057         // avoiding starvation, but they still add to the problem of latency.
1058         // Since dropped frames are going to mean a bump in the signal anyway,
1059         // we err on the side of more stable latency instead.
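             // For example, a queue holding three frames whose dropped_frames
             // counts are 2, 0 and 1 contributes (2+1) + (0+1) + (1+1) = 6 frames.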
1060         unsigned queue_length = 0;
1061         for (const CaptureCard::NewFrame &frame : card->new_frames) {
1062                 queue_length += frame.dropped_frames + 1;
1063         }
1064
1065         // If needed, drop frames until the queue is below the safe limit.
1066         // We prefer to drop from the head, because all else being equal,
1067         // we'd like more recent frames (less latency).
1068         unsigned dropped_frames = 0;
1069         while (queue_length > safe_queue_length) {
1070                 assert(!card->new_frames.empty());
1071                 assert(queue_length > card->new_frames.front().dropped_frames);
1072                 queue_length -= card->new_frames.front().dropped_frames;
1073
1074                 if (queue_length <= safe_queue_length) {
1075                         // The frames dropped upstream already brought us below the limit;
                             // no need to drop the actual frame.
1076                         break;
1077                 }
1078
1079                 card->new_frames.pop_front();
1080                 card->new_frames_changed.notify_all();
1081                 --queue_length;
1082                 ++dropped_frames;
1083         }
1084
1085         card->metric_input_dropped_frames_jitter += dropped_frames;
1086         card->metric_input_queue_length_frames = queue_length;
1087
1088 #if 0
1089         if (dropped_frames > 0) {
1090                 fprintf(stderr, "Dropped %u frame(s) to keep latency down.\n",
1091                         dropped_frames);
1092         }
1093 #endif
1094 }
1095
1096
1097 Mixer::OutputFrameInfo Mixer::get_one_frame_from_each_card(unsigned master_card_index, bool master_card_is_output, CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS], bool has_new_frame[MAX_VIDEO_CARDS])
1098 {
1099         OutputFrameInfo output_frame_info;
1100 start:
1101         unique_lock<mutex> lock(card_mutex, defer_lock);
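             // The lock is deferred: in the output-clocked case below, we must
             // not hold card_mutex while blocking on the output card.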
1102         if (master_card_is_output) {
1103                 // Clocked to the output, so wait for it to be ready for the next frame.
1104                 cards[master_card_index].output->wait_for_frame(pts_int, &output_frame_info.dropped_frames, &output_frame_info.frame_duration, &output_frame_info.is_preroll, &output_frame_info.frame_timestamp);
1105                 lock.lock();
1106         } else {
1107                 // Wait for the master card to have a new frame.
1108                 // TODO: Add a timeout.
1109                 output_frame_info.is_preroll = false;
1110                 lock.lock();
1111                 cards[master_card_index].new_frames_changed.wait(lock, [this, master_card_index]{ return !cards[master_card_index].new_frames.empty() || cards[master_card_index].capture->get_disconnected(); });
1112         }
1113
1114         if (master_card_is_output) {
1115                 handle_hotplugged_cards();
1116         } else if (cards[master_card_index].new_frames.empty()) {
1117                 // We were woken up, but not due to a new frame. Deal with it
1118                 // and then restart.
1119                 assert(cards[master_card_index].capture->get_disconnected());
1120                 handle_hotplugged_cards();
1121                 lock.unlock();
1122                 goto start;
1123         }
1124
1125         for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
1126                 CaptureCard *card = &cards[card_index];
1127                 if (card->new_frames.empty()) {  // Starvation.
1128                         ++card->metric_input_duped_frames;
1129                 } else {
1130                         new_frames[card_index] = move(card->new_frames.front());
1131                         has_new_frame[card_index] = true;
1132                         card->new_frames.pop_front();
1133                         card->new_frames_changed.notify_all();
1134                 }
1135         }
1136
1137         if (!master_card_is_output) {
1138                 output_frame_info.frame_timestamp = new_frames[master_card_index].received_timestamp;
1139                 output_frame_info.dropped_frames = new_frames[master_card_index].dropped_frames;
1140                 output_frame_info.frame_duration = new_frames[master_card_index].length;
1141         }
1142
1143         if (!output_frame_info.is_preroll) {
1144                 output_jitter_history.frame_arrived(output_frame_info.frame_timestamp, output_frame_info.frame_duration, output_frame_info.dropped_frames);
1145         }
1146
1147         for (unsigned card_index = 0; card_index < num_cards + num_video_inputs; ++card_index) {
1148                 CaptureCard *card = &cards[card_index];
1149                 if (has_new_frame[card_index] &&
1150                     !input_card_is_master_clock(card_index, master_card_index) &&
1151                     !output_frame_info.is_preroll) {
1152                         card->queue_length_policy.update_policy(
1153                                 output_frame_info.frame_timestamp,
1154                                 card->jitter_history.get_expected_next_frame(),
1155                                 new_frames[master_card_index].length,
1156                                 output_frame_info.frame_duration,
1157                                 card->jitter_history.estimate_max_jitter(),
1158                                 output_jitter_history.estimate_max_jitter());
1159                         trim_queue(card, min<int>(global_flags.max_input_queue_frames,
1160                                                   card->queue_length_policy.get_safe_queue_length()));
1161                 }
1162         }
1163
1164         // This might get off by a fractional sample when changing master card
1165         // between ones with different frame rates, but that's fine.
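             // A worked example (assuming OUTPUT_FREQUENCY = 48000 and TIMEBASE = 120000):
             // a 60 fps master frame has frame_duration = 120000 / 60 = 2000, giving
             // (48000 * 2000 + fractional_samples) / 120000 = 800 samples, with the
             // remainder carried over into the next frame.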
1166         int num_samples_times_timebase = OUTPUT_FREQUENCY * output_frame_info.frame_duration + fractional_samples;
1167         output_frame_info.num_samples = num_samples_times_timebase / TIMEBASE;
1168         fractional_samples = num_samples_times_timebase % TIMEBASE;
1169         assert(output_frame_info.num_samples >= 0);
1170
1171         return output_frame_info;
1172 }
1173
1174 void Mixer::handle_hotplugged_cards()
1175 {
1176         // Check for cards that have been disconnected since last frame.
1177         for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
1178                 CaptureCard *card = &cards[card_index];
1179                 if (card->capture->get_disconnected()) {
1180                         fprintf(stderr, "Card %u went away, replacing with a fake card.\n", card_index);
1181                         FakeCapture *capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
1182                         configure_card(card_index, capture, CardType::FAKE_CAPTURE, /*output=*/nullptr);
1183                         card->queue_length_policy.reset(card_index);
1184                         card->capture->start_bm_capture();
1185                 }
1186         }
1187
1188         // Check for cards that have been connected since last frame.
1189         vector<libusb_device *> hotplugged_cards_copy;
1190         {
1191                 lock_guard<mutex> lock(hotplug_mutex);
1192                 swap(hotplugged_cards, hotplugged_cards_copy);
1193         }
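             // (Swapped out under the lock so that the processing below can run
             // without holding hotplug_mutex.)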
1194         for (libusb_device *new_dev : hotplugged_cards_copy) {
1195                 // Look for a fake capture card where we can stick this in.
1196                 int free_card_index = -1;
1197                 for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
1198                         if (cards[card_index].is_fake_capture) {
1199                                 free_card_index = card_index;
1200                                 break;
1201                         }
1202                 }
1203
1204                 if (free_card_index == -1) {
1205                         fprintf(stderr, "New card plugged in, but no free slots -- ignoring.\n");
1206                         libusb_unref_device(new_dev);
1207                 } else {
1208                         // BMUSBCapture takes ownership.
1209                         fprintf(stderr, "New card plugged in, choosing slot %d.\n", free_card_index);
1210                         CaptureCard *card = &cards[free_card_index];
1211                         BMUSBCapture *capture = new BMUSBCapture(free_card_index, new_dev);
1212                         configure_card(free_card_index, capture, CardType::LIVE_CARD, /*output=*/nullptr);
1213                         card->queue_length_policy.reset(free_card_index);
1214                         capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, free_card_index));
1215                         capture->start_bm_capture();
1216                 }
1217         }
1218 }
1219
1221 void Mixer::schedule_audio_resampling_tasks(unsigned dropped_frames, int num_samples_per_frame, int length_per_frame, bool is_preroll, steady_clock::time_point frame_timestamp)
1222 {
1223         // Resample the audio as needed, including from previously dropped frames.
1224         assert(num_cards > 0);
1225         for (unsigned frame_num = 0; frame_num < dropped_frames + 1; ++frame_num) {
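                     // Every iteration except the last stands in for a frame that was
                     // dropped upstream; only the final one is the frame we actually received.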
1226                 const bool dropped_frame = (frame_num != dropped_frames);
1227                 {
1228                         // Signal to the audio thread to process this frame.
1229                         // Note that if the frame is a dropped frame, we signal that
1230                         // we don't want to use this frame as base for adjusting
1231                         // the resampler rate. The reason for this is that the timing
1232                         // of these frames is often way too late; they typically don't
1233                         // “arrive” before we synthesize them. Thus, we could end up
1234                         // in a situation where we have inserted e.g. five audio frames
1235                         // into the queue before we then start pulling five of them
1236                         // back out. This makes ResamplingQueue overestimate the delay,
1237                         // causing undue resampler changes. (We _do_ use the last,
1238                         // non-dropped frame; perhaps we should just discard that as well,
1239                         // since dropped frames are expected to be rare, and it might be
1240                         // better to just wait until we have a slightly more normal situation).
1241                         unique_lock<mutex> lock(audio_mutex);
1242                         bool adjust_rate = !dropped_frame && !is_preroll;
1243                         audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame, adjust_rate, frame_timestamp});
1244                         audio_task_queue_changed.notify_one();
1245                 }
1246                 if (dropped_frame) {
1247                         // For dropped frames, increase the pts. Note that if the format changed
1248                         // in the meantime, we have no way of detecting that; we just have to
1249                         // assume the frame length is always the same.
1250                         pts_int += length_per_frame;
1251                 }
1252         }
1253 }
1254
1255 void Mixer::render_one_frame(int64_t duration)
1256 {
1257         // Determine the time code for this frame before we start rendering.
1258         string timecode_text = timecode_renderer->get_timecode_text(double(pts_int) / TIMEBASE, frame_num);
1259         if (display_timecode_on_stdout) {
1260                 printf("Timecode: '%s'\n", timecode_text.c_str());
1261         }
1262
1263         // Update Y'CbCr settings for all cards.
1264         {
1265                 unique_lock<mutex> lock(card_mutex);
1266                 for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
1267                         YCbCrInterpretation *interpretation = &ycbcr_interpretation[card_index];
1268                         input_state.ycbcr_coefficients_auto[card_index] = interpretation->ycbcr_coefficients_auto;
1269                         input_state.ycbcr_coefficients[card_index] = interpretation->ycbcr_coefficients;
1270                         input_state.full_range[card_index] = interpretation->full_range;
1271                 }
1272         }
1273
1274         // Get the main chain from the theme, and set its state immediately.
1275         Theme::Chain theme_main_chain = theme->get_chain(0, pts(), global_flags.width, global_flags.height, input_state);
1276         EffectChain *chain = theme_main_chain.chain;
1277         theme_main_chain.setup_chain();
1278         //theme_main_chain.chain->enable_phase_timing(true);
1279
1280         // The theme can't (or at least shouldn't!) call connect_signal() on
1281         // each FFmpeg input, so we'll do it here.
1282         for (const pair<LiveInputWrapper *, FFmpegCapture *> &conn : theme->get_signal_connections()) {
1283                 conn.first->connect_signal_raw(conn.second->get_card_index(), input_state);
1284         }
1285
1286         // If HDMI/SDI output is active and the user has requested auto mode,
1287         // its mode overrides the existing Y'CbCr setting for the chain.
1288         YCbCrLumaCoefficients ycbcr_output_coefficients;
1289         if (global_flags.ycbcr_auto_coefficients && output_card_index != -1) {
1290                 ycbcr_output_coefficients = cards[output_card_index].output->preferred_ycbcr_coefficients();
1291         } else {
1292                 ycbcr_output_coefficients = global_flags.ycbcr_rec709_coefficients ? YCBCR_REC_709 : YCBCR_REC_601;
1293         }
1294
1295         // TODO: Reduce the duplication against theme.cpp.
1296         YCbCrFormat output_ycbcr_format;
1297         output_ycbcr_format.chroma_subsampling_x = 1;
1298         output_ycbcr_format.chroma_subsampling_y = 1;
1299         output_ycbcr_format.luma_coefficients = ycbcr_output_coefficients;
1300         output_ycbcr_format.full_range = false;
1301         output_ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
1302         chain->change_ycbcr_output_format(output_ycbcr_format);
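             // (Chroma is rendered at full resolution here; subsample_chroma()
             // below produces the half-resolution CbCr for the encoder.)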
1303
1304         // Render main chain. If we're using zerocopy Quick Sync encoding
1305         // (the default case), we take an extra copy of the created outputs,
1306         // so that we can display it back to the screen later (it's less memory
1307         // bandwidth than writing and reading back an RGBA texture, even at 16-bit).
1308         // Ideally, we'd like to avoid taking copies and just use the main textures
1309         // for display as well, but they're just views into VA-API memory and must be
1310         // unmapped during encoding, so unfortunately, we can't.
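             // To summarize the two paths below: with zerocopy, y_tex/cbcr_tex are
             // filled in by VideoEncoder's begin_frame(), and y_copy_tex/cbcr_copy_tex
             // hold our own display copies (the FBO gets y_copy_tex as an extra color
             // attachment, so the copy is rendered in the same pass). Without zerocopy,
             // we allocate y_tex/cbcr_tex ourselves and display those directly.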
1311         GLuint y_tex, cbcr_full_tex, cbcr_tex;
1312         GLuint y_copy_tex = 0, cbcr_copy_tex = 0;
1313         GLuint y_display_tex, cbcr_display_tex;
1314         GLenum y_type = (global_flags.x264_bit_depth > 8) ? GL_R16 : GL_R8;
1315         GLenum cbcr_type = (global_flags.x264_bit_depth > 8) ? GL_RG16 : GL_RG8;
1316         const bool is_zerocopy = video_encoder->is_zerocopy();
1317         if (is_zerocopy) {
1318                 cbcr_full_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width, global_flags.height);
1319                 y_copy_tex = resource_pool->create_2d_texture(y_type, global_flags.width, global_flags.height);
1320                 cbcr_copy_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width / 2, global_flags.height / 2);
1321
1322                 y_display_tex = y_copy_tex;
1323                 cbcr_display_tex = cbcr_copy_tex;
1324
1325                 // y_tex and cbcr_tex will be given by VideoEncoder.
1326         } else {
1327                 cbcr_full_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width, global_flags.height);
1328                 y_tex = resource_pool->create_2d_texture(y_type, global_flags.width, global_flags.height);
1329                 cbcr_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width / 2, global_flags.height / 2);
1330
1331                 y_display_tex = y_tex;
1332                 cbcr_display_tex = cbcr_tex;
1333         }
1334
1335         const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
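             // (For instance, an audio queue length of 100 ms with TIMEBASE = 120000
             // would give av_delay = 12000 ticks.)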
1336         bool got_frame = video_encoder->begin_frame(pts_int + av_delay, duration, ycbcr_output_coefficients, theme_main_chain.input_frames, &y_tex, &cbcr_tex);
1337         assert(got_frame);
1338
1339         GLuint fbo;
1340         if (is_zerocopy) {
1341                 fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, y_copy_tex);
1342         } else {
1343                 fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex);
1344         }
1345         check_error();
1346         chain->render_to_fbo(fbo, global_flags.width, global_flags.height);
1347
1348         if (display_timecode_in_stream) {
1349                 // Render the timecode on top.
1350                 timecode_renderer->render_timecode(fbo, timecode_text);
1351         }
1352
1353         resource_pool->release_fbo(fbo);
1354
1355         if (is_zerocopy) {
1356                 chroma_subsampler->subsample_chroma(cbcr_full_tex, global_flags.width, global_flags.height, cbcr_tex, cbcr_copy_tex);
1357         } else {
1358                 chroma_subsampler->subsample_chroma(cbcr_full_tex, global_flags.width, global_flags.height, cbcr_tex);
1359         }
1360         if (output_card_index != -1) {
1361                 cards[output_card_index].output->send_frame(y_tex, cbcr_full_tex, ycbcr_output_coefficients, theme_main_chain.input_frames, pts_int, duration);
1362         }
1363         resource_pool->release_2d_texture(cbcr_full_tex);
1364
1365         // Set the right state for the Y' and CbCr textures we use for display.
1366         glBindFramebuffer(GL_FRAMEBUFFER, 0);
1367         glBindTexture(GL_TEXTURE_2D, y_display_tex);
1368         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
1369         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
1370         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
1371
1372         glBindTexture(GL_TEXTURE_2D, cbcr_display_tex);
1373         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
1374         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
1375         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
1376
1377         RefCountedGLsync fence = video_encoder->end_frame();
1378
1379         // The live frame converts the Y'CbCr display textures back into RGB and shows them.
1380         // It owns y_display_tex and cbcr_display_tex now (whichever textures they are).
1381         DisplayFrame live_frame;
1382         live_frame.chain = display_chain.get();
1383         live_frame.setup_chain = [this, y_display_tex, cbcr_display_tex]{
1384                 display_input->set_texture_num(0, y_display_tex);
1385                 display_input->set_texture_num(1, cbcr_display_tex);
1386         };
1387         live_frame.ready_fence = fence;
1388         live_frame.input_frames = {};
1389         live_frame.temp_textures = { y_display_tex, cbcr_display_tex };
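             // (These are returned to the resource pool by release_display_frame()
             // once the frame is retired.)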
1390         output_channel[OUTPUT_LIVE].output_frame(live_frame);
1391
1392         // Set up preview and any additional channels.
1393         for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
1394                 DisplayFrame display_frame;
1395                 Theme::Chain chain = theme->get_chain(i, pts(), global_flags.width, global_flags.height, input_state);  // FIXME: dimensions
1396                 display_frame.chain = chain.chain;
1397                 display_frame.setup_chain = chain.setup_chain;
1398                 display_frame.ready_fence = fence;
1399                 display_frame.input_frames = chain.input_frames;
1400                 display_frame.temp_textures = {};
1401                 output_channel[i].output_frame(display_frame);
1402         }
1403 }
1404
1405 void Mixer::audio_thread_func()
1406 {
1407         pthread_setname_np(pthread_self(), "Mixer_Audio");
1408
1409         while (!should_quit) {
1410                 AudioTask task;
1411
1412                 {
1413                         unique_lock<mutex> lock(audio_mutex);
1414                         audio_task_queue_changed.wait(lock, [this]{ return should_quit || !audio_task_queue.empty(); });
1415                         if (should_quit) {
1416                                 return;
1417                         }
1418                         task = audio_task_queue.front();
1419                         audio_task_queue.pop();
1420                 }
1421
1422                 ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy =
1423                         task.adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE;
1424                 vector<float> samples_out = audio_mixer.get_output(
1425                         task.frame_timestamp,
1426                         task.num_samples,
1427                         rate_adjustment_policy);
1428
1429                 // Send the samples to the sound card, then add them to the output.
1430                 if (alsa) {
1431                         alsa->write(samples_out);
1432                 }
1433                 if (output_card_index != -1) {
1434                         const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
1435                         cards[output_card_index].output->send_audio(task.pts_int + av_delay, samples_out);
1436                 }
1437                 video_encoder->add_audio(task.pts_int, move(samples_out));
1438         }
1439 }
1440
1441 void Mixer::release_display_frame(DisplayFrame *frame)
1442 {
1443         for (GLuint texnum : frame->temp_textures) {
1444                 resource_pool->release_2d_texture(texnum);
1445         }
1446         frame->temp_textures.clear();
1447         frame->ready_fence.reset();
1448         frame->input_frames.clear();
1449 }
1450
1451 void Mixer::start()
1452 {
1453         mixer_thread = thread(&Mixer::thread_func, this);
1454         audio_thread = thread(&Mixer::audio_thread_func, this);
1455 }
1456
1457 void Mixer::quit()
1458 {
1459         should_quit = true;
1460         audio_task_queue_changed.notify_one();
1461         mixer_thread.join();
1462         audio_thread.join();
1463 }
1464
1465 void Mixer::transition_clicked(int transition_num)
1466 {
1467         theme->transition_clicked(transition_num, pts());
1468 }
1469
1470 void Mixer::channel_clicked(int preview_num)
1471 {
1472         theme->channel_clicked(preview_num);
1473 }
1474
1475 YCbCrInterpretation Mixer::get_input_ycbcr_interpretation(unsigned card_index) const
1476 {
1477         unique_lock<mutex> lock(card_mutex);
1478         return ycbcr_interpretation[card_index];
1479 }
1480
1481 void Mixer::set_input_ycbcr_interpretation(unsigned card_index, const YCbCrInterpretation &interpretation)
1482 {
1483         unique_lock<mutex> lock(card_mutex);
1484         ycbcr_interpretation[card_index] = interpretation;
1485 }
1486
1487 void Mixer::start_mode_scanning(unsigned card_index)
1488 {
1489         assert(card_index < num_cards);
1490         if (is_mode_scanning[card_index]) {
1491                 return;
1492         }
1493         is_mode_scanning[card_index] = true;
1494         mode_scanlist[card_index].clear();
1495         for (const auto &mode : cards[card_index].capture->get_available_video_modes()) {
1496                 mode_scanlist[card_index].push_back(mode.first);
1497         }
1498         assert(!mode_scanlist[card_index].empty());
1499         mode_scanlist_index[card_index] = 0;
1500         cards[card_index].capture->set_video_mode(mode_scanlist[card_index][0]);
1501         last_mode_scan_change[card_index] = steady_clock::now();
1502 }
1503
1504 map<uint32_t, VideoMode> Mixer::get_available_output_video_modes() const
1505 {
1506         assert(desired_output_card_index != -1);
1507         unique_lock<mutex> lock(card_mutex);
1508         return cards[desired_output_card_index].output->get_available_video_modes();
1509 }
1510
1511 Mixer::OutputChannel::~OutputChannel()
1512 {
1513         if (has_current_frame) {
1514                 parent->release_display_frame(&current_frame);
1515         }
1516         if (has_ready_frame) {
1517                 parent->release_display_frame(&ready_frame);
1518         }
1519 }
1520
1521 void Mixer::OutputChannel::output_frame(DisplayFrame frame)
1522 {
1523         // Store this frame for display. Remove the ready frame if any
1524         // (it was seemingly never used).
1525         {
1526                 unique_lock<mutex> lock(frame_mutex);
1527                 if (has_ready_frame) {
1528                         parent->release_display_frame(&ready_frame);
1529                 }
1530                 ready_frame = frame;
1531                 has_ready_frame = true;
1532
1533                 // Call the callbacks under the mutex (they should be short),
1534                 // so that we don't race against a callback removal.
1535                 for (const auto &key_and_callback : new_frame_ready_callbacks) {
1536                         key_and_callback.second();
1537                 }
1538         }
1539
1540         // Reduce the number of callbacks by filtering duplicates. The reason
1541         // we bother doing this is that Qt can seemingly get into a state
1542         // where it builds up an essentially unbounded queue of signals,
1543         // consuming more and more memory, and there's no good way of collapsing
1544         // user-defined signals or limiting the length of the queue.
1545         if (transition_names_updated_callback) {
1546                 vector<string> transition_names = global_mixer->get_transition_names();
1547                 bool changed = false;
1548                 if (transition_names.size() != last_transition_names.size()) {
1549                         changed = true;
1550                 } else {
1551                         for (unsigned i = 0; i < transition_names.size(); ++i) {
1552                                 if (transition_names[i] != last_transition_names[i]) {
1553                                         changed = true;
1554                                         break;
1555                                 }
1556                         }
1557                 }
1558                 if (changed) {
1559                         transition_names_updated_callback(transition_names);
1560                         last_transition_names = transition_names;
1561                 }
1562         }
1563         if (name_updated_callback) {
1564                 string name = global_mixer->get_channel_name(channel);
1565                 if (name != last_name) {
1566                         name_updated_callback(name);
1567                         last_name = name;
1568                 }
1569         }
1570         if (color_updated_callback) {
1571                 string color = global_mixer->get_channel_color(channel);
1572                 if (color != last_color) {
1573                         color_updated_callback(color);
1574                         last_color = color;
1575                 }
1576         }
1577 }
1578
1579 bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame)
1580 {
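             // Double-buffering: "ready_frame" is the newest frame delivered by the
             // mixer; "current_frame" is the one most recently handed to the UI.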
1581         unique_lock<mutex> lock(frame_mutex);
1582         if (!has_current_frame && !has_ready_frame) {
1583                 return false;
1584         }
1585
1586         if (has_current_frame && has_ready_frame) {
1587                 // We have a new ready frame. Toss the current one.
1588                 parent->release_display_frame(&current_frame);
1589                 has_current_frame = false;
1590         }
1591         if (has_ready_frame) {
1592                 assert(!has_current_frame);
1593                 current_frame = ready_frame;
1594                 ready_frame.ready_fence.reset();  // Drop the refcount.
1595                 ready_frame.input_frames.clear();  // Drop the refcounts.
1596                 has_current_frame = true;
1597                 has_ready_frame = false;
1598         }
1599
1600         *frame = current_frame;
1601         return true;
1602 }
1603
1604 void Mixer::OutputChannel::add_frame_ready_callback(void *key, Mixer::new_frame_ready_callback_t callback)
1605 {
1606         unique_lock<mutex> lock(frame_mutex);
1607         new_frame_ready_callbacks[key] = callback;
1608 }
1609
1610 void Mixer::OutputChannel::remove_frame_ready_callback(void *key)
1611 {
1612         unique_lock<mutex> lock(frame_mutex);
1613         new_frame_ready_callbacks.erase(key);
1614 }
1615
1616 void Mixer::OutputChannel::set_transition_names_updated_callback(Mixer::transition_names_updated_callback_t callback)
1617 {
1618         transition_names_updated_callback = callback;
1619 }
1620
1621 void Mixer::OutputChannel::set_name_updated_callback(Mixer::name_updated_callback_t callback)
1622 {
1623         name_updated_callback = callback;
1624 }
1625
1626 void Mixer::OutputChannel::set_color_updated_callback(Mixer::color_updated_callback_t callback)
1627 {
1628         color_updated_callback = callback;
1629 }
1630
1631 mutex RefCountedGLsync::fence_lock;