#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <movit/flat_input.h>
#include <movit/image_format.h>
#include <movit/init.h>
#include <movit/resource_pool.h>

#include <condition_variable>

#include "DeckLinkAPI.h"

#include "alsa_output.h"
#include "basic_stats.h"
#include "bmusb/bmusb.h"
#include "bmusb/fake_capture.h"
#include "cef_capture.h"
#include "chroma_subsampler.h"
#include "shared/context.h"
#include "decklink_capture.h"
#include "decklink_output.h"
#include "shared/disk_space_estimator.h"
#include "ffmpeg_capture.h"
#include "input_mapping.h"
#include "shared/metrics.h"
#include "mjpeg_encoder.h"
#include "pbo_frame_allocator.h"
#include "shared/ref_counted_gl_sync.h"
#include "resampling_queue.h"
#include "shared/timebase.h"
#include "timecode_renderer.h"
#include "v210_converter.h"
#include "va_display_with_cleanup.h"
#include "video_encoder.h"

#include <google/protobuf/util/json_util.h>

using namespace movit;
using namespace std;
using namespace std::chrono;
using namespace std::placeholders;
using namespace bmusb;

Mixer *global_mixer = nullptr;
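// Put a received frame (or field) first in the per-card frame history kept
// in InputState. For interlaced input, each new field shifts the history
// down one slot; for progressive input, every history slot is overwritten
// with the same frame, so anything that reads several slots (e.g., a
// deinterlacer) still sees consistent data.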
void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced, unsigned card_index, InputState *input_state)
{
	if (interlaced) {
		for (unsigned frame_num = FRAME_HISTORY_LENGTH; frame_num --> 1; ) {  // :-)
			input_state->buffered_frames[card_index][frame_num] =
				input_state->buffered_frames[card_index][frame_num - 1];
		}
		input_state->buffered_frames[card_index][0] = { frame, field_num };
	} else {
		for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
			input_state->buffered_frames[card_index][frame_num] = { frame, field_num };
		}
	}
}
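// Create (or re-create) the OpenGL textures backing one field of a card's
// frames, if this is the first use or if the resolution has changed since
// last time. Which textures are needed depends on the pixel format below.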
void ensure_texture_resolution(PBOFrameAllocator::Userdata *userdata, unsigned field, unsigned width, unsigned height, unsigned cbcr_width, unsigned cbcr_height, unsigned v210_width)
{
	bool first;
	switch (userdata->pixel_format) {
	case PixelFormat_10BitYCbCr:
		first = userdata->tex_v210[field] == 0 || userdata->tex_444[field] == 0;
		break;
	case PixelFormat_8BitYCbCr:
		first = userdata->tex_y[field] == 0 || userdata->tex_cbcr[field] == 0;
		break;
	case PixelFormat_8BitBGRA:
		first = userdata->tex_rgba[field] == 0;
		break;
	case PixelFormat_8BitYCbCrPlanar:
		first = userdata->tex_y[field] == 0 || userdata->tex_cb[field] == 0 || userdata->tex_cr[field] == 0;
		break;
	default:
		assert(false);
	}

	if (first ||
	    width != userdata->last_width[field] ||
	    height != userdata->last_height[field] ||
	    cbcr_width != userdata->last_cbcr_width[field] ||
	    cbcr_height != userdata->last_cbcr_height[field]) {
		// We changed resolution since last use of this texture, so we need to create
		// a new object. Note that since each card has its own PBOFrameAllocator,
		// we don't need to worry about these flip-flopping between resolutions.
		switch (userdata->pixel_format) {
		case PixelFormat_10BitYCbCr:
			glBindTexture(GL_TEXTURE_2D, userdata->tex_444[field]);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB10_A2, width, height, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, nullptr);
			break;
		case PixelFormat_8BitYCbCr: {
			glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr);
			glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
			break;
		}
		case PixelFormat_8BitYCbCrPlanar: {
			glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
			glBindTexture(GL_TEXTURE_2D, userdata->tex_cb[field]);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, cbcr_width, cbcr_height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
			glBindTexture(GL_TEXTURE_2D, userdata->tex_cr[field]);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, cbcr_width, cbcr_height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
			break;
		}
		case PixelFormat_8BitBGRA:
			glBindTexture(GL_TEXTURE_2D, userdata->tex_rgba[field]);
			if (global_flags.can_disable_srgb_decoder) {  // See the comments in tweaked_inputs.h.
				glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
			} else {
				glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
			}
			break;
		}
		userdata->last_width[field] = width;
		userdata->last_height[field] = height;
		userdata->last_cbcr_width[field] = cbcr_width;
		userdata->last_cbcr_height[field] = cbcr_height;
	}
	if (global_flags.ten_bit_input &&
	    (first || v210_width != userdata->last_v210_width[field])) {
		// Same as above; we need to recreate the texture.
		glBindTexture(GL_TEXTURE_2D, userdata->tex_v210[field]);
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB10_A2, v210_width, height, 0, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, nullptr);
		userdata->last_v210_width[field] = v210_width;
	}
}
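// Upload the given PBO region into <tex>. For interlaced data, the lines of
// the two fields are interleaved in the buffer, so interlaced_stride makes
// the upload skip every other line by doubling the effective row length.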
void upload_texture(GLuint tex, GLuint width, GLuint height, GLuint stride, bool interlaced_stride, GLenum format, GLenum type, GLintptr offset)
{
	if (interlaced_stride) {
		stride *= 2;
	}
	if (global_flags.flush_pbos) {
		glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, offset, stride * height);
	}

	glBindTexture(GL_TEXTURE_2D, tex);

	if (interlaced_stride) {
		glPixelStorei(GL_UNPACK_ROW_LENGTH, width * 2);
	} else {
		glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	}

	glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, BUFFER_OFFSET(offset));

	glBindTexture(GL_TEXTURE_2D, 0);
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
}

void JitterHistory::register_metrics(const vector<pair<string, string>> &labels)
{
	global_metrics.add("input_underestimated_jitter_frames", labels, &metric_input_underestimated_jitter_frames);
	global_metrics.add("input_estimated_max_jitter_seconds", labels, &metric_input_estimated_max_jitter_seconds, Metrics::TYPE_GAUGE);
}

void JitterHistory::unregister_metrics(const vector<pair<string, string>> &labels)
{
	global_metrics.remove("input_underestimated_jitter_frames", labels);
	global_metrics.remove("input_estimated_max_jitter_seconds", labels);
}
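// Register a frame arrival: compare the actual arrival time against the
// extrapolated expected timestamp, and record the deviation (in seconds)
// in a sliding-window multiset, so that a percentile of it can later be
// picked out as the jitter estimate.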
void JitterHistory::frame_arrived(steady_clock::time_point now, int64_t frame_duration, size_t dropped_frames)
{
	if (expected_timestamp > steady_clock::time_point::min()) {
		expected_timestamp += dropped_frames * nanoseconds(frame_duration * 1000000000 / TIMEBASE);
		double jitter_seconds = fabs(duration<double>(expected_timestamp - now).count());
		history.push_back(orders.insert(jitter_seconds));
		if (jitter_seconds > estimate_max_jitter()) {
			++metric_input_underestimated_jitter_frames;
		}

		metric_input_estimated_max_jitter_seconds = estimate_max_jitter();

		if (history.size() > history_length) {
			orders.erase(history.front());
			history.pop_front();
		}
		assert(history.size() <= history_length);
	}
	expected_timestamp = now + nanoseconds(frame_duration * 1000000000 / TIMEBASE);
}
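// Estimate the worst-case jitter as a percentile of the observed jitter
// values, scaled by a safety multiplier. Since the multiset is sorted,
// we simply step in from whichever end of it is closer to the wanted
// percentile; e.g., with 500 samples and a percentile of 0.99, this
// picks out the sixth-largest observation.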
double JitterHistory::estimate_max_jitter() const
{
	if (orders.empty()) {
		return 0.0;
	}
	size_t elem_idx = lrint((orders.size() - 1) * percentile);
	if (percentile <= 0.5) {
		return *next(orders.begin(), elem_idx) * multiplier;
	} else {
		return *prev(orders.end(), orders.size() - elem_idx) * multiplier;
	}
}

void QueueLengthPolicy::register_metrics(const vector<pair<string, string>> &labels)
{
	global_metrics.add("input_queue_safe_length_frames", labels, &metric_input_queue_safe_length_frames, Metrics::TYPE_GAUGE);
}

void QueueLengthPolicy::unregister_metrics(const vector<pair<string, string>> &labels)
{
	global_metrics.remove("input_queue_safe_length_frames", labels);
}
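// Recompute the safe queue length for an input card, given worst-case jitter
// on both the input and the master clock. As a rough illustration (numbers
// made up): if the next input frame can be up to 35 ms away in the worst
// case, and the master card ticks every 20 ms with up to 5 ms jitter, the
// master may tick (35 + 5) / 20 = 2 times before the input delivers, so two
// queued frames must be kept to avoid starvation.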
void QueueLengthPolicy::update_policy(steady_clock::time_point now,
                                      steady_clock::time_point expected_next_frame,
                                      int64_t input_frame_duration,
                                      int64_t master_frame_duration,
                                      double max_input_card_jitter_seconds,
                                      double max_master_card_jitter_seconds)
{
	double input_frame_duration_seconds = input_frame_duration / double(TIMEBASE);
	double master_frame_duration_seconds = master_frame_duration / double(TIMEBASE);

	// Figure out when we can expect the next frame for this card, assuming
	// worst-case jitter (ie., the frame is maximally late).
	double seconds_until_next_frame = max(duration<double>(expected_next_frame - now).count() + max_input_card_jitter_seconds, 0.0);
	// How many times is the master card expected to tick in that time?
	// We assume the master clock has worst-case jitter but not any rate
	// discrepancy, ie., it ticks as early as possible every time, but not
	// faster than its nominal rate.
	double frames_needed = (seconds_until_next_frame + max_master_card_jitter_seconds) / master_frame_duration_seconds;

	// As a special case, if the master card ticks faster than the input card,
	// we expect the queue to drain by itself even without dropping. But if
	// the difference is small (e.g. 60 Hz master and 59.94 input), it would
	// go slowly enough that the effect wouldn't really be appreciable.
	// We account for this by looking at the situation five frames ahead,
	// assuming everything else is the same.
	double frames_allowed;
	if (master_frame_duration < input_frame_duration) {
		frames_allowed = frames_needed + 5 * (input_frame_duration_seconds - master_frame_duration_seconds) / master_frame_duration_seconds;
	} else {
		frames_allowed = frames_needed;
	}

	safe_queue_length = max<int>(floor(frames_allowed), 0);
	metric_input_queue_safe_length_frames = safe_queue_length;
}
Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
	: num_cards(num_cards),
	  mixer_surface(create_surface(format)),
	  h264_encoder_surface(create_surface(format)),
	  decklink_output_surface(create_surface(format))
{
	memcpy(ycbcr_interpretation, global_flags.ycbcr_interpretation, sizeof(ycbcr_interpretation));
	CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
	// This should nearly always be true.
	global_flags.can_disable_srgb_decoder =
		epoxy_has_gl_extension("GL_EXT_texture_sRGB_decode") &&
		epoxy_has_gl_extension("GL_ARB_sampler_objects");
	// Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision
	// will be halved when sampling them, and we need to compensate here.
	movit_texel_subpixel_precision /= 2.0;

	resource_pool.reset(new ResourcePool);
	for (unsigned i = 0; i < NUM_OUTPUTS; ++i) {
		output_channel[i].parent = this;
		output_channel[i].channel = i;
	}

	ImageFormat inout_format;
	inout_format.color_space = COLORSPACE_sRGB;
	inout_format.gamma_curve = GAMMA_sRGB;

	// Matches the 4:2:0 format created by the main chain.
	YCbCrFormat ycbcr_format;
	ycbcr_format.chroma_subsampling_x = 2;
	ycbcr_format.chroma_subsampling_y = 2;
	if (global_flags.ycbcr_rec709_coefficients) {
		ycbcr_format.luma_coefficients = YCBCR_REC_709;
	} else {
		ycbcr_format.luma_coefficients = YCBCR_REC_601;
	}
	ycbcr_format.full_range = false;
	ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
	ycbcr_format.cb_x_position = 0.0f;
	ycbcr_format.cr_x_position = 0.0f;
	ycbcr_format.cb_y_position = 0.5f;
	ycbcr_format.cr_y_position = 0.5f;
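	// (x = 0.0, y = 0.5 is left-sited, vertically centered chroma, i.e.,
	// the conventional MPEG-2/H.264-style 4:2:0 siting.)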
	// Display chain; shows the live output produced by the main chain (or rather, a copy of it).
	display_chain.reset(new EffectChain(global_flags.width, global_flags.height, resource_pool.get()));
	GLenum type = global_flags.x264_bit_depth > 8 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
	display_input = new YCbCrInput(inout_format, ycbcr_format, global_flags.width, global_flags.height, YCBCR_INPUT_SPLIT_Y_AND_CBCR, type);
	display_chain->add_input(display_input);
	display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
	display_chain->set_dither_bits(0);  // Don't bother.
	display_chain->finalize();
	video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, global_flags.width, global_flags.height, &httpd, global_disk_space_estimator));
	if (!global_flags.card_to_mjpeg_stream_export.empty()) {
		mjpeg_encoder.reset(new MJPEGEncoder(&httpd, global_flags.va_display));
	}

	// Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
	theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), num_cards));

	// Must be instantiated after the theme, as the theme decides the number of FFmpeg inputs.
	std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
	audio_mixer.reset(new AudioMixer(num_cards, video_inputs.size()));

	httpd.add_endpoint("/channels", bind(&Mixer::get_channels_json, this), HTTPD::ALLOW_ALL_ORIGINS);
	for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) {
		char url[256];
		snprintf(url, sizeof(url), "/channels/%d/color", channel_idx);
		httpd.add_endpoint(url, bind(&Mixer::get_channel_color_http, this, unsigned(channel_idx)), HTTPD::ALLOW_ALL_ORIGINS);
	}

	// Start listening for clients only once VideoEncoder has written its header, if any.
	httpd.start(global_flags.http_port);
	// First try initializing the PCI devices, then the USB devices, then
	// fill up with fake cards until we have the desired number of cards.
	unsigned num_pci_devices = 0;
	unsigned card_index = 0;

	IDeckLinkIterator *decklink_iterator = CreateDeckLinkIteratorInstance();
	if (decklink_iterator != nullptr) {
		for ( ; card_index < num_cards; ++card_index) {
			IDeckLink *decklink;
			if (decklink_iterator->Next(&decklink) != S_OK) {
				break;
			}

			DeckLinkCapture *capture = new DeckLinkCapture(decklink, card_index);
			DeckLinkOutput *output = new DeckLinkOutput(resource_pool.get(), decklink_output_surface, global_flags.width, global_flags.height, card_index);
			if (!output->set_device(decklink)) {
				delete output;
				output = nullptr;
			}
			configure_card(card_index, capture, CardType::LIVE_CARD, output);
			++num_pci_devices;
		}
		decklink_iterator->Release();
		fprintf(stderr, "Found %u DeckLink PCI card(s).\n", num_pci_devices);
	} else {
		fprintf(stderr, "DeckLink drivers not found. Probing for USB cards only.\n");
	}

	unsigned num_usb_devices = BMUSBCapture::num_cards();
	for (unsigned usb_card_index = 0; usb_card_index < num_usb_devices && card_index < num_cards; ++usb_card_index, ++card_index) {
		BMUSBCapture *capture = new BMUSBCapture(usb_card_index);
		capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, card_index));
		configure_card(card_index, capture, CardType::LIVE_CARD, /*output=*/nullptr);
	}
	fprintf(stderr, "Found %u USB card(s).\n", num_usb_devices);

	unsigned num_fake_cards = 0;
	for ( ; card_index < num_cards; ++card_index, ++num_fake_cards) {
		FakeCapture *capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
		configure_card(card_index, capture, CardType::FAKE_CAPTURE, /*output=*/nullptr);
	}

	if (num_fake_cards > 0) {
		fprintf(stderr, "Initialized %u fake cards.\n", num_fake_cards);
	}
	// Initialize all video inputs the theme asked for. Note that these are
	// all put _after_ the regular cards, which stop at <num_cards> - 1.
	for (unsigned video_card_index = 0; video_card_index < video_inputs.size(); ++card_index, ++video_card_index) {
		if (card_index >= MAX_VIDEO_CARDS) {
			fprintf(stderr, "ERROR: Not enough card slots available for the videos the theme requested.\n");
			exit(1);
		}
		configure_card(card_index, video_inputs[video_card_index], CardType::FFMPEG_INPUT, /*output=*/nullptr);
		video_inputs[video_card_index]->set_card_index(card_index);
	}
	num_video_inputs = video_inputs.size();

	// Same, for HTML inputs.
	std::vector<CEFCapture *> html_inputs = theme->get_html_inputs();
	for (unsigned html_card_index = 0; html_card_index < html_inputs.size(); ++card_index, ++html_card_index) {
		if (card_index >= MAX_VIDEO_CARDS) {
			fprintf(stderr, "ERROR: Not enough card slots available for the HTML inputs the theme requested.\n");
			exit(1);
		}
		configure_card(card_index, html_inputs[html_card_index], CardType::CEF_INPUT, /*output=*/nullptr);
		html_inputs[html_card_index]->set_card_index(card_index);
	}
	num_html_inputs = html_inputs.size();
	BMUSBCapture::set_card_connected_callback(bind(&Mixer::bm_hotplug_add, this, _1));
	BMUSBCapture::start_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
		cards[card_index].queue_length_policy.reset(card_index);
	}

	chroma_subsampler.reset(new ChromaSubsampler(resource_pool.get()));

	if (global_flags.ten_bit_input) {
		if (!v210Converter::has_hardware_support()) {
			fprintf(stderr, "ERROR: --ten-bit-input requires support for OpenGL compute shaders\n");
			fprintf(stderr, "       (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n");
			exit(1);
		}
		v210_converter.reset(new v210Converter());

		// These are all the widths listed in the Blackmagic SDK documentation
		// (section 2.7.3, “Display Modes”).
		v210_converter->precompile_shader(720);
		v210_converter->precompile_shader(1280);
		v210_converter->precompile_shader(1920);
		v210_converter->precompile_shader(2048);
		v210_converter->precompile_shader(3840);
		v210_converter->precompile_shader(4096);
	}
	if (global_flags.ten_bit_output) {
		if (!v210Converter::has_hardware_support()) {
			fprintf(stderr, "ERROR: --ten-bit-output requires support for OpenGL compute shaders\n");
			fprintf(stderr, "       (OpenGL 4.3, or GL_ARB_compute_shader + GL_ARB_shader_image_load_store).\n");
			exit(1);
		}
	}

	timecode_renderer.reset(new TimecodeRenderer(resource_pool.get(), global_flags.width, global_flags.height));
	display_timecode_in_stream = global_flags.display_timecode_in_stream;
	display_timecode_on_stdout = global_flags.display_timecode_on_stdout;

	if (global_flags.enable_alsa_output) {
		alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
	}
	if (global_flags.output_card != -1) {
		desired_output_card_index = global_flags.output_card;
		set_output_card_internal(global_flags.output_card);
	}

	output_jitter_history.register_metrics({{ "card", "output" }});
}
Mixer::~Mixer()
{
	if (mjpeg_encoder != nullptr) {
		mjpeg_encoder->stop();
	}
	BMUSBCapture::stop_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
		cards[card_index].capture->stop_dequeue_thread();
		if (cards[card_index].output) {
			cards[card_index].output->end_output();
			cards[card_index].output.reset();
		}
	}

	video_encoder.reset(nullptr);
}
void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardType card_type, DeckLinkOutput *output)
{
	printf("Configuring card %u...\n", card_index);
	CaptureCard *card = &cards[card_index];
	if (card->capture != nullptr) {
		card->capture->stop_dequeue_thread();
	}
	card->capture.reset(capture);
	card->is_fake_capture = (card_type == CardType::FAKE_CAPTURE);
	card->is_cef_capture = (card_type == CardType::CEF_INPUT);
	card->may_have_dropped_last_frame = false;
	card->type = card_type;
	if (card->output.get() != output) {
		card->output.reset(output);
	}

	PixelFormat pixel_format;
	if (card_type == CardType::FFMPEG_INPUT) {
		pixel_format = capture->get_current_pixel_format();
	} else if (card_type == CardType::CEF_INPUT) {
		pixel_format = PixelFormat_8BitBGRA;
	} else if (global_flags.ten_bit_input) {
		pixel_format = PixelFormat_10BitYCbCr;
	} else {
		pixel_format = PixelFormat_8BitYCbCr;
	}

	card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
	if (card->frame_allocator == nullptr) {
		card->frame_allocator.reset(new PBOFrameAllocator(pixel_format, 8 << 20, global_flags.width, global_flags.height));  // 8 MB.
	}
	card->capture->set_video_frame_allocator(card->frame_allocator.get());
	if (card->surface == nullptr) {
		card->surface = create_surface_with_same_format(mixer_surface);
	}
	while (!card->new_frames.empty()) card->new_frames.pop_front();
	card->last_timecode = -1;
	card->capture->set_pixel_format(pixel_format);
	card->capture->configure_card();

	// NOTE: start_bm_capture() happens in thread_func().

	DeviceSpec device;
	if (card_type == CardType::FFMPEG_INPUT) {
		device = DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_cards};
	} else {
		device = DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
	}
	audio_mixer->reset_resampler(device);
	audio_mixer->set_display_name(device, card->capture->get_description());
	audio_mixer->trigger_state_changed_callback();
	// Unregister old metrics, if any.
	if (!card->labels.empty()) {
		const vector<pair<string, string>> &labels = card->labels;
		card->jitter_history.unregister_metrics(labels);
		card->queue_length_policy.unregister_metrics(labels);
		global_metrics.remove("input_received_frames", labels);
		global_metrics.remove("input_dropped_frames_jitter", labels);
		global_metrics.remove("input_dropped_frames_error", labels);
		global_metrics.remove("input_dropped_frames_resets", labels);
		global_metrics.remove("input_queue_length_frames", labels);
		global_metrics.remove("input_queue_duped_frames", labels);

		global_metrics.remove("input_has_signal_bool", labels);
		global_metrics.remove("input_is_connected_bool", labels);
		global_metrics.remove("input_interlaced_bool", labels);
		global_metrics.remove("input_width_pixels", labels);
		global_metrics.remove("input_height_pixels", labels);
		global_metrics.remove("input_frame_rate_nom", labels);
		global_metrics.remove("input_frame_rate_den", labels);
		global_metrics.remove("input_sample_rate_hz", labels);
	}
	// Register the new ones.
	vector<pair<string, string>> labels;
	char card_name[32];
	snprintf(card_name, sizeof(card_name), "%u", card_index);
	labels.emplace_back("card", card_name);

	switch (card_type) {
	case CardType::LIVE_CARD:
		labels.emplace_back("cardtype", "live");
		break;
	case CardType::FAKE_CAPTURE:
		labels.emplace_back("cardtype", "fake");
		break;
	case CardType::FFMPEG_INPUT:
		labels.emplace_back("cardtype", "ffmpeg");
		break;
	case CardType::CEF_INPUT:
		labels.emplace_back("cardtype", "cef");
		break;
	}
	card->jitter_history.register_metrics(labels);
	card->queue_length_policy.register_metrics(labels);
	global_metrics.add("input_received_frames", labels, &card->metric_input_received_frames);
	global_metrics.add("input_dropped_frames_jitter", labels, &card->metric_input_dropped_frames_jitter);
	global_metrics.add("input_dropped_frames_error", labels, &card->metric_input_dropped_frames_error);
	global_metrics.add("input_dropped_frames_resets", labels, &card->metric_input_resets);
	global_metrics.add("input_queue_length_frames", labels, &card->metric_input_queue_length_frames, Metrics::TYPE_GAUGE);
	global_metrics.add("input_queue_duped_frames", labels, &card->metric_input_duped_frames);

	global_metrics.add("input_has_signal_bool", labels, &card->metric_input_has_signal_bool, Metrics::TYPE_GAUGE);
	global_metrics.add("input_is_connected_bool", labels, &card->metric_input_is_connected_bool, Metrics::TYPE_GAUGE);
	global_metrics.add("input_interlaced_bool", labels, &card->metric_input_interlaced_bool, Metrics::TYPE_GAUGE);
	global_metrics.add("input_width_pixels", labels, &card->metric_input_width_pixels, Metrics::TYPE_GAUGE);
	global_metrics.add("input_height_pixels", labels, &card->metric_input_height_pixels, Metrics::TYPE_GAUGE);
	global_metrics.add("input_frame_rate_nom", labels, &card->metric_input_frame_rate_nom, Metrics::TYPE_GAUGE);
	global_metrics.add("input_frame_rate_den", labels, &card->metric_input_frame_rate_den, Metrics::TYPE_GAUGE);
	global_metrics.add("input_sample_rate_hz", labels, &card->metric_input_sample_rate_hz, Metrics::TYPE_GAUGE);
	card->labels = labels;
}
void Mixer::set_output_card_internal(int card_index)
{
	// We don't really need to take card_mutex, since we're in the mixer
	// thread and don't mess with any queues (which is the only thing that happens
	// from other threads), but it's probably the safest in the long run.
	unique_lock<mutex> lock(card_mutex);
	if (output_card_index != -1) {
		// Switch the old card from output to input.
		CaptureCard *old_card = &cards[output_card_index];
		old_card->output->end_output();

		// Stop the fake card that we put into place.
		// This needs to _not_ happen under the mutex, to avoid deadlock
		// (delivering the last frame needs to take the mutex).
		CaptureInterface *fake_capture = old_card->capture.get();
		lock.unlock();
		fake_capture->stop_dequeue_thread();
		lock.lock();
		old_card->capture = move(old_card->parked_capture);  // TODO: reset the metrics
		old_card->is_fake_capture = false;
		old_card->capture->start_bm_capture();
	}
	if (card_index != -1) {
		CaptureCard *card = &cards[card_index];
		CaptureInterface *capture = card->capture.get();

		// TODO: DeckLinkCapture::stop_dequeue_thread can actually take
		// several seconds to complete (blocking on DisableVideoInput);
		// see if we can maybe do it asynchronously.
		lock.unlock();
		capture->stop_dequeue_thread();
		lock.lock();
		card->parked_capture = move(card->capture);
		CaptureInterface *fake_capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
		configure_card(card_index, fake_capture, CardType::FAKE_CAPTURE, card->output.release());
		card->queue_length_policy.reset(card_index);
		card->capture->start_bm_capture();
		desired_output_video_mode = output_video_mode = card->output->pick_video_mode(desired_output_video_mode);
		card->output->start_output(desired_output_video_mode, pts_int);
	}
	output_card_index = card_index;
	output_jitter_history.clear();
}
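// Extend a 16-bit wrapping timecode to a monotonically increasing integer,
// given the last unwrapped value. E.g., if last = 0x2fffe and the new
// wrapped value is 0x0001, the result is 0x30001.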
int unwrap_timecode(uint16_t current_wrapped, int last)
{
	uint16_t last_wrapped = last & 0xffff;
	if (current_wrapped > last_wrapped) {
		return (last & ~0xffff) | current_wrapped;
	} else {
		return 0x10000 + ((last & ~0xffff) | current_wrapped);
	}
}

DeviceSpec card_index_to_device(unsigned card_index, unsigned num_cards)
{
	if (card_index >= num_cards) {
		return DeviceSpec{InputSourceType::FFMPEG_VIDEO_INPUT, card_index - num_cards};
	} else {
		return DeviceSpec{InputSourceType::CAPTURE_CARD, card_index};
	}
}
void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                     FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
                     FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
{
	DeviceSpec device = card_index_to_device(card_index, num_cards);
	CaptureCard *card = &cards[card_index];

	++card->metric_input_received_frames;
	card->metric_input_has_signal_bool = video_format.has_signal;
	card->metric_input_is_connected_bool = video_format.is_connected;
	card->metric_input_interlaced_bool = video_format.interlaced;
	card->metric_input_width_pixels = video_format.width;
	card->metric_input_height_pixels = video_format.height;
	card->metric_input_frame_rate_nom = video_format.frame_rate_nom;
	card->metric_input_frame_rate_den = video_format.frame_rate_den;
	card->metric_input_sample_rate_hz = audio_format.sample_rate;

	if (is_mode_scanning[card_index]) {
		if (video_format.has_signal) {
			// Found a stable signal, so stop scanning.
			is_mode_scanning[card_index] = false;
		} else {
			static constexpr double switch_time_s = 0.1;  // Should be enough time for the signal to stabilize.
			steady_clock::time_point now = steady_clock::now();
			double sec_since_last_switch = duration<double>(steady_clock::now() - last_mode_scan_change[card_index]).count();
			if (sec_since_last_switch > switch_time_s) {
				// It isn't this mode; try the next one.
				mode_scanlist_index[card_index]++;
				mode_scanlist_index[card_index] %= mode_scanlist[card_index].size();
				cards[card_index].capture->set_video_mode(mode_scanlist[card_index][mode_scanlist_index[card_index]]);
				last_mode_scan_change[card_index] = now;
			}
		}
	}

	int64_t frame_length = int64_t(TIMEBASE) * video_format.frame_rate_den / video_format.frame_rate_nom;
	assert(frame_length > 0);
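	// Convert the audio byte count into a sample count; each sample is
	// num_channels * (bits_per_sample / 8) bytes. More than a tenth of a
	// second of audio in one frame is implausible for a live card, and
	// makes the frame get dropped below.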
	size_t num_samples = (audio_frame.len > audio_offset) ? (audio_frame.len - audio_offset) / audio_format.num_channels / (audio_format.bits_per_sample / 8) : 0;
	if (num_samples > OUTPUT_FREQUENCY / 10 && card->type != CardType::FFMPEG_INPUT) {
		printf("%s: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x]\n",
			spec_to_string(device).c_str(), int(audio_frame.len), int(audio_offset),
			timecode, int(video_frame.len), int(video_offset), video_format.id);
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}
		if (audio_frame.owner) {
			audio_frame.owner->release_frame(audio_frame);
		}
		return;
	}
	int dropped_frames = 0;
	if (card->last_timecode != -1) {
		dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
	}

	// Number of samples per frame if we need to insert silence.
	// (Could be nonintegral, but resampling will save us then.)
	const int silence_samples = OUTPUT_FREQUENCY * video_format.frame_rate_den / video_format.frame_rate_nom;

	if (dropped_frames > MAX_FPS * 2) {
		fprintf(stderr, "%s lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
			spec_to_string(device).c_str(), card->last_timecode, timecode);
		audio_mixer->reset_resampler(device);
		dropped_frames = 0;
		++card->metric_input_resets;
	} else if (dropped_frames > 0) {
		// Insert silence as needed.
		fprintf(stderr, "%s dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
			spec_to_string(device).c_str(), dropped_frames, timecode);
		card->metric_input_dropped_frames_error += dropped_frames;

		bool success;
		do {
			success = audio_mixer->add_silence(device, silence_samples, dropped_frames, frame_length);
		} while (!success);
	}
	if (num_samples > 0) {
		audio_mixer->add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length, audio_frame.received_timestamp);
	}

	// Done with the audio, so release it.
	if (audio_frame.owner) {
		audio_frame.owner->release_frame(audio_frame);
	}

	card->last_timecode = timecode;
	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;

	size_t cbcr_width, cbcr_height, cbcr_offset, y_offset;
	size_t expected_length = video_format.stride * (video_format.height + video_format.extra_lines_top + video_format.extra_lines_bottom);
	if (userdata != nullptr && userdata->pixel_format == PixelFormat_8BitYCbCrPlanar) {
		// The calculation above is wrong for planar Y'CbCr, so just override it.
		assert(card->type == CardType::FFMPEG_INPUT);
		assert(video_offset == 0);
		expected_length = video_frame.len;

		userdata->ycbcr_format = (static_cast<FFmpegCapture *>(card->capture.get()))->get_current_frame_ycbcr_format();
		cbcr_width = video_format.width / userdata->ycbcr_format.chroma_subsampling_x;
		cbcr_height = video_format.height / userdata->ycbcr_format.chroma_subsampling_y;
		cbcr_offset = video_format.width * video_format.height;
		y_offset = 0;
	} else {
		// All the other Y'CbCr formats are 4:2:2.
		cbcr_width = video_format.width / 2;
		cbcr_height = video_format.height;
		cbcr_offset = video_offset / 2;
		y_offset = video_frame.size / 2 + video_offset / 2;
	}
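	// (For the 4:2:2 formats above, the y_offset/cbcr_offset values encode
	// the buffer layout: interleaved CbCr in the first half of the frame
	// buffer, luma in the second half.)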
	if (video_frame.len - video_offset == 0 ||
	    video_frame.len - video_offset != expected_length) {
		if (video_frame.len != 0) {
			printf("%s: Dropping video frame with wrong length (%ld; expected %ld)\n",
				spec_to_string(device).c_str(), video_frame.len - video_offset, expected_length);
		}
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}

		// Still send on the information that we _had_ a frame, even though it's corrupted,
		// so that pts can go up accordingly.
		{
			unique_lock<mutex> lock(card_mutex);
			CaptureCard::NewFrame new_frame;
			new_frame.frame = RefCountedFrame(FrameAllocator::Frame());
			new_frame.length = frame_length;
			new_frame.interlaced = false;
			new_frame.dropped_frames = dropped_frames;
			new_frame.received_timestamp = video_frame.received_timestamp;
			card->new_frames.push_back(move(new_frame));
			card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
		}
		card->new_frames_changed.notify_all();
		return;
	}
	unsigned num_fields = video_format.interlaced ? 2 : 1;
	steady_clock::time_point frame_upload_start;
	bool interlaced_stride = false;
	if (video_format.interlaced) {
		// Send the two fields along as separate frames; the other side will need to add
		// a deinterlacer to actually get this right.
		assert(video_format.height % 2 == 0);
		video_format.height /= 2;
		assert(frame_length % 2 == 0);
		frame_length /= 2;
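		// If the second field starts on the very next line, the two fields
		// are woven line by line in the buffer, so the uploads below must
		// skip every other line (see upload_texture()).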
		if (video_format.second_field_start == 1) {
			interlaced_stride = true;
		}
		frame_upload_start = steady_clock::now();
	}
	userdata->last_interlaced = video_format.interlaced;
	userdata->last_has_signal = video_format.has_signal;
	userdata->last_is_connected = video_format.is_connected;
	userdata->last_frame_rate_nom = video_format.frame_rate_nom;
	userdata->last_frame_rate_den = video_format.frame_rate_den;
	RefCountedFrame frame(video_frame);
	// Upload the textures.
	for (unsigned field = 0; field < num_fields; ++field) {
		// Put the actual texture upload in a lambda that is executed in the main thread.
		// It is entirely possible to do this in the same thread (and it might even be
		// faster, depending on the GPU and driver), but it appears to be triggering
		// driver bugs very easily.
		//
		// Note that this means we must hold on to the actual frame data in <userdata>
		// until the upload command is run, but we hold on to <frame> much longer than that
		// (in fact, all the way until we no longer use the texture in rendering).
		auto upload_func = [this, field, video_format, y_offset, video_offset, cbcr_offset, cbcr_width, cbcr_height, interlaced_stride, userdata]() {
			unsigned field_start_line;
			if (field == 1) {
				field_start_line = video_format.second_field_start;
			} else {
				field_start_line = video_format.extra_lines_top;
			}

			// For anything not FRAME_FORMAT_YCBCR_10BIT, v210_width will be nonsensical but not used.
			size_t v210_width = video_format.stride / sizeof(uint32_t);
			ensure_texture_resolution(userdata, field, video_format.width, video_format.height, cbcr_width, cbcr_height, v210_width);
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, userdata->pbo);

			switch (userdata->pixel_format) {
			case PixelFormat_10BitYCbCr: {
				size_t field_start = video_offset + video_format.stride * field_start_line;
				upload_texture(userdata->tex_v210[field], v210_width, video_format.height, video_format.stride, interlaced_stride, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, field_start);
				v210_converter->convert(userdata->tex_v210[field], userdata->tex_444[field], video_format.width, video_format.height);
				break;
			}
			case PixelFormat_8BitYCbCr: {
				size_t field_y_start = y_offset + video_format.width * field_start_line;
				size_t field_cbcr_start = cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t);

				// Make up our own strides, since we are interleaving.
				upload_texture(userdata->tex_y[field], video_format.width, video_format.height, video_format.width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_y_start);
				upload_texture(userdata->tex_cbcr[field], cbcr_width, cbcr_height, cbcr_width * sizeof(uint16_t), interlaced_stride, GL_RG, GL_UNSIGNED_BYTE, field_cbcr_start);
				break;
			}
			case PixelFormat_8BitYCbCrPlanar: {
				assert(field_start_line == 0);  // We don't really support interlaced here.
				size_t field_y_start = y_offset;
				size_t field_cb_start = cbcr_offset;
				size_t field_cr_start = cbcr_offset + cbcr_width * cbcr_height;

				// Make up our own strides, since the planes are tightly packed.
				upload_texture(userdata->tex_y[field], video_format.width, video_format.height, video_format.width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_y_start);
				upload_texture(userdata->tex_cb[field], cbcr_width, cbcr_height, cbcr_width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_cb_start);
				upload_texture(userdata->tex_cr[field], cbcr_width, cbcr_height, cbcr_width, interlaced_stride, GL_RED, GL_UNSIGNED_BYTE, field_cr_start);
				break;
			}
			case PixelFormat_8BitBGRA: {
				size_t field_start = video_offset + video_format.stride * field_start_line;
				upload_texture(userdata->tex_rgba[field], video_format.width, video_format.height, video_format.stride, interlaced_stride, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, field_start);
				// These could be asked to deliver mipmaps at any time.
				glBindTexture(GL_TEXTURE_2D, userdata->tex_rgba[field]);
				glGenerateMipmap(GL_TEXTURE_2D);
				glBindTexture(GL_TEXTURE_2D, 0);
				break;
			}
			default:
				assert(false);
			}

			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
		};
		if (field == 1) {
			// Don't upload the second field as fast as we can; wait until
			// the field time has approximately passed. (Otherwise, we could
			// get timing jitter against the other sources, and possibly also
			// against the video display, although the latter is not as critical.)
			// This requires our system clock to be reasonably close to the
			// video clock, but that's not an unreasonable assumption.
			steady_clock::time_point second_field_start = frame_upload_start +
				nanoseconds(frame_length * 1000000000 / TIMEBASE);
			this_thread::sleep_until(second_field_start);
		}
		{
			unique_lock<mutex> lock(card_mutex);
			CaptureCard::NewFrame new_frame;
			new_frame.frame = frame;
			new_frame.length = frame_length;
			new_frame.field = field;
			new_frame.interlaced = video_format.interlaced;
			new_frame.upload_func = upload_func;
			new_frame.dropped_frames = dropped_frames;
			new_frame.received_timestamp = video_frame.received_timestamp;  // Ignore the audio timestamp.
			new_frame.video_format = video_format;
			new_frame.y_offset = y_offset;
			new_frame.cbcr_offset = cbcr_offset;
			card->new_frames.push_back(move(new_frame));
			card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
			card->may_have_dropped_last_frame = false;
		}
		card->new_frames_changed.notify_all();
	}
}
void Mixer::bm_hotplug_add(libusb_device *dev)
{
	lock_guard<mutex> lock(hotplug_mutex);
	hotplugged_cards.push_back(dev);
}

void Mixer::bm_hotplug_remove(unsigned card_index)
{
	cards[card_index].new_frames_changed.notify_all();
}
void Mixer::thread_func()
{
	pthread_setname_np(pthread_self(), "Mixer_OpenGL");

	eglBindAPI(EGL_OPENGL_API);
	QOpenGLContext *context = create_context(mixer_surface);
	if (!make_current(context, mixer_surface)) {
		abort();  // Without a working GL context, the mixer cannot run.
	}

	// Start the actual capture. (We don't want to do it before we're actually ready
	// to process output frames.)
	for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
		if (int(card_index) != output_card_index) {
			cards[card_index].capture->start_bm_capture();
		}
	}

	BasicStats basic_stats(/*verbose=*/true, /*use_opengl=*/true);
	int stats_dropped_frames = 0;
	while (!should_quit) {
		if (desired_output_card_index != output_card_index) {
			set_output_card_internal(desired_output_card_index);
		}
		if (output_card_index != -1 &&
		    desired_output_video_mode != output_video_mode) {
			DeckLinkOutput *output = cards[output_card_index].output.get();
			output->end_output();
			desired_output_video_mode = output_video_mode = output->pick_video_mode(desired_output_video_mode);
			output->start_output(desired_output_video_mode, pts_int);
		}
		CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS];
		bool has_new_frame[MAX_VIDEO_CARDS] = { false };

		bool master_card_is_output;
		unsigned master_card_index;
		if (output_card_index != -1) {
			master_card_is_output = true;
			master_card_index = output_card_index;
		} else {
			master_card_is_output = false;
			master_card_index = theme->map_signal(master_clock_channel);
			assert(master_card_index < num_cards + num_video_inputs);
		}
		OutputFrameInfo output_frame_info = get_one_frame_from_each_card(master_card_index, master_card_is_output, new_frames, has_new_frame);
		schedule_audio_resampling_tasks(output_frame_info.dropped_frames, output_frame_info.num_samples, output_frame_info.frame_duration, output_frame_info.is_preroll, output_frame_info.frame_timestamp);
		stats_dropped_frames += output_frame_info.dropped_frames;

		handle_hotplugged_cards();
		for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
			DeviceSpec device = card_index_to_device(card_index, num_cards);
			if (card_index == master_card_index || !has_new_frame[card_index]) {
				continue;
			}
			if (new_frames[card_index].frame->len == 0) {
				++new_frames[card_index].dropped_frames;
			}
			if (new_frames[card_index].dropped_frames > 0) {
				printf("%s dropped %d frames before this\n",
					spec_to_string(device).c_str(), int(new_frames[card_index].dropped_frames));
			}
		}
		// If the master card is reporting a corrupted or otherwise dropped frame,
		// just increase the pts (skipping over this frame) and don't try to compute anything new.
		if (!master_card_is_output && new_frames[master_card_index].frame->len == 0) {
			++stats_dropped_frames;
			pts_int += new_frames[master_card_index].length;
			continue;
		}
		for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
			if (!has_new_frame[card_index] || new_frames[card_index].frame->len == 0)
				continue;

			CaptureCard::NewFrame *new_frame = &new_frames[card_index];
			assert(new_frame->frame != nullptr);
			insert_new_frame(new_frame->frame, new_frame->field, new_frame->interlaced, card_index, &input_state);

			// The new texture might need uploading before use.
			if (new_frame->upload_func) {
				new_frame->upload_func();
				new_frame->upload_func = nullptr;
			}

			// Only bother doing MJPEG encoding if there are any connected clients
			// that want the stream.
			if (httpd.get_num_connected_multicam_clients() > 0) {
				auto stream_it = global_flags.card_to_mjpeg_stream_export.find(card_index);
				if (stream_it != global_flags.card_to_mjpeg_stream_export.end()) {
					mjpeg_encoder->upload_frame(pts_int, stream_it->second, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset);
				}
			}
		}
		int64_t frame_duration = output_frame_info.frame_duration;
		render_one_frame(frame_duration);
		{
			lock_guard<mutex> lock(frame_num_mutex);
			++frame_num;
		}
		frame_num_updated.notify_all();
		pts_int += frame_duration;

		basic_stats.update(frame_num, stats_dropped_frames);
		// if (frame_num % 100 == 0) chain->print_phase_timing();

		if (should_cut.exchange(false)) {  // Test and clear.
			video_encoder->do_cut(frame_num);
		}
		// Reset every 100 frames, so that local variations in frame times
		// (especially for the first few frames, when the shaders are
		// compiled etc.) don't make it hard to measure for the entire
		// remaining duration of the program.
		// if (frame == 10000) { ... }
	}

	resource_pool->clean_context();
}
bool Mixer::input_card_is_master_clock(unsigned card_index, unsigned master_card_index) const
{
	if (output_card_index != -1) {
		// The output card (ie., cards[output_card_index].output) is the master clock,
		// so no input card (ie., cards[card_index].capture) is.
		return false;
	}
	return (card_index == master_card_index);
}
void Mixer::trim_queue(CaptureCard *card, size_t safe_queue_length)
{
	// Count the number of frames in the queue, including any frames
	// we dropped. It's hard to know exactly how we should deal with
	// dropped (corrupted) input frames; they don't help our goal of
	// avoiding starvation, but they still add to the problem of latency.
	// Since dropped frames are going to mean a bump in the signal anyway,
	// we err on the side of having more stable latency instead.
	unsigned queue_length = 0;
	for (const CaptureCard::NewFrame &frame : card->new_frames) {
		queue_length += frame.dropped_frames + 1;
	}
	// If needed, drop frames until the queue is below the safe limit.
	// We prefer to drop from the head, because all else being equal,
	// we'd like more recent frames (less latency).
	unsigned dropped_frames = 0;
	while (queue_length > safe_queue_length) {
		assert(!card->new_frames.empty());
		assert(queue_length > card->new_frames.front().dropped_frames);
		queue_length -= card->new_frames.front().dropped_frames;

		if (queue_length <= safe_queue_length) {
			// No need to drop anything.
			break;
		}

		card->new_frames.pop_front();
		card->new_frames_changed.notify_all();
		--queue_length;
		++dropped_frames;

		if (queue_length == 0 && card->is_cef_capture) {
			card->may_have_dropped_last_frame = true;
		}
	}

	card->metric_input_dropped_frames_jitter += dropped_frames;
	card->metric_input_queue_length_frames = queue_length;

#if 0
	if (dropped_frames > 0) {
		fprintf(stderr, "Card %u dropped %u frame(s) to keep latency down.\n",
			card_index, dropped_frames);
	}
#endif
}
pair<string, string> Mixer::get_channels_json()
{
	Channels ret;
	for (int channel_idx = 2; channel_idx < theme->get_num_channels(); ++channel_idx) {
		Channel *channel = ret.add_channel();
		channel->set_index(channel_idx);
		channel->set_name(theme->get_channel_name(channel_idx));
		channel->set_color(theme->get_channel_color(channel_idx));
	}
	string contents;
	google::protobuf::util::MessageToJsonString(ret, &contents);  // Ignore any errors.
	return make_pair(contents, "text/json");
}

pair<string, string> Mixer::get_channel_color_http(unsigned channel_idx)
{
	return make_pair(theme->get_channel_color(channel_idx), "text/plain");
}
Mixer::OutputFrameInfo Mixer::get_one_frame_from_each_card(unsigned master_card_index, bool master_card_is_output, CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS], bool has_new_frame[MAX_VIDEO_CARDS])
{
	OutputFrameInfo output_frame_info;
start:
	unique_lock<mutex> lock(card_mutex, defer_lock);
	if (master_card_is_output) {
		// Clocked to the output, so wait for it to be ready for the next frame.
		cards[master_card_index].output->wait_for_frame(pts_int, &output_frame_info.dropped_frames, &output_frame_info.frame_duration, &output_frame_info.is_preroll, &output_frame_info.frame_timestamp);
		lock.lock();
	} else {
		// Wait for the master card to have a new frame.
		// TODO: Add a timeout.
		output_frame_info.is_preroll = false;
		lock.lock();
		cards[master_card_index].new_frames_changed.wait(lock, [this, master_card_index]{ return !cards[master_card_index].new_frames.empty() || cards[master_card_index].capture->get_disconnected(); });
	}

	if (master_card_is_output) {
		handle_hotplugged_cards();
	} else if (cards[master_card_index].new_frames.empty()) {
		// We were woken up, but not due to a new frame. Deal with it
		// and then restart.
		assert(cards[master_card_index].capture->get_disconnected());
		handle_hotplugged_cards();
		lock.unlock();
		goto start;
	}
	for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
		CaptureCard *card = &cards[card_index];
		if (card->new_frames.empty()) {  // Starvation.
			++card->metric_input_duped_frames;

			if (card->is_cef_capture && card->may_have_dropped_last_frame) {
				// Unlike other sources, CEF is not guaranteed to send us a steady
				// stream of frames, so we'll have to ask it to repaint the frame
				// we dropped. (may_have_dropped_last_frame is set whenever we
				// trim the queue completely away, and cleared when we actually
				// get a new frame.)
				((CEFCapture *)card->capture.get())->request_new_frame();
			}
		} else {
			new_frames[card_index] = move(card->new_frames.front());
			has_new_frame[card_index] = true;
			card->new_frames.pop_front();
			card->new_frames_changed.notify_all();
		}
	}
	if (!master_card_is_output) {
		output_frame_info.frame_timestamp = new_frames[master_card_index].received_timestamp;
		output_frame_info.dropped_frames = new_frames[master_card_index].dropped_frames;
		output_frame_info.frame_duration = new_frames[master_card_index].length;
	}

	if (!output_frame_info.is_preroll) {
		output_jitter_history.frame_arrived(output_frame_info.frame_timestamp, output_frame_info.frame_duration, output_frame_info.dropped_frames);
	}

	for (unsigned card_index = 0; card_index < num_cards + num_video_inputs + num_html_inputs; ++card_index) {
		CaptureCard *card = &cards[card_index];
		if (has_new_frame[card_index] &&
		    !input_card_is_master_clock(card_index, master_card_index) &&
		    !output_frame_info.is_preroll) {
			card->queue_length_policy.update_policy(
				output_frame_info.frame_timestamp,
				card->jitter_history.get_expected_next_frame(),
				new_frames[master_card_index].length,
				output_frame_info.frame_duration,
				card->jitter_history.estimate_max_jitter(),
				output_jitter_history.estimate_max_jitter());
			trim_queue(card, min<int>(global_flags.max_input_queue_frames,
				card->queue_length_policy.get_safe_queue_length()));
		}
	}
	// This might get off by a fractional sample when changing master card
	// between ones with different frame rates, but that's fine.
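	// The remainder is carried over Bresenham-style, so the long-run average
	// sample count per frame stays exact; e.g., at 48 kHz and 59.94 fps,
	// each frame gets 800 or 801 samples, averaging exactly 800.8.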
	int num_samples_times_timebase = OUTPUT_FREQUENCY * output_frame_info.frame_duration + fractional_samples;
	output_frame_info.num_samples = num_samples_times_timebase / TIMEBASE;
	fractional_samples = num_samples_times_timebase % TIMEBASE;
	assert(output_frame_info.num_samples >= 0);

	return output_frame_info;
}
void Mixer::handle_hotplugged_cards()
{
	// Check for cards that have been disconnected since last frame.
	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		CaptureCard *card = &cards[card_index];
		if (card->capture->get_disconnected()) {
			fprintf(stderr, "Card %u went away, replacing with a fake card.\n", card_index);
			FakeCapture *capture = new FakeCapture(global_flags.width, global_flags.height, FAKE_FPS, OUTPUT_FREQUENCY, card_index, global_flags.fake_cards_audio);
			configure_card(card_index, capture, CardType::FAKE_CAPTURE, /*output=*/nullptr);
			card->queue_length_policy.reset(card_index);
			card->capture->start_bm_capture();
		}
	}

	// Check for cards that have been connected since last frame.
	vector<libusb_device *> hotplugged_cards_copy;
	{
		lock_guard<mutex> lock(hotplug_mutex);
		swap(hotplugged_cards, hotplugged_cards_copy);
	}
	for (libusb_device *new_dev : hotplugged_cards_copy) {
		// Look for a fake capture card where we can stick this in.
		int free_card_index = -1;
		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			if (cards[card_index].is_fake_capture) {
				free_card_index = card_index;
				break;
			}
		}

		if (free_card_index == -1) {
			fprintf(stderr, "New card plugged in, but no free slots -- ignoring.\n");
			libusb_unref_device(new_dev);
		} else {
			// BMUSBCapture takes ownership.
			fprintf(stderr, "New card plugged in, choosing slot %d.\n", free_card_index);
			CaptureCard *card = &cards[free_card_index];
			BMUSBCapture *capture = new BMUSBCapture(free_card_index, new_dev);
			configure_card(free_card_index, capture, CardType::LIVE_CARD, /*output=*/nullptr);
			card->queue_length_policy.reset(free_card_index);
			capture->set_card_disconnected_callback(bind(&Mixer::bm_hotplug_remove, this, free_card_index));
			capture->start_bm_capture();
		}
	}
}
void Mixer::schedule_audio_resampling_tasks(unsigned dropped_frames, int num_samples_per_frame, int length_per_frame, bool is_preroll, steady_clock::time_point frame_timestamp)
{
	// Resample the audio as needed, including from previously dropped frames.
	assert(num_cards > 0);
	for (unsigned frame_num = 0; frame_num < dropped_frames + 1; ++frame_num) {
		const bool dropped_frame = (frame_num != dropped_frames);
		{
			// Signal to the audio thread to process this frame.
			// Note that if the frame is a dropped frame, we signal that
			// we don't want to use this frame as base for adjusting
			// the resampler rate. The reason for this is that the timing
			// of these frames is often way too late; they typically don't
			// “arrive” before we synthesize them. Thus, we could end up
			// in a situation where we have inserted e.g. five audio frames
			// into the queue before we then start pulling five of them
			// back out. This makes ResamplingQueue overestimate the delay,
			// causing undue resampler changes. (We _do_ use the last,
			// non-dropped frame; perhaps we should just discard that as well,
			// since dropped frames are expected to be rare, and it might be
			// better to just wait until we have a slightly more normal situation).
			unique_lock<mutex> lock(audio_mutex);
			bool adjust_rate = !dropped_frame && !is_preroll;
			audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame, adjust_rate, frame_timestamp});
			audio_task_queue_changed.notify_one();
		}
		if (dropped_frame) {
			// For dropped frames, increase the pts. Note that if the format changed
			// in the meantime, we have no way of detecting that; we just have to
			// assume the frame length is always the same.
			pts_int += length_per_frame;
		}
	}
}
void Mixer::render_one_frame(int64_t duration)
{
	// Determine the time code for this frame before we start rendering.
	string timecode_text = timecode_renderer->get_timecode_text(double(pts_int) / TIMEBASE, frame_num);
	if (display_timecode_on_stdout) {
		printf("Timecode: '%s'\n", timecode_text.c_str());
	}
	// Update Y'CbCr settings for all cards.
	{
		unique_lock<mutex> lock(card_mutex);
		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			YCbCrInterpretation *interpretation = &ycbcr_interpretation[card_index];
			input_state.ycbcr_coefficients_auto[card_index] = interpretation->ycbcr_coefficients_auto;
			input_state.ycbcr_coefficients[card_index] = interpretation->ycbcr_coefficients;
			input_state.full_range[card_index] = interpretation->full_range;
		}
	}
	// Get the main chain from the theme, and set its state immediately.
	Theme::Chain theme_main_chain = theme->get_chain(0, pts(), global_flags.width, global_flags.height, input_state);
	EffectChain *chain = theme_main_chain.chain;
	theme_main_chain.setup_chain();
	//theme_main_chain.chain->enable_phase_timing(true);

	// If HDMI/SDI output is active and the user has requested auto mode,
	// its mode overrides the existing Y'CbCr setting for the chain.
	YCbCrLumaCoefficients ycbcr_output_coefficients;
	if (global_flags.ycbcr_auto_coefficients && output_card_index != -1) {
		ycbcr_output_coefficients = cards[output_card_index].output->preferred_ycbcr_coefficients();
	} else {
		ycbcr_output_coefficients = global_flags.ycbcr_rec709_coefficients ? YCBCR_REC_709 : YCBCR_REC_601;
	}
	// TODO: Reduce the duplication against theme.cpp.
	YCbCrFormat output_ycbcr_format;
	output_ycbcr_format.chroma_subsampling_x = 1;
	output_ycbcr_format.chroma_subsampling_y = 1;
	output_ycbcr_format.luma_coefficients = ycbcr_output_coefficients;
	output_ycbcr_format.full_range = false;
	output_ycbcr_format.num_levels = 1 << global_flags.x264_bit_depth;
	chain->change_ycbcr_output_format(output_ycbcr_format);
	// Render main chain. If we're using zerocopy Quick Sync encoding
	// (the default case), we take an extra copy of the created outputs,
	// so that we can display it back to the screen later (it's less memory
	// bandwidth than writing and reading back an RGBA texture, even at 16-bit).
	// Ideally, we'd like to avoid taking copies and just use the main textures
	// for display as well, but they're just views into VA-API memory and must be
	// unmapped during encoding, so we can't use them for display, unfortunately.
	GLuint y_tex, cbcr_full_tex, cbcr_tex;
	GLuint y_copy_tex, cbcr_copy_tex = 0;
	GLuint y_display_tex, cbcr_display_tex;
	GLenum y_type = (global_flags.x264_bit_depth > 8) ? GL_R16 : GL_R8;
	GLenum cbcr_type = (global_flags.x264_bit_depth > 8) ? GL_RG16 : GL_RG8;
	const bool is_zerocopy = video_encoder->is_zerocopy();
	if (is_zerocopy) {
		cbcr_full_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width, global_flags.height);
		y_copy_tex = resource_pool->create_2d_texture(y_type, global_flags.width, global_flags.height);
		cbcr_copy_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width / 2, global_flags.height / 2);

		y_display_tex = y_copy_tex;
		cbcr_display_tex = cbcr_copy_tex;

		// y_tex and cbcr_tex will be given by VideoEncoder.
	} else {
		cbcr_full_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width, global_flags.height);
		y_tex = resource_pool->create_2d_texture(y_type, global_flags.width, global_flags.height);
		cbcr_tex = resource_pool->create_2d_texture(cbcr_type, global_flags.width / 2, global_flags.height / 2);

		y_display_tex = y_tex;
		cbcr_display_tex = cbcr_tex;
	}
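	// Either way, y_display_tex/cbcr_display_tex now name the textures we
	// can safely show on screen once encoding has started.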
	const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
	bool got_frame = video_encoder->begin_frame(pts_int + av_delay, duration, ycbcr_output_coefficients, theme_main_chain.input_frames, &y_tex, &cbcr_tex);
	assert(got_frame);

	GLuint fbo;
	if (is_zerocopy) {
		fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, y_copy_tex);
	} else {
		fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex);
	}
	check_error();
	chain->render_to_fbo(fbo, global_flags.width, global_flags.height);
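	// (The FBO rendered Y' plus full-resolution interleaved CbCr in one
	// pass; in the zerocopy case it also wrote the extra Y' copy used for
	// display.)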
	if (display_timecode_in_stream) {
		// Render the timecode on top.
		timecode_renderer->render_timecode(fbo, timecode_text);
	}

	resource_pool->release_fbo(fbo);

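	// cbcr_full_tex holds chroma at full resolution; subsample it to half
	// resolution in both dimensions to get the 4:2:0 data the encoder
	// expects (plus a display copy in the zerocopy case).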
	if (is_zerocopy) {
		chroma_subsampler->subsample_chroma(cbcr_full_tex, global_flags.width, global_flags.height, cbcr_tex, cbcr_copy_tex);
	} else {
		chroma_subsampler->subsample_chroma(cbcr_full_tex, global_flags.width, global_flags.height, cbcr_tex);
	}
	if (output_card_index != -1) {
		cards[output_card_index].output->send_frame(y_tex, cbcr_full_tex, ycbcr_output_coefficients, theme_main_chain.input_frames, pts_int, duration);
	}
	resource_pool->release_2d_texture(cbcr_full_tex);
	// Set the right state for the Y' and CbCr textures we use for display.
	glBindFramebuffer(GL_FRAMEBUFFER, 0);
	glBindTexture(GL_TEXTURE_2D, y_display_tex);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	glBindTexture(GL_TEXTURE_2D, cbcr_display_tex);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	RefCountedGLsync fence = video_encoder->end_frame();
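	// (end_frame() returns a GL fence that signals once the GPU has
	// finished rendering; the display frames below carry it as their
	// ready_fence, so readers can wait before sampling the textures.)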
	// The live frame converts the Y'CbCr texture copies back into RGB and
	// displays them. It owns y_display_tex and cbcr_display_tex now
	// (whichever textures they are).
	DisplayFrame live_frame;
	live_frame.chain = display_chain.get();
	live_frame.setup_chain = [this, y_display_tex, cbcr_display_tex]{
		display_input->set_texture_num(0, y_display_tex);
		display_input->set_texture_num(1, cbcr_display_tex);
	};
	live_frame.ready_fence = fence;
	live_frame.input_frames = {};
	live_frame.temp_textures = { y_display_tex, cbcr_display_tex };
	output_channel[OUTPUT_LIVE].output_frame(move(live_frame));
	// Set up preview and any additional channels.
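	// (Channel 0 is the live output handled above; i == 1 is the preview,
	// and 2 through N + 1 map to the theme's individual channels, hence the
	// upper bound of get_num_channels() + 2.)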
	for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
		DisplayFrame display_frame;
		Theme::Chain chain = theme->get_chain(i, pts(), global_flags.width, global_flags.height, input_state);  // FIXME: dimensions
		display_frame.chain = move(chain.chain);
		display_frame.setup_chain = move(chain.setup_chain);
		display_frame.ready_fence = fence;
		display_frame.input_frames = move(chain.input_frames);
		display_frame.temp_textures = {};
		output_channel[i].output_frame(move(display_frame));
	}
}

void Mixer::audio_thread_func()
{
	pthread_setname_np(pthread_self(), "Mixer_Audio");

	while (!should_quit) {
		AudioTask task;

		{
			unique_lock<mutex> lock(audio_mutex);
			audio_task_queue_changed.wait(lock, [this]{ return should_quit || !audio_task_queue.empty(); });
			if (should_quit) {
				return;
			}
			task = audio_task_queue.front();
			audio_task_queue.pop();
		}

		ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy =
			task.adjust_rate ? ResamplingQueue::ADJUST_RATE : ResamplingQueue::DO_NOT_ADJUST_RATE;
		vector<float> samples_out = audio_mixer->get_output(
			task.frame_timestamp,
			task.num_samples,
			rate_adjustment_policy);
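		// (ADJUST_RATE lets ResamplingQueue nudge its resampling ratio to
		// keep the audio clock locked to the video clock; whether to adjust
		// is decided by whoever enqueued the task.)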
		// Send the samples to the sound card, then add them to the output.
		if (alsa) {
			alsa->write(samples_out);
		}
		if (output_card_index != -1) {
			const int64_t av_delay = lrint(global_flags.audio_queue_length_ms * 0.001 * TIMEBASE);  // Corresponds to the delay in ResamplingQueue.
			cards[output_card_index].output->send_audio(task.pts_int + av_delay, samples_out);
		}
		video_encoder->add_audio(task.pts_int, move(samples_out));
	}
}

void Mixer::release_display_frame(DisplayFrame *frame)
{
	for (GLuint texnum : frame->temp_textures) {
		resource_pool->release_2d_texture(texnum);
	}
	frame->temp_textures.clear();
	frame->ready_fence.reset();
	frame->input_frames.clear();
}

void Mixer::start()
{
	mixer_thread = thread(&Mixer::thread_func, this);
	audio_thread = thread(&Mixer::audio_thread_func, this);
}

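// The audio thread may be blocked waiting on the task queue, so quit()
// sets should_quit before waking it through the condition variable.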
void Mixer::quit()
{
	should_quit = true;
	audio_task_queue_changed.notify_one();
	mixer_thread.join();
	audio_thread.join();
}

void Mixer::transition_clicked(int transition_num)
{
	theme->transition_clicked(transition_num, pts());
}

void Mixer::channel_clicked(int preview_num)
{
	theme->channel_clicked(preview_num);
}

YCbCrInterpretation Mixer::get_input_ycbcr_interpretation(unsigned card_index) const
{
	unique_lock<mutex> lock(card_mutex);
	return ycbcr_interpretation[card_index];
}

void Mixer::set_input_ycbcr_interpretation(unsigned card_index, const YCbCrInterpretation &interpretation)
{
	unique_lock<mutex> lock(card_mutex);
	ycbcr_interpretation[card_index] = interpretation;
}

void Mixer::start_mode_scanning(unsigned card_index)
{
	assert(card_index < num_cards);
	if (is_mode_scanning[card_index]) {
		return;
	}
	is_mode_scanning[card_index] = true;
	mode_scanlist[card_index].clear();
	for (const auto &mode : cards[card_index].capture->get_available_video_modes()) {
		mode_scanlist[card_index].push_back(mode.first);
	}
	assert(!mode_scanlist[card_index].empty());
	mode_scanlist_index[card_index] = 0;
	cards[card_index].capture->set_video_mode(mode_scanlist[card_index][0]);
	last_mode_scan_change[card_index] = steady_clock::now();
}

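// (The cycling itself presumably happens in the capture frame callback: if a
// mode yields no signal within a short interval, it advances to the next
// entry in mode_scanlist, using last_mode_scan_change as the reference time.)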
map<uint32_t, VideoMode> Mixer::get_available_output_video_modes() const
{
	assert(desired_output_card_index != -1);
	unique_lock<mutex> lock(card_mutex);
	return cards[desired_output_card_index].output->get_available_video_modes();
}

string Mixer::get_ffmpeg_filename(unsigned card_index) const
{
	assert(card_index >= num_cards && card_index < num_cards + num_video_inputs);
	return ((FFmpegCapture *)(cards[card_index].capture.get()))->get_filename();
}

void Mixer::set_ffmpeg_filename(unsigned card_index, const string &filename)
{
	assert(card_index >= num_cards && card_index < num_cards + num_video_inputs);
	((FFmpegCapture *)(cards[card_index].capture.get()))->change_filename(filename);
}

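// (FFmpeg-backed video inputs are indexed after the physical capture cards,
// which is why the asserts above require card_index >= num_cards.)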
void Mixer::wait_for_next_frame()
{
	unique_lock<mutex> lock(frame_num_mutex);
	unsigned old_frame_num = frame_num;
	frame_num_updated.wait_for(lock, seconds(1),  // Timeout is just in case.
		[old_frame_num, this]{ return this->frame_num > old_frame_num; });
}

Mixer::OutputChannel::~OutputChannel()
{
	if (has_current_frame) {
		parent->release_display_frame(&current_frame);
	}
	if (has_ready_frame) {
		parent->release_display_frame(&ready_frame);
	}
}

void Mixer::OutputChannel::output_frame(DisplayFrame &&frame)
{
	// Store this frame for display. Remove the ready frame if any
	// (it was seemingly never used).
	{
		unique_lock<mutex> lock(frame_mutex);
		if (has_ready_frame) {
			parent->release_display_frame(&ready_frame);
		}
		ready_frame = move(frame);
		has_ready_frame = true;

		// Call the callbacks under the mutex (they should be short),
		// so that we don't race against a callback removal.
		for (const auto &key_and_callback : new_frame_ready_callbacks) {
			key_and_callback.second();
		}
	}

	// Reduce the number of callbacks by filtering duplicates. The reason
	// why we bother doing this is that Qt seemingly can get into a state
	// where it builds up an essentially unbounded queue of signals,
	// consuming more and more memory, and there's no good way of collapsing
	// user-defined signals or limiting the length of the queue.
	if (transition_names_updated_callback) {
		vector<string> transition_names = global_mixer->get_transition_names();
		bool changed = false;
		if (transition_names.size() != last_transition_names.size()) {
			changed = true;
		} else {
			for (unsigned i = 0; i < transition_names.size(); ++i) {
				if (transition_names[i] != last_transition_names[i]) {
					changed = true;
					break;
				}
			}
		}
		if (changed) {
			transition_names_updated_callback(transition_names);
			last_transition_names = transition_names;
		}
	}
	if (name_updated_callback) {
		string name = global_mixer->get_channel_name(channel);
		if (name != last_name) {
			name_updated_callback(name);
			last_name = name;
		}
	}
	if (color_updated_callback) {
		string color = global_mixer->get_channel_color(channel);
		if (color != last_color) {
			color_updated_callback(color);
			last_color = color;
		}
	}
}

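// ready_frame/current_frame work like a two-slot mailbox: the mixer thread
// publishes into ready_frame, and the reader promotes it to current_frame
// below, so repeated reads between updates return the same stable frame.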
bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame)
{
	unique_lock<mutex> lock(frame_mutex);
	if (!has_current_frame && !has_ready_frame) {
		return false;
	}

	if (has_current_frame && has_ready_frame) {
		// We have a new ready frame. Toss the current one.
		parent->release_display_frame(&current_frame);
		has_current_frame = false;
	}
	if (has_ready_frame) {
		assert(!has_current_frame);
		current_frame = move(ready_frame);
		ready_frame.ready_fence.reset();  // Drop the refcount.
		ready_frame.input_frames.clear();  // Drop the refcounts.
		has_current_frame = true;
		has_ready_frame = false;
	}

	*frame = current_frame;
	return true;
}

void Mixer::OutputChannel::add_frame_ready_callback(void *key, Mixer::new_frame_ready_callback_t callback)
{
	unique_lock<mutex> lock(frame_mutex);
	new_frame_ready_callbacks[key] = callback;
}

void Mixer::OutputChannel::remove_frame_ready_callback(void *key)
{
	unique_lock<mutex> lock(frame_mutex);
	new_frame_ready_callbacks.erase(key);
}

void Mixer::OutputChannel::set_transition_names_updated_callback(Mixer::transition_names_updated_callback_t callback)
{
	transition_names_updated_callback = callback;
}

void Mixer::OutputChannel::set_name_updated_callback(Mixer::name_updated_callback_t callback)
{
	name_updated_callback = callback;
}

void Mixer::OutputChannel::set_color_updated_callback(Mixer::color_updated_callback_t callback)
{
	color_updated_callback = callback;
}

mutex RefCountedGLsync::fence_lock;