#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <movit/flat_input.h>
#include <movit/image_format.h>
#include <movit/init.h>
#include <movit/resource_pool.h>
#include <movit/util.h>
#include <condition_variable>
#include <arpa/inet.h>
#include <sys/resource.h>

#include "bmusb/bmusb.h"
#include "decklink_capture.h"
#include "fake_capture.h"
#include "video_encoder.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
using namespace movit;
using namespace std;
using namespace std::placeholders;

Mixer *global_mixer = nullptr;
bool uses_mlock = false;
void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
	assert(in_channels >= out_channels);
	for (size_t i = 0; i < num_samples; ++i) {
		for (size_t j = 0; j < out_channels; ++j) {
			uint32_t s1 = *src++;
			uint32_t s2 = *src++;
			uint32_t s3 = *src++;
			uint32_t s = s1 | (s1 << 8) | (s2 << 16) | (s3 << 24);
			dst[i * out_channels + j] = int(s) * (1.0f / 4294967296.0f);
		}
		src += 3 * (in_channels - out_channels);
	}
}
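
// For intuition: the byte replication above fills all 32 bits, so the maximum
// positive 24-bit sample {0xff, 0xff, 0x7f} becomes s = 0x7fffffff, and
// int(s) * (1.0f / 4294967296.0f) =~ 0.5; the full 24-bit range thus maps
// onto roughly [-0.5, 0.5).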

void convert_fixed32_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
	assert(in_channels >= out_channels);
	for (size_t i = 0; i < num_samples; ++i) {
		for (size_t j = 0; j < out_channels; ++j) {
			// Note: Assumes little-endian.
			int32_t s = *(int32_t *)src;
			dst[i * out_channels + j] = s * (1.0f / 4294967296.0f);
			src += 4;
		}
		src += 4 * (in_channels - out_channels);
	}
}

void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced, unsigned card_index, InputState *input_state)
{
	if (interlaced) {
		for (unsigned frame_num = FRAME_HISTORY_LENGTH; frame_num --> 1; ) {  // :-)
			input_state->buffered_frames[card_index][frame_num] =
				input_state->buffered_frames[card_index][frame_num - 1];
		}
		input_state->buffered_frames[card_index][0] = { frame, field_num };
	} else {
		for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
			input_state->buffered_frames[card_index][frame_num] = { frame, field_num };
		}
	}
}
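
// In other words: an interlaced source keeps a sliding window of the last
// FRAME_HISTORY_LENGTH fields (so a deinterlacer can look back in time),
// while a progressive source simply overwrites the whole history with the
// newest frame.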

void QueueLengthPolicy::update_policy(int queue_length)
{
	if (queue_length < 0) {  // Starvation.
		if (been_at_safe_point_since_last_starvation && safe_queue_length < 5) {
			++safe_queue_length;
			fprintf(stderr, "Card %u: Starvation, increasing safe limit to %u frames\n",
				card_index, safe_queue_length);
		}
		frames_with_at_least_one = 0;
		been_at_safe_point_since_last_starvation = false;
		return;
	}
	if (queue_length > 0) {
		if (queue_length >= int(safe_queue_length)) {
			been_at_safe_point_since_last_starvation = true;
		}
		if (++frames_with_at_least_one >= 1000 && safe_queue_length > 0) {
			--safe_queue_length;
			fprintf(stderr, "Card %u: Spare frames for more than 1000 frames, reducing safe limit to %u frames\n",
				card_index, safe_queue_length);
			frames_with_at_least_one = 0;
		}
	} else {
		frames_with_at_least_one = 0;
	}
}
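
// The policy is a simple hysteresis: starving after having been at the safe
// length at least once since the last starvation grows the limit (capped at
// five frames), while having at least one spare frame for 1000 consecutive
// frames shrinks it again, trading queue latency against dropout resistance.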

Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
	: num_cards(num_cards),
	  mixer_surface(create_surface(format)),
	  h264_encoder_surface(create_surface(format)),
	  correlation(OUTPUT_FREQUENCY),
	  level_compressor(OUTPUT_FREQUENCY),
	  limiter(OUTPUT_FREQUENCY),
	  compressor(OUTPUT_FREQUENCY)
{
	CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));

	// Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision
	// will be halved when sampling them, and we need to compensate here.
	movit_texel_subpixel_precision /= 2.0;

	resource_pool.reset(new ResourcePool);
	theme.reset(new Theme(global_flags.theme_filename.c_str(), resource_pool.get(), num_cards));
	for (unsigned i = 0; i < NUM_OUTPUTS; ++i) {
		output_channel[i].parent = this;
		output_channel[i].channel = i;
	}

	ImageFormat inout_format;
	inout_format.color_space = COLORSPACE_sRGB;
	inout_format.gamma_curve = GAMMA_sRGB;

	// Display chain; shows the live output produced by the main chain (its RGBA version).
	display_chain.reset(new EffectChain(WIDTH, HEIGHT, resource_pool.get()));
	display_input = new FlatInput(inout_format, FORMAT_RGB, GL_UNSIGNED_BYTE, WIDTH, HEIGHT);  // FIXME: GL_UNSIGNED_BYTE is really wrong.
	display_chain->add_input(display_input);
	display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
	display_chain->set_dither_bits(0);  // Don't bother.
	display_chain->finalize();

	video_encoder.reset(new VideoEncoder(resource_pool.get(), h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));

	// Start listening for clients only once VideoEncoder has written its header, if any.
	httpd.start(9095);

	// First try initializing the fake devices, then PCI devices, then USB,
	// until we have the desired number of cards.
	unsigned num_pci_devices = 0, num_usb_devices = 0;
	unsigned card_index = 0;

	assert(global_flags.num_fake_cards >= 0);  // Enforced in flags.cpp.
	unsigned num_fake_cards = global_flags.num_fake_cards;
	assert(num_fake_cards <= num_cards);  // Enforced in flags.cpp.
	for ( ; card_index < num_fake_cards; ++card_index) {
		configure_card(card_index, format, new FakeCapture(card_index));
	}
	if (global_flags.num_fake_cards > 0) {
		fprintf(stderr, "Initialized %d fake cards.\n", global_flags.num_fake_cards);
	}

	if (card_index < num_cards) {
		IDeckLinkIterator *decklink_iterator = CreateDeckLinkIteratorInstance();
		if (decklink_iterator != nullptr) {
			for ( ; card_index < num_cards; ++card_index) {
				IDeckLink *decklink;
				if (decklink_iterator->Next(&decklink) != S_OK) {
					break;
				}

				configure_card(card_index, format, new DeckLinkCapture(decklink, card_index - num_fake_cards));
				++num_pci_devices;
			}
			decklink_iterator->Release();
			fprintf(stderr, "Found %d DeckLink PCI card(s).\n", num_pci_devices);
		} else {
			fprintf(stderr, "DeckLink drivers not found. Probing for USB cards only.\n");
		}
	}
	for ( ; card_index < num_cards; ++card_index) {
		configure_card(card_index, format, new BMUSBCapture(card_index - num_pci_devices - num_fake_cards));
		++num_usb_devices;
	}

	if (num_usb_devices > 0) {
		has_bmusb_thread = true;
		BMUSBCapture::start_bm_thread();
	}

	for (card_index = 0; card_index < num_cards; ++card_index) {
		cards[card_index].queue_length_policy.reset(card_index);
		cards[card_index].capture->start_bm_capture();
	}

	// Set up stuff for NV12 conversion.
	string cbcr_vert_shader =
		"#version 130 \n"
		" \n"
		"in vec2 position; \n"
		"in vec2 texcoord; \n"
		"out vec2 tc0; \n"
		"uniform vec2 foo_chroma_offset_0; \n"
		" \n"
		"void main() \n"
		"{ \n"
		"    // The result of glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0) is: \n"
		"    // \n"
		"    //   2.000  0.000  0.000 -1.000 \n"
		"    //   0.000  2.000  0.000 -1.000 \n"
		"    //   0.000  0.000 -2.000 -1.000 \n"
		"    //   0.000  0.000  0.000  1.000 \n"
		"    gl_Position = vec4(2.0 * position.x - 1.0, 2.0 * position.y - 1.0, -1.0, 1.0); \n"
		"    vec2 flipped_tc = texcoord; \n"
		"    tc0 = flipped_tc + foo_chroma_offset_0; \n"
		"} \n";
	string cbcr_frag_shader =
		"#version 130 \n"
		"in vec2 tc0; \n"
		"uniform sampler2D cbcr_tex; \n"
		"out vec4 FragColor; \n"
		"void main() { \n"
		"    FragColor = texture(cbcr_tex, tc0); \n"
		"} \n";
	vector<string> frag_shader_outputs;
	cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader, frag_shader_outputs);

	float vertices[] = {
		0.0f, 2.0f,
		0.0f, 0.0f,
		2.0f, 0.0f
	};
	cbcr_vbo = generate_vbo(2, GL_FLOAT, sizeof(vertices), vertices);
	cbcr_position_attribute_index = glGetAttribLocation(cbcr_program_num, "position");
	cbcr_texcoord_attribute_index = glGetAttribLocation(cbcr_program_num, "texcoord");
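
	// (How this is used: render_one_frame() below has the main chain render
	// luma plus full-resolution chroma, and subsample_chroma() then runs this
	// program to downsample Cb/Cr to half resolution, producing the NV12 data
	// the encoder wants. The single oversized triangle covers the whole
	// viewport without introducing a diagonal seam.)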

	r128.init(2, OUTPUT_FREQUENCY);
	r128.integr_start();

	locut.init(FILTER_HPF, 2);

	// If --flat-audio is given, turn off everything that messes with the sound,
	// except the final makeup gain.
	if (global_flags.flat_audio) {
		set_locut_enabled(false);
		set_gain_staging_auto(false);
		set_limiter_enabled(false);
		set_compressor_enabled(false);
	}

	// hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
	// and there's a limit to how important the peak meter is.
	peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16, /*frel=*/1.0);

	if (global_flags.enable_alsa_output) {
		alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
	}
}

Mixer::~Mixer()
{
	resource_pool->release_glsl_program(cbcr_program_num);
	glDeleteBuffers(1, &cbcr_vbo);

	if (has_bmusb_thread) {
		BMUSBCapture::stop_bm_thread();
	}

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		{
			unique_lock<mutex> lock(bmusb_mutex);
			cards[card_index].should_quit = true;  // Unblock thread.
			cards[card_index].new_frames_changed.notify_all();
		}
		cards[card_index].capture->stop_dequeue_thread();
	}

	video_encoder.reset(nullptr);
}

void Mixer::configure_card(unsigned card_index, const QSurfaceFormat &format, CaptureInterface *capture)
{
	printf("Configuring card %d...\n", card_index);

	CaptureCard *card = &cards[card_index];
	card->capture = capture;
	card->capture->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
	card->frame_allocator.reset(new PBOFrameAllocator(8 << 20, WIDTH, HEIGHT));  // 8 MB.
	card->capture->set_video_frame_allocator(card->frame_allocator.get());
	card->surface = create_surface(format);
	card->resampling_queue.reset(new ResamplingQueue(card_index, OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
	card->capture->configure_card();
}

int unwrap_timecode(uint16_t current_wrapped, int last)
{
	uint16_t last_wrapped = last & 0xffff;
	if (current_wrapped > last_wrapped) {
		return (last & ~0xffff) | current_wrapped;
	} else {
		return 0x10000 + ((last & ~0xffff) | current_wrapped);
	}
}
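
// Example: if last == 0x1fffe (one wrap already seen, low bits 0xfffe) and
// the card now reports 0x0003, the result is 0x20003: the 16-bit counter
// wrapped once more, and the difference (minus one) is the number of
// dropped frames.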

float find_peak(const float *samples, size_t num_samples)
{
	float m = fabs(samples[0]);
	for (size_t i = 1; i < num_samples; ++i) {
		m = max(m, fabs(samples[i]));
	}
	return m;
}

void deinterleave_samples(const vector<float> &in, vector<float> *out_l, vector<float> *out_r)
{
	size_t num_samples = in.size() / 2;
	out_l->resize(num_samples);
	out_r->resize(num_samples);

	const float *inptr = in.data();
	float *lptr = &(*out_l)[0];
	float *rptr = &(*out_r)[0];
	for (size_t i = 0; i < num_samples; ++i) {
		*lptr++ = *inptr++;
		*rptr++ = *inptr++;
	}
}

void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                     FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
                     FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
{
	CaptureCard *card = &cards[card_index];

	if (is_mode_scanning[card_index]) {
		if (video_format.has_signal) {
			// Found a stable signal, so stop scanning.
			is_mode_scanning[card_index] = false;
		} else {
			static constexpr double switch_time_s = 0.5;  // Should be enough time for the signal to stabilize.
			timespec now;
			clock_gettime(CLOCK_MONOTONIC, &now);
			double sec_since_last_switch = (now.tv_sec - last_mode_scan_change[card_index].tv_sec) +
				1e-9 * (now.tv_nsec - last_mode_scan_change[card_index].tv_nsec);
			if (sec_since_last_switch > switch_time_s) {
				// It isn't this mode; try the next one.
				mode_scanlist_index[card_index]++;
				mode_scanlist_index[card_index] %= mode_scanlist[card_index].size();
				cards[card_index].capture->set_video_mode(mode_scanlist[card_index][mode_scanlist_index[card_index]]);
				last_mode_scan_change[card_index] = now;
			}
		}
	}

	int64_t frame_length = int64_t(TIMEBASE) * video_format.frame_rate_den / video_format.frame_rate_nom;
	assert(frame_length > 0);

	size_t num_samples = (audio_frame.len > audio_offset) ? (audio_frame.len - audio_offset) / audio_format.num_channels / (audio_format.bits_per_sample / 8) : 0;
	if (num_samples > OUTPUT_FREQUENCY / 10) {
		printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x]\n",
			card_index, int(audio_frame.len), int(audio_offset),
			timecode, int(video_frame.len), int(video_offset), video_format.id);
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}
		if (audio_frame.owner) {
			audio_frame.owner->release_frame(audio_frame);
		}
		return;
	}

	int64_t local_pts = card->next_local_pts;
	int dropped_frames = 0;
	if (card->last_timecode != -1) {
		dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
	}

	// Convert the audio to stereo fp32 and add it.
	vector<float> audio;
	audio.resize(num_samples * 2);
	switch (audio_format.bits_per_sample) {
	case 0:
		assert(num_samples == 0);
		break;
	case 24:
		convert_fixed24_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, audio_format.num_channels, num_samples);
		break;
	case 32:
		convert_fixed32_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, audio_format.num_channels, num_samples);
		break;
	default:
		fprintf(stderr, "Cannot handle audio with %u bits per sample\n", audio_format.bits_per_sample);
		assert(false);
	}

	// Add the audio.
	{
		unique_lock<mutex> lock(card->audio_mutex);

		// Number of samples per frame if we need to insert silence.
		// (Could be nonintegral, but resampling will save us then.)
		int silence_samples = OUTPUT_FREQUENCY * video_format.frame_rate_den / video_format.frame_rate_nom;
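		// For example, with OUTPUT_FREQUENCY = 48000 and 59.94 fps video
		// (frame_rate_den = 1001, frame_rate_nom = 60000), this is
		// 48000 * 1001 / 60000 = 800.8, truncated to 800 samples; the
		// resampler absorbs the fractional remainder.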

		if (dropped_frames > MAX_FPS * 2) {
			fprintf(stderr, "Card %d lost more than two seconds (or timecode jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
				card_index, card->last_timecode, timecode);
			card->resampling_queue.reset(new ResamplingQueue(card_index, OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
			dropped_frames = 0;
		} else if (dropped_frames > 0) {
			// Insert silence as needed.
			fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
				card_index, dropped_frames, timecode);
			vector<float> silence(silence_samples * 2, 0.0f);
			for (int i = 0; i < dropped_frames; ++i) {
				card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence_samples);
				// Note that if the format changed in the meantime, we have
				// no way of detecting that; we just have to assume the frame length
				// is always the same.
				local_pts += frame_length;
			}
		}

		if (num_samples == 0) {
			audio.resize(silence_samples * 2);
			num_samples = silence_samples;
		}
		card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
		card->next_local_pts = local_pts + frame_length;
	}

	card->last_timecode = timecode;

	// Done with the audio, so release it.
	if (audio_frame.owner) {
		audio_frame.owner->release_frame(audio_frame);
	}

	size_t expected_length = video_format.width * (video_format.height + video_format.extra_lines_top + video_format.extra_lines_bottom) * 2;
	if (video_frame.len - video_offset == 0 ||
	    video_frame.len - video_offset != expected_length) {
		if (video_frame.len != 0) {
			printf("Card %d: Dropping video frame with wrong length (%ld; expected %ld)\n",
				card_index, video_frame.len - video_offset, expected_length);
		}
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}

		// Still send on the information that we _had_ a frame, even though it's corrupted,
		// so that pts can go up accordingly.
		{
			unique_lock<mutex> lock(bmusb_mutex);
			CaptureCard::NewFrame new_frame;
			new_frame.frame = RefCountedFrame(FrameAllocator::Frame());
			new_frame.length = frame_length;
			new_frame.interlaced = false;
			new_frame.dropped_frames = dropped_frames;
			card->new_frames.push(move(new_frame));
			card->new_frames_changed.notify_all();
		}
		return;
	}

	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;

	unsigned num_fields = video_format.interlaced ? 2 : 1;
	timespec frame_upload_start;
	if (video_format.interlaced) {
		// Send the two fields along as separate frames; the other side will need to add
		// a deinterlacer to actually get this right.
		assert(video_format.height % 2 == 0);
		video_format.height /= 2;
		assert(frame_length % 2 == 0);
		frame_length /= 2;
		clock_gettime(CLOCK_MONOTONIC, &frame_upload_start);
	}
	userdata->last_interlaced = video_format.interlaced;
	userdata->last_has_signal = video_format.has_signal;
	userdata->last_frame_rate_nom = video_format.frame_rate_nom;
	userdata->last_frame_rate_den = video_format.frame_rate_den;
	RefCountedFrame frame(video_frame);

	// Upload the textures.
	size_t cbcr_width = video_format.width / 2;
	size_t cbcr_offset = video_offset / 2;
	size_t y_offset = video_frame.size / 2 + video_offset / 2;

	for (unsigned field = 0; field < num_fields; ++field) {
		// Put the actual texture upload in a lambda that is executed in the main thread.
		// It is entirely possible to do this in the same thread (and it might even be
		// faster, depending on the GPU and driver), but it appears to be triggering
		// driver bugs very easily.
		//
		// Note that this means we must hold on to the actual frame data in <userdata>
		// until the upload command is run, but we hold on to <frame> much longer than that
		// (in fact, all the way until we no longer use the texture in rendering).
		auto upload_func = [field, video_format, y_offset, cbcr_offset, cbcr_width, userdata]() {
			unsigned field_start_line = (field == 1) ? video_format.second_field_start : video_format.extra_lines_top + field * (video_format.height + 22);

			if (userdata->tex_y[field] == 0 ||
			    userdata->tex_cbcr[field] == 0 ||
			    video_format.width != userdata->last_width[field] ||
			    video_format.height != userdata->last_height[field]) {
				// We changed resolution since last use of this texture, so we need to create
				// a new object. Note that since each card has its own PBOFrameAllocator,
				// we don't need to worry about these flip-flopping between resolutions.
				glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
				glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, video_format.height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr);
				glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
				glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, video_format.width, video_format.height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
				userdata->last_width[field] = video_format.width;
				userdata->last_height[field] = video_format.height;
			}

			GLuint pbo = userdata->pbo;
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);

			size_t field_y_start = y_offset + video_format.width * field_start_line;
			size_t field_cbcr_start = cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t);

			if (global_flags.flush_pbos) {
				glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, field_y_start, video_format.width * video_format.height);
				glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, field_cbcr_start, cbcr_width * video_format.height * sizeof(uint16_t));
			}

			glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
			glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, video_format.height, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(field_cbcr_start));
			glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
			glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, video_format.width, video_format.height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(field_y_start));
			glBindTexture(GL_TEXTURE_2D, 0);
			glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
		};

		if (field == 1) {
			// Don't upload the second field as fast as we can; wait until
			// the field time has approximately passed. (Otherwise, we could
			// get timing jitter against the other sources, and possibly also
			// against the video display, although the latter is not as critical.)
			// This requires our system clock to be reasonably close to the
			// video clock, but that's not an unreasonable assumption.
			timespec second_field_start;
			second_field_start.tv_nsec = frame_upload_start.tv_nsec +
				frame_length * 1000000000 / TIMEBASE;
			second_field_start.tv_sec = frame_upload_start.tv_sec +
				second_field_start.tv_nsec / 1000000000;
			second_field_start.tv_nsec %= 1000000000;

			while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
			                       &second_field_start, nullptr) == -1 &&
			       errno == EINTR) ;
		}
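		// (The empty-bodied while above just retries the absolute-deadline
		// sleep whenever it is interrupted by a signal, i.e., whenever
		// clock_nanosleep() fails with EINTR.)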

		{
			unique_lock<mutex> lock(bmusb_mutex);
			CaptureCard::NewFrame new_frame;
			new_frame.frame = frame;
			new_frame.length = frame_length;
			new_frame.field = field;
			new_frame.interlaced = video_format.interlaced;
			new_frame.upload_func = upload_func;
			new_frame.dropped_frames = dropped_frames;
			card->new_frames.push(move(new_frame));
			card->new_frames_changed.notify_all();
		}
	}
}

void Mixer::thread_func()
{
	eglBindAPI(EGL_OPENGL_API);
	QOpenGLContext *context = create_context(mixer_surface);
	if (!make_current(context, mixer_surface)) {
		printf("oops\n");
		exit(1);
	}

	struct timespec start, now;
	clock_gettime(CLOCK_MONOTONIC, &start);

	int frame = 0;
	int stats_dropped_frames = 0;

	while (!should_quit) {
		CaptureCard::NewFrame new_frames[MAX_CARDS];
		bool has_new_frame[MAX_CARDS] = { false };
		int num_samples[MAX_CARDS] = { 0 };

		// TODO: Add a timeout.
		unsigned master_card_index = theme->map_signal(master_clock_channel);
		assert(master_card_index < num_cards);

		get_one_frame_from_each_card(master_card_index, new_frames, has_new_frame, num_samples);
		schedule_audio_resampling_tasks(new_frames[master_card_index].dropped_frames, num_samples[master_card_index], new_frames[master_card_index].length);
		stats_dropped_frames += new_frames[master_card_index].dropped_frames;
		send_audio_level_callback();

		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			if (card_index == master_card_index || !has_new_frame[card_index]) {
				continue;
			}
			if (new_frames[card_index].frame->len == 0) {
				++new_frames[card_index].dropped_frames;
			}
			if (new_frames[card_index].dropped_frames > 0) {
				printf("Card %u dropped %d frames before this\n",
					card_index, int(new_frames[card_index].dropped_frames));
			}
		}

		// If the first card is reporting a corrupted or otherwise dropped frame,
		// just increase the pts (skipping over this frame) and don't try to compute anything new.
		if (new_frames[master_card_index].frame->len == 0) {
			++stats_dropped_frames;
			pts_int += new_frames[master_card_index].length;
			continue;
		}

		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			if (!has_new_frame[card_index] || new_frames[card_index].frame->len == 0)
				continue;

			CaptureCard::NewFrame *new_frame = &new_frames[card_index];
			assert(new_frame->frame != nullptr);
			insert_new_frame(new_frame->frame, new_frame->field, new_frame->interlaced, card_index, &input_state);

			// The new texture might need uploading before use.
			if (new_frame->upload_func) {
				new_frame->upload_func();
				new_frame->upload_func = nullptr;
			}
		}

		int64_t duration = new_frames[master_card_index].length;
		render_one_frame(duration);
		++frame;
		pts_int += duration;

		clock_gettime(CLOCK_MONOTONIC, &now);
		double elapsed = now.tv_sec - start.tv_sec +
			1e-9 * (now.tv_nsec - start.tv_nsec);
		if (frame % 100 == 0) {
			printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)",
				frame, stats_dropped_frames, elapsed, frame / elapsed,
				1e3 * elapsed / frame);
		//	chain->print_phase_timing();

			// Check our memory usage, to see if we are close to our mlockall()
			// limit (if at all set).
			rusage used;
			if (getrusage(RUSAGE_SELF, &used) == -1) {
				perror("getrusage(RUSAGE_SELF)");
				assert(false);
			}

			if (uses_mlock) {
				rlimit limit;
				if (getrlimit(RLIMIT_MEMLOCK, &limit) == -1) {
					perror("getrlimit(RLIMIT_MEMLOCK)");
					assert(false);
				}

				printf(", using %ld / %ld MB lockable memory (%.1f%%)",
					long(used.ru_maxrss / 1024),
					long(limit.rlim_cur / 1048576),
					float(100.0 * (used.ru_maxrss * 1024.0) / limit.rlim_cur));
			}

			printf("\n");
		}

		if (should_cut.exchange(false)) {  // Test and clear.
			video_encoder->do_cut(frame);
		}

		// Reset every 10000 frames, so that local variations in frame times
		// (especially for the first few frames, when the shaders are
		// compiled etc.) don't make it hard to measure for the entire
		// remaining duration of the program.
		if (frame == 10000) {
			frame = 0;
			start = now;
		}
	}

	resource_pool->clean_context();
}

void Mixer::get_one_frame_from_each_card(unsigned master_card_index, CaptureCard::NewFrame new_frames[MAX_CARDS], bool has_new_frame[MAX_CARDS], int num_samples[MAX_CARDS])
{
	// The first card is the master timer, so wait for it to have a new frame.
	unique_lock<mutex> lock(bmusb_mutex);
	cards[master_card_index].new_frames_changed.wait(lock, [this, master_card_index]{ return !cards[master_card_index].new_frames.empty(); });

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		CaptureCard *card = &cards[card_index];
		if (card->new_frames.empty()) {
			assert(card_index != master_card_index);
			card->queue_length_policy.update_policy(-1);
			continue;
		}
		new_frames[card_index] = move(card->new_frames.front());
		has_new_frame[card_index] = true;
		card->new_frames.pop();
		card->new_frames_changed.notify_all();

		int num_samples_times_timebase = OUTPUT_FREQUENCY * new_frames[card_index].length + card->fractional_samples;
		num_samples[card_index] = num_samples_times_timebase / TIMEBASE;
		card->fractional_samples = num_samples_times_timebase % TIMEBASE;
		assert(num_samples[card_index] >= 0);
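		// This is a Bresenham-style accumulator: the remainder modulo TIMEBASE
		// carries over to the next frame, so the long-run average number of
		// samples per frame is exact even when OUTPUT_FREQUENCY * length is
		// not divisible by TIMEBASE.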

		if (card_index == master_card_index) {
			// We don't use the queue length policy for the master card,
			// but we will if it stops being the master. Thus, clear out
			// the policy in case we switch in the future.
			card->queue_length_policy.reset(card_index);
		} else {
			// If we have excess frames compared to the policy for this card,
			// drop frames from the head.
			card->queue_length_policy.update_policy(card->new_frames.size());
			while (card->new_frames.size() > card->queue_length_policy.get_safe_queue_length()) {
				card->new_frames.pop();
			}
		}
	}
}

void Mixer::schedule_audio_resampling_tasks(unsigned dropped_frames, int num_samples_per_frame, int length_per_frame)
{
	// Resample the audio as needed, including from previously dropped frames.
	assert(num_cards > 0);
	for (unsigned frame_num = 0; frame_num < dropped_frames + 1; ++frame_num) {
		{
			// Signal to the audio thread to process this frame.
			unique_lock<mutex> lock(audio_mutex);
			audio_task_queue.push(AudioTask{pts_int, num_samples_per_frame});
			audio_task_queue_changed.notify_one();
		}
		if (frame_num != dropped_frames) {
			// For dropped frames, increase the pts. Note that if the format changed
			// in the meantime, we have no way of detecting that; we just have to
			// assume the frame length is always the same.
			pts_int += length_per_frame;
		}
	}
}

void Mixer::render_one_frame(int64_t duration)
{
	// Get the main chain from the theme, and set its state immediately.
	Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state);
	EffectChain *chain = theme_main_chain.chain;
	theme_main_chain.setup_chain();
	//theme_main_chain.chain->enable_phase_timing(true);

	GLuint y_tex, cbcr_tex;
	bool got_frame = video_encoder->begin_frame(&y_tex, &cbcr_tex);
	assert(got_frame);

	// Render main chain.
	GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT);
	GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT);  // Saves texture bandwidth, although dithering gets messed up.
	GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex);
	chain->render_to_fbo(fbo, WIDTH, HEIGHT);
	resource_pool->release_fbo(fbo);

	subsample_chroma(cbcr_full_tex, cbcr_tex);
	resource_pool->release_2d_texture(cbcr_full_tex);

	// Set the right state for rgba_tex.
	glBindFramebuffer(GL_FRAMEBUFFER, 0);
	glBindTexture(GL_TEXTURE_2D, rgba_tex);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
	RefCountedGLsync fence = video_encoder->end_frame(pts_int + av_delay, duration, theme_main_chain.input_frames);

	// The live frame just shows the RGBA texture we just rendered.
	// It owns rgba_tex now.
	DisplayFrame live_frame;
	live_frame.chain = display_chain.get();
	live_frame.setup_chain = [this, rgba_tex]{
		display_input->set_texture_num(rgba_tex);
	};
	live_frame.ready_fence = fence;
	live_frame.input_frames = {};
	live_frame.temp_textures = { rgba_tex };
	output_channel[OUTPUT_LIVE].output_frame(live_frame);

	// Set up preview and any additional channels.
	for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
		DisplayFrame display_frame;
		Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state);  // FIXME: dimensions
		display_frame.chain = chain.chain;
		display_frame.setup_chain = chain.setup_chain;
		display_frame.ready_fence = fence;
		display_frame.input_frames = chain.input_frames;
		display_frame.temp_textures = {};
		output_channel[i].output_frame(display_frame);
	}
}

void Mixer::send_audio_level_callback()
{
	if (audio_level_callback == nullptr) {
		return;
	}

	unique_lock<mutex> lock(compressor_mutex);
	double loudness_s = r128.loudness_S();
	double loudness_i = r128.integrated();
	double loudness_range_low = r128.range_min();
	double loudness_range_high = r128.range_max();

	audio_level_callback(loudness_s, 20.0 * log10(peak),
		loudness_i, loudness_range_low, loudness_range_high,
		gain_staging_db, 20.0 * log10(final_makeup_gain),
		correlation.get_correlation());
}

void Mixer::audio_thread_func()
{
	while (!should_quit) {
		AudioTask task;

		{
			unique_lock<mutex> lock(audio_mutex);
			audio_task_queue_changed.wait(lock, [this]{ return should_quit || !audio_task_queue.empty(); });
			if (should_quit) {
				return;
			}
			task = audio_task_queue.front();
			audio_task_queue.pop();
		}

		process_audio_one_frame(task.pts_int, task.num_samples);
	}
}

void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
{
	vector<float> samples_card;
	vector<float> samples_out;

	// TODO: Allow mixing audio from several sources.
	unsigned selected_audio_card = theme->map_signal(audio_source_channel);
	assert(selected_audio_card < num_cards);

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		samples_card.resize(num_samples * 2);
		{
			unique_lock<mutex> lock(cards[card_index].audio_mutex);
			cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], num_samples);
		}
		if (card_index == selected_audio_card) {
			samples_out = move(samples_card);
		}
	}

	// Cut away everything under 120 Hz (or whatever the cutoff is);
	// we don't need it for voice, and it will reduce headroom
	// and confuse the compressor. (In particular, any hums at 50 or 60 Hz
	// should be dampened.)
	if (locut_enabled) {
		locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);
	}

	// Apply a level compressor to get the general level right.
	// Basically, if it's over about -40 dBFS, we squeeze it down to that level
	// (or more precisely, near it, since we don't use infinite ratio),
	// then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
	// entirely arbitrary, but from practical tests with speech, it seems to
	// put us around -23 LUFS, so it's a reasonable starting point for later use.
	{
		unique_lock<mutex> lock(compressor_mutex);
		if (level_compressor_enabled) {
			float threshold = 0.01f;   // -40 dBFS.
			float ratio = 20.0f;
			float attack_time = 0.5f;
			float release_time = 20.0f;
			float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f);  // +26 dB.
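			// (Checking the arithmetic: 20 * log10(0.01) = -40, matching the
			// threshold comment, and with ref_level_dbfs = -14.0f the makeup
			// gain is pow(10, 26/20) =~ 20x, i.e., the +26 dB noted above.)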
			level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
			gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
		} else {
			// Just apply the gain we already had.
			float g = pow(10.0f, gain_staging_db / 20.0f);
			for (size_t i = 0; i < samples_out.size(); ++i) {
				samples_out[i] *= g;
			}
		}
	}

#if 0
	printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n",
		level_compressor.get_level(), 20.0 * log10(level_compressor.get_level()),
		level_compressor.get_attenuation(), 20.0 * log10(level_compressor.get_attenuation()),
		20.0 * log10(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain));
#endif

	// float limiter_att, compressor_att;

	// The real compressor.
	if (compressor_enabled) {
		float threshold = pow(10.0f, compressor_threshold_dbfs / 20.0f);
		float ratio = 20.0f;
		float attack_time = 0.005f;
		float release_time = 0.040f;
		float makeup_gain = 2.0f;  // +6 dB.
		compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
		// compressor_att = compressor.get_attenuation();
	}

	// Finally a limiter at -4 dB (so, -10 dBFS) to take out the worst peaks only.
	// Note that since ratio is not infinite, we could go slightly higher than this.
	if (limiter_enabled) {
		float threshold = pow(10.0f, limiter_threshold_dbfs / 20.0f);
		float ratio = 30.0f;
		float attack_time = 0.0f;  // Instant.
		float release_time = 0.020f;
		float makeup_gain = 1.0f;  // 0 dB.
		limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
		// limiter_att = limiter.get_attenuation();
	}

	// printf("limiter=%+5.1f compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));

	// Upsample 4x to find interpolated peak.
	peak_resampler.inp_data = samples_out.data();
	peak_resampler.inp_count = samples_out.size() / 2;

	vector<float> interpolated_samples_out;
	interpolated_samples_out.resize(samples_out.size());
	while (peak_resampler.inp_count > 0) {  // About four iterations.
		peak_resampler.out_data = &interpolated_samples_out[0];
		peak_resampler.out_count = interpolated_samples_out.size() / 2;
		peak_resampler.process();
		size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
		peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
		peak_resampler.out_data = nullptr;
	}
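
	// (zita-resampler convention: process() decrements inp_count and out_count
	// as it consumes input and produces output, so the number of stereo samples
	// actually produced in an iteration is the buffer capacity minus whatever
	// is left in out_count.)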

	// At this point, we are most likely close to +0 LU, but all of our
	// measurements have been on raw sample values, not R128 values.
	// So we have a final makeup gain to get us to +0 LU; the gain
	// adjustments required should be relatively small, and also, the
	// offset shouldn't change much (only if the type of audio changes
	// significantly). Thus, we shoot for updating this value basically
	// “whenever we process buffers”, since the R128 calculation isn't exactly
	// something we get out per-sample.
	//
	// Note that there's a feedback loop here, so we choose a very slow filter
	// (half-time of 100 seconds).
	double target_loudness_factor, alpha;
	{
		unique_lock<mutex> lock(compressor_mutex);
		double loudness_lu = r128.loudness_M() - ref_level_lufs;
		double current_makeup_lu = 20.0f * log10(final_makeup_gain);
		target_loudness_factor = pow(10.0f, -loudness_lu / 20.0f);

		// If we're outside +/- 5 LU uncorrected, we don't count it as
		// a normal signal (probably silence) and don't change the
		// correction factor; just apply what we already have.
		if (fabs(loudness_lu - current_makeup_lu) >= 5.0 || !final_makeup_gain_auto) {
			alpha = 0.0;
		} else {
			// Formula adapted from
			// https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter.
			const double half_time_s = 100.0;
			const double fc_mul_2pi_delta_t = 1.0 / (half_time_s * OUTPUT_FREQUENCY);
			alpha = fc_mul_2pi_delta_t / (fc_mul_2pi_delta_t + 1.0);
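			// (Order of magnitude: with half_time_s = 100.0 and OUTPUT_FREQUENCY
			// = 48000, alpha is roughly 2e-7 per sample, so the correction factor
			// moves extremely slowly; this is what keeps the feedback loop
			// mentioned above stable.)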
		}

		double m = final_makeup_gain;
		for (size_t i = 0; i < samples_out.size(); i += 2) {
			samples_out[i + 0] *= m;
			samples_out[i + 1] *= m;
			m += (target_loudness_factor - m) * alpha;
		}
		final_makeup_gain = m;
	}

	// Find R128 levels and L/R correlation.
	vector<float> left, right;
	deinterleave_samples(samples_out, &left, &right);
	float *ptrs[] = { left.data(), right.data() };
	{
		unique_lock<mutex> lock(compressor_mutex);
		r128.process(left.size(), ptrs);
		correlation.process_samples(samples_out);
	}

	// Send the samples to the sound card.
	if (alsa) {
		alsa->write(samples_out);
	}

	// And finally add them to the output.
	video_encoder->add_audio(frame_pts_int, move(samples_out));
}

void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
{
	GLuint vao;
	glGenVertexArrays(1, &vao);
	glBindVertexArray(vao);

	GLuint fbo = resource_pool->create_fbo(dst_tex);
	glBindFramebuffer(GL_FRAMEBUFFER, fbo);
	glViewport(0, 0, WIDTH/2, HEIGHT/2);

	glUseProgram(cbcr_program_num);

	glActiveTexture(GL_TEXTURE0);
	glBindTexture(GL_TEXTURE_2D, src_tex);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	float chroma_offset_0[] = { -0.5f / WIDTH, 0.0f };
	set_uniform_vec2(cbcr_program_num, "foo", "chroma_offset_0", chroma_offset_0);
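	// The -0.5f / WIDTH offset shifts the sampling point half a source texel to
	// the left, so each output chroma sample lands exactly on the left pixel of
	// each source pair instead of midway between the two; i.e., the subsampled
	// chroma stays co-sited with the even luma columns (MPEG-style siting).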

	glBindBuffer(GL_ARRAY_BUFFER, cbcr_vbo);

	for (GLint attr_index : { cbcr_position_attribute_index, cbcr_texcoord_attribute_index }) {
		glEnableVertexAttribArray(attr_index);
		glVertexAttribPointer(attr_index, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
	}

	glDrawArrays(GL_TRIANGLES, 0, 3);

	for (GLint attr_index : { cbcr_position_attribute_index, cbcr_texcoord_attribute_index }) {
		glDisableVertexAttribArray(attr_index);
	}

	glUseProgram(0);
	glBindFramebuffer(GL_FRAMEBUFFER, 0);
	resource_pool->release_fbo(fbo);
	glDeleteVertexArrays(1, &vao);
}

void Mixer::release_display_frame(DisplayFrame *frame)
{
	for (GLuint texnum : frame->temp_textures) {
		resource_pool->release_2d_texture(texnum);
	}
	frame->temp_textures.clear();
	frame->ready_fence.reset();
	frame->input_frames.clear();
}

void Mixer::start()
{
	mixer_thread = thread(&Mixer::thread_func, this);
	audio_thread = thread(&Mixer::audio_thread_func, this);
}

void Mixer::quit()
{
	should_quit = true;
	audio_task_queue_changed.notify_one();
	mixer_thread.join();
	audio_thread.join();
}

void Mixer::transition_clicked(int transition_num)
{
	theme->transition_clicked(transition_num, pts());
}

void Mixer::channel_clicked(int preview_num)
{
	theme->channel_clicked(preview_num);
}

void Mixer::reset_meters()
{
	peak_resampler.reset();
	peak = 0.0f;
	r128.reset();
	r128.integr_start();
	correlation.reset();
}

void Mixer::start_mode_scanning(unsigned card_index)
{
	assert(card_index < num_cards);
	if (is_mode_scanning[card_index]) {
		return;
	}
	is_mode_scanning[card_index] = true;
	mode_scanlist[card_index].clear();
	for (const auto &mode : cards[card_index].capture->get_available_video_modes()) {
		mode_scanlist[card_index].push_back(mode.first);
	}
	assert(!mode_scanlist[card_index].empty());
	mode_scanlist_index[card_index] = 0;
	cards[card_index].capture->set_video_mode(mode_scanlist[card_index][0]);
	clock_gettime(CLOCK_MONOTONIC, &last_mode_scan_change[card_index]);
}

Mixer::OutputChannel::~OutputChannel()
{
	if (has_current_frame) {
		parent->release_display_frame(&current_frame);
	}
	if (has_ready_frame) {
		parent->release_display_frame(&ready_frame);
	}
}

void Mixer::OutputChannel::output_frame(DisplayFrame frame)
{
	// Store this frame for display. Remove the ready frame if any
	// (it was seemingly never used).
	{
		unique_lock<mutex> lock(frame_mutex);
		if (has_ready_frame) {
			parent->release_display_frame(&ready_frame);
		}
		ready_frame = frame;
		has_ready_frame = true;
	}

	if (new_frame_ready_callback) {
		new_frame_ready_callback();
	}

	// Reduce the number of callbacks by filtering duplicates. The reason
	// why we bother doing this is that Qt seemingly can get into a state
	// where it builds up an essentially unbounded queue of signals,
	// consuming more and more memory, and there's no good way of collapsing
	// user-defined signals or limiting the length of the queue.
	if (transition_names_updated_callback) {
		vector<string> transition_names = global_mixer->get_transition_names();
		bool changed = false;
		if (transition_names.size() != last_transition_names.size()) {
			changed = true;
		} else {
			for (unsigned i = 0; i < transition_names.size(); ++i) {
				if (transition_names[i] != last_transition_names[i]) {
					changed = true;
					break;
				}
			}
		}
		if (changed) {
			transition_names_updated_callback(transition_names);
			last_transition_names = transition_names;
		}
	}
	if (name_updated_callback) {
		string name = global_mixer->get_channel_name(channel);
		if (name != last_name) {
			name_updated_callback(name);
			last_name = name;
		}
	}
	if (color_updated_callback) {
		string color = global_mixer->get_channel_color(channel);
		if (color != last_color) {
			color_updated_callback(color);
			last_color = color;
		}
	}
}

bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame)
{
	unique_lock<mutex> lock(frame_mutex);
	if (!has_current_frame && !has_ready_frame) {
		return false;
	}

	if (has_current_frame && has_ready_frame) {
		// We have a new ready frame. Toss the current one.
		parent->release_display_frame(&current_frame);
		has_current_frame = false;
	}
	if (has_ready_frame) {
		assert(!has_current_frame);
		current_frame = ready_frame;
		ready_frame.ready_fence.reset();  // Drop the refcount.
		ready_frame.input_frames.clear();  // Drop the refcounts.
		has_current_frame = true;
		has_ready_frame = false;
	}

	*frame = current_frame;
	return true;
}

void Mixer::OutputChannel::set_frame_ready_callback(Mixer::new_frame_ready_callback_t callback)
{
	new_frame_ready_callback = callback;
}

void Mixer::OutputChannel::set_transition_names_updated_callback(Mixer::transition_names_updated_callback_t callback)
{
	transition_names_updated_callback = callback;
}

void Mixer::OutputChannel::set_name_updated_callback(Mixer::name_updated_callback_t callback)
{
	name_updated_callback = callback;
}

void Mixer::OutputChannel::set_color_updated_callback(Mixer::color_updated_callback_t callback)
{
	color_updated_callback = callback;
}

mutex RefCountedGLsync::fence_lock;