#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <movit/flat_input.h>
#include <movit/image_format.h>
#include <movit/init.h>
#include <movit/resource_pool.h>
#include <movit/util.h>
#include <condition_variable>
#include <algorithm>
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include <assert.h>
#include <epoxy/egl.h>
#include <epoxy/gl.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include "bmusb/bmusb.h"
#include "context.h"
#include "defs.h"
#include "flags.h"
#include "h264encode.h"
#include "mixer.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
#include "timebase.h"
using namespace movit;
using namespace std;
using namespace std::placeholders;

Mixer *global_mixer = nullptr;
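
// Converts the 24-bit little-endian fixed-point samples we get from bmusb
// into packed fp32, keeping only the first out_channels of each group of
// in_channels. The three bytes of each sample are shifted into the top of
// a 32-bit word (the low byte is padded with a copy of s1), which is then
// reinterpreted as a signed integer and scaled by 2^-32.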
void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
	assert(in_channels >= out_channels);
	for (size_t i = 0; i < num_samples; ++i) {
		for (size_t j = 0; j < out_channels; ++j) {
			uint32_t s1 = *src++;
			uint32_t s2 = *src++;
			uint32_t s3 = *src++;
			uint32_t s = s1 | (s1 << 8) | (s2 << 16) | (s3 << 24);
			dst[i * out_channels + j] = int(s) * (1.0f / 4294967296.0f);
		}
		src += 3 * (in_channels - out_channels);
	}
}
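
// Inserts the new frame at the head of the per-card frame history in the
// theme's InputState. For interlaced sources, older fields are shifted down
// so that a deinterlacer can look back at them; for progressive sources, the
// whole history is overwritten with the new frame, since nothing should ever
// need to mix it with older frames.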
void insert_new_frame(RefCountedFrame frame, unsigned field_num, bool interlaced, unsigned card_index, InputState *input_state)
{
	if (interlaced) {
		for (unsigned frame_num = FRAME_HISTORY_LENGTH; frame_num --> 1; ) {  // :-)
			input_state->buffered_frames[card_index][frame_num] =
				input_state->buffered_frames[card_index][frame_num - 1];
		}
		input_state->buffered_frames[card_index][0] = { frame, field_num };
	} else {
		for (unsigned frame_num = 0; frame_num < FRAME_HISTORY_LENGTH; ++frame_num) {
			input_state->buffered_frames[card_index][frame_num] = { frame, field_num };
		}
	}
}

string generate_local_dump_filename(int frame)
{
	time_t now = time(NULL);
	struct tm now_tm;
	localtime_r(&now, &now_tm);

	char timestamp[64];
	strftime(timestamp, sizeof(timestamp), "%F-%T%z", &now_tm);

	// Use the frame number to disambiguate between two cuts starting
	// on the same second.
	char filename[256];
	snprintf(filename, sizeof(filename), "%s%s-f%02d%s",
		LOCAL_DUMP_PREFIX, timestamp, frame % 100, LOCAL_DUMP_SUFFIX);
	return filename;
}

Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
	: httpd(WIDTH, HEIGHT),
	  num_cards(num_cards),
	  mixer_surface(create_surface(format)),
	  h264_encoder_surface(create_surface(format)),
	  correlation(OUTPUT_FREQUENCY),
	  level_compressor(OUTPUT_FREQUENCY),
	  limiter(OUTPUT_FREQUENCY),
	  compressor(OUTPUT_FREQUENCY)
{
	httpd.open_output_file(generate_local_dump_filename(/*frame=*/0).c_str());
	CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
	check_error();

	// Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision
	// will be halved when sampling them, and we need to compensate here.
	movit_texel_subpixel_precision /= 2.0;

	resource_pool.reset(new ResourcePool);
	theme.reset(new Theme("theme.lua", resource_pool.get(), num_cards));
	for (unsigned i = 0; i < NUM_OUTPUTS; ++i) {
		output_channel[i].parent = this;
	}

	ImageFormat inout_format;
	inout_format.color_space = COLORSPACE_sRGB;
	inout_format.gamma_curve = GAMMA_sRGB;

	// Display chain; shows the live output produced by the main chain (its RGBA version).
	display_chain.reset(new EffectChain(WIDTH, HEIGHT, resource_pool.get()));
	check_error();
	display_input = new FlatInput(inout_format, FORMAT_RGB, GL_UNSIGNED_BYTE, WIDTH, HEIGHT);  // FIXME: GL_UNSIGNED_BYTE is really wrong.
	display_chain->add_input(display_input);
	display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
	display_chain->set_dither_bits(0);  // Don't bother.
	display_chain->finalize();

	h264_encoder.reset(new H264Encoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		printf("Configuring card %u...\n", card_index);
		CaptureCard *card = &cards[card_index];
		card->usb = new BMUSBCapture(card_index);
		card->usb->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
		card->frame_allocator.reset(new PBOFrameAllocator(8 << 20, WIDTH, HEIGHT));  // 8 MB.
		card->usb->set_video_frame_allocator(card->frame_allocator.get());
		card->surface = create_surface(format);
		card->usb->set_dequeue_thread_callbacks(
			[card]{
				eglBindAPI(EGL_OPENGL_API);
				card->context = create_context(card->surface);
				if (!make_current(card->context, card->surface)) {
					printf("failed to create bmusb context\n");
					exit(1);
				}
			},
			[this]{
				resource_pool->clean_context();
			});
		card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
		card->usb->configure_card();
	}

	BMUSBCapture::start_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		cards[card_index].usb->start_bm_capture();
	}

	// Set up stuff for NV12 conversion.
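	// The main chain renders straight into a Y texture and a full-resolution
	// interleaved CbCr texture (plus an RGBA copy for the displays). To get
	// NV12 for the encoder, we still have to downsample that CbCr plane 2x2,
	// which is simple enough that we do it with the small hand-written
	// shader pair below instead of setting up a full Movit chain for it.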

	string cbcr_vert_shader =
		"#version 130 \n"
		" \n"
		"in vec2 position; \n"
		"in vec2 texcoord; \n"
		"out vec2 tc0; \n"
		"uniform vec2 foo_chroma_offset_0; \n"
		" \n"
		"void main() \n"
		"{ \n"
		"    // The result of glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0) is: \n"
		"    // \n"
		"    //   2.000  0.000  0.000 -1.000 \n"
		"    //   0.000  2.000  0.000 -1.000 \n"
		"    //   0.000  0.000 -2.000 -1.000 \n"
		"    //   0.000  0.000  0.000  1.000 \n"
		"    gl_Position = vec4(2.0 * position.x - 1.0, 2.0 * position.y - 1.0, -1.0, 1.0); \n"
		"    vec2 flipped_tc = texcoord; \n"
		"    tc0 = flipped_tc + foo_chroma_offset_0; \n"
		"} \n";
	string cbcr_frag_shader =
		"#version 130 \n"
		"in vec2 tc0; \n"
		"uniform sampler2D cbcr_tex; \n"
		"out vec4 FragColor; \n"
		"void main() { \n"
		"    FragColor = texture(cbcr_tex, tc0); \n"
		"} \n";
	vector<string> frag_shader_outputs;
	cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader, frag_shader_outputs);

	// A single triangle covering the whole screen (clip space is reached at
	// 2.0 because of the doubling in the vertex shader above).
	float vertices[] = {
		0.0f, 2.0f,
		0.0f, 0.0f,
		2.0f, 0.0f
	};
	cbcr_vbo = generate_vbo(2, GL_FLOAT, sizeof(vertices), vertices);
	cbcr_position_attribute_index = glGetAttribLocation(cbcr_program_num, "position");
	cbcr_texcoord_attribute_index = glGetAttribLocation(cbcr_program_num, "texcoord");

	r128.init(2, OUTPUT_FREQUENCY);
	r128.integr_start();

	locut.init(FILTER_HPF, 2);

	// hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
	// and there's a limit to how important the peak meter is.
	peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16, /*frel=*/1.0);

	alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
}

Mixer::~Mixer()
{
	resource_pool->release_glsl_program(cbcr_program_num);
	glDeleteBuffers(1, &cbcr_vbo);
	BMUSBCapture::stop_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		{
			unique_lock<mutex> lock(bmusb_mutex);
			cards[card_index].should_quit = true;  // Unblock thread.
			cards[card_index].new_data_ready_changed.notify_all();
		}
		cards[card_index].usb->stop_dequeue_thread();
	}

	h264_encoder.reset(nullptr);
}
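
// Extends a 16-bit timecode from the card into a full (non-wrapping) int,
// assuming it is the successor of <last>, possibly after a wraparound.
// E.g., with last = 0x1fffe: current_wrapped = 0xffff yields 0x1ffff
// (same epoch), while current_wrapped = 0x0003 yields 0x20003.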
int unwrap_timecode(uint16_t current_wrapped, int last)
{
	uint16_t last_wrapped = last & 0xffff;
	if (current_wrapped > last_wrapped) {
		return (last & ~0xffff) | current_wrapped;
	} else {
		return 0x10000 + ((last & ~0xffff) | current_wrapped);
	}
}

float find_peak(const float *samples, size_t num_samples)
{
	float m = fabs(samples[0]);
	for (size_t i = 1; i < num_samples; ++i) {
		m = max(m, fabs(samples[i]));
	}
	return m;
}

void deinterleave_samples(const vector<float> &in, vector<float> *out_l, vector<float> *out_r)
{
	size_t num_samples = in.size() / 2;
	out_l->resize(num_samples);
	out_r->resize(num_samples);

	const float *inptr = in.data();
	float *lptr = &(*out_l)[0];
	float *rptr = &(*out_r)[0];
	for (size_t i = 0; i < num_samples; ++i) {
		*lptr++ = *inptr++;
		*rptr++ = *inptr++;
	}
}
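
// Callback running on the card's bmusb dequeue thread, once per captured
// frame: it validates the frame, pushes the audio into the card's
// ResamplingQueue, uploads the video into per-field textures through the
// frame's PBO, and finally hands the frame over to the mixer thread under
// bmusb_mutex.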
void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                     FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
                     FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format)
{
	CaptureCard *card = &cards[card_index];

	unsigned width, height, second_field_start, frame_rate_nom, frame_rate_den, extra_lines_top, extra_lines_bottom;
	bool interlaced;

	decode_video_format(video_format, &width, &height, &second_field_start, &extra_lines_top, &extra_lines_bottom,
	                    &frame_rate_nom, &frame_rate_den, &interlaced);  // Ignore return value for now.
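	// Length of this frame in TIMEBASE units; e.g., if TIMEBASE is 90000 and
	// the signal is 60000/1001 (59.94) fps, this comes out to
	// 90000 * 1001 / 60000 = 1501 ticks (rounded down), about 16.68 ms.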
	int64_t frame_length = int64_t(TIMEBASE * frame_rate_den) / frame_rate_nom;

	size_t num_samples = (audio_frame.len >= audio_offset) ? (audio_frame.len - audio_offset) / 8 / 3 : 0;  // 8 channels, 3 bytes per sample.
	if (num_samples > OUTPUT_FREQUENCY / 10) {
		printf("Card %u: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x]\n",
		       card_index, int(audio_frame.len), int(audio_offset),
		       timecode, int(video_frame.len), int(video_offset), video_format);
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}
		if (audio_frame.owner) {
			audio_frame.owner->release_frame(audio_frame);
		}
		return;
	}

	int64_t local_pts = card->next_local_pts;
	int dropped_frames = 0;
	if (card->last_timecode != -1) {
		dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
	}

	// Convert the audio to stereo fp32 and add it.
	vector<float> audio;
	audio.resize(num_samples * 2);
	convert_fixed24_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, 8, num_samples);

	{
		unique_lock<mutex> lock(card->audio_mutex);

		// Number of samples per frame if we need to insert silence.
		// (Could be nonintegral, but resampling will save us then.)
		int silence_samples = OUTPUT_FREQUENCY * frame_rate_den / frame_rate_nom;

		if (dropped_frames > MAX_FPS * 2) {
			fprintf(stderr, "Card %u lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
				card_index, card->last_timecode, timecode);
			card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
			dropped_frames = 0;
		} else if (dropped_frames > 0) {
			// Insert silence as needed.
			fprintf(stderr, "Card %u dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
				card_index, dropped_frames, timecode);
			vector<float> silence(silence_samples * 2, 0.0f);
			for (int i = 0; i < dropped_frames; ++i) {
				card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence_samples);
				// Note that if the format changed in the meantime, we have
				// no way of detecting that; we just have to assume the frame length
				// is always the same.
				local_pts += frame_length;
			}
		}

		if (num_samples == 0) {
			audio.resize(silence_samples * 2);
			num_samples = silence_samples;
		}

		card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
		card->next_local_pts = local_pts + frame_length;
	}

	card->last_timecode = timecode;

	// Done with the audio, so release it.
	if (audio_frame.owner) {
		audio_frame.owner->release_frame(audio_frame);
	}

	{
		// Wait until the previous frame was consumed.
		unique_lock<mutex> lock(bmusb_mutex);
		card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; });
		if (card->should_quit) return;
	}

	size_t expected_length = width * (height + extra_lines_top + extra_lines_bottom) * 2;
	if (video_frame.len - video_offset == 0 ||
	    video_frame.len - video_offset != expected_length) {
		if (video_frame.len != 0) {
			printf("Card %u: Dropping video frame with wrong length (%zu; expected %zu)\n",
			       card_index, video_frame.len - video_offset, expected_length);
		}
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}

		// Still send on the information that we _had_ a frame, even though it's corrupted,
		// so that pts can go up accordingly.
		{
			unique_lock<mutex> lock(bmusb_mutex);
			card->new_data_ready = true;
			card->new_frame = RefCountedFrame(FrameAllocator::Frame());
			card->new_frame_length = frame_length;
			card->new_frame_interlaced = false;
			card->new_data_ready_fence = nullptr;
			card->dropped_frames = dropped_frames;
			card->new_data_ready_changed.notify_all();
		}
		return;
	}

	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;

	unsigned num_fields = interlaced ? 2 : 1;
	timespec frame_upload_start;
	if (interlaced) {
		// Send the two fields along as separate frames; the other side will need to add
		// a deinterlacer to actually get this right.
		assert(height % 2 == 0);
		height /= 2;
		assert(frame_length % 2 == 0);
		frame_length /= 2;
		clock_gettime(CLOCK_MONOTONIC, &frame_upload_start);
	}
	userdata->last_interlaced = interlaced;
	userdata->last_frame_rate_nom = frame_rate_nom;
	userdata->last_frame_rate_den = frame_rate_den;
	RefCountedFrame new_frame(video_frame);

	// Upload the textures.
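	// Offsets into the PBO, in bytes. The frame is stored planar in the
	// buffer: the packed CbCr data sits in the first half, and the Y data
	// in the second (hence the video_frame.size / 2 term below).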
	size_t cbcr_width = width / 2;
	size_t cbcr_offset = video_offset / 2;
	size_t y_offset = video_frame.size / 2 + video_offset / 2;

	for (unsigned field = 0; field < num_fields; ++field) {
		unsigned field_start_line = (field == 1) ? second_field_start : extra_lines_top + field * (height + 22);

		if (userdata->tex_y[field] == 0 ||
		    userdata->tex_cbcr[field] == 0 ||
		    width != userdata->last_width[field] ||
		    height != userdata->last_height[field]) {
			// We changed resolution since last use of this texture, so we need to create
			// a new object. Note that since each card has its own PBOFrameAllocator,
			// we don't need to worry about these flip-flopping between resolutions.
			glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
			check_error();
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr);
			check_error();
			glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
			check_error();
			glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
			check_error();
			userdata->last_width[field] = width;
			userdata->last_height[field] = height;
		}

		GLuint pbo = userdata->pbo;
		check_error();
		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
		check_error();
		glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
		check_error();

		glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
		check_error();
		glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, height, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t)));
		check_error();
		glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
		check_error();
		glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(y_offset + width * field_start_line));
		check_error();
		glBindTexture(GL_TEXTURE_2D, 0);
		check_error();
		glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
		check_error();
		GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
		check_error();
		assert(fence != nullptr);
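
		// We don't want to stall this thread on the upload, so instead of
		// glFinish(), we hand the fence to the mixer thread, which issues a
		// server-side glWaitSync() before sampling from the texture.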

		if (field == 1) {
			// Don't upload the second field as fast as we can; wait until
			// the field time has approximately passed. (Otherwise, we could
			// get timing jitter against the other sources, and possibly also
			// against the video display, although the latter is not as critical.)
			// This requires our system clock to be reasonably close to the
			// video clock, but that's not an unreasonable assumption.
			timespec second_field_start;
			second_field_start.tv_nsec = frame_upload_start.tv_nsec +
				frame_length * 1000000000 / TIMEBASE;
			second_field_start.tv_sec = frame_upload_start.tv_sec +
				second_field_start.tv_nsec / 1000000000;
			second_field_start.tv_nsec %= 1000000000;

			// Note that clock_nanosleep() returns the error number directly
			// (it does not set errno), so we check for EINTR, not -1.
			while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
			                       &second_field_start, nullptr) == EINTR) ;
		}

		{
			unique_lock<mutex> lock(bmusb_mutex);
			card->new_data_ready = true;
			card->new_frame = new_frame;
			card->new_frame_length = frame_length;
			card->new_frame_field = field;
			card->new_frame_interlaced = interlaced;
			card->new_data_ready_fence = fence;
			card->dropped_frames = dropped_frames;
			card->new_data_ready_changed.notify_all();

			if (field != num_fields - 1) {
				// Wait until the previous frame was consumed.
				card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; });
				if (card->should_quit) return;
			}
		}
	}
}
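
// The main mixer loop, running on its own thread with its own GL context.
// It paces itself after the master card (card 0), renders one output frame
// per master frame through the theme's main chain, and feeds the result to
// both the H264Encoder and the on-screen display channels.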
void Mixer::thread_func()
{
	eglBindAPI(EGL_OPENGL_API);
	QOpenGLContext *context = create_context(mixer_surface);
	if (!make_current(context, mixer_surface)) {
		printf("failed to create mixer context\n");
		exit(1);
	}

	struct timespec start, now;
	clock_gettime(CLOCK_MONOTONIC, &start);

	int frame = 0;
	int stats_dropped_frames = 0;

	while (!should_quit) {
		CaptureCard card_copy[MAX_CARDS];
		int num_samples[MAX_CARDS];

		{
			unique_lock<mutex> lock(bmusb_mutex);

			// The first card is the master timer, so wait for it to have a new frame.
			// TODO: Make configurable, and with a timeout.
			cards[0].new_data_ready_changed.wait(lock, [this]{ return cards[0].new_data_ready; });

			for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
				CaptureCard *card = &cards[card_index];
				card_copy[card_index].usb = card->usb;
				card_copy[card_index].new_data_ready = card->new_data_ready;
				card_copy[card_index].new_frame = card->new_frame;
				card_copy[card_index].new_frame_length = card->new_frame_length;
				card_copy[card_index].new_frame_field = card->new_frame_field;
				card_copy[card_index].new_frame_interlaced = card->new_frame_interlaced;
				card_copy[card_index].new_data_ready_fence = card->new_data_ready_fence;
				card_copy[card_index].dropped_frames = card->dropped_frames;
				card->new_data_ready = false;
				card->new_data_ready_changed.notify_all();
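
				// Compute how many audio samples we want from this card for
				// this frame, carrying the sub-sample remainder in TIMEBASE
				// units so that rounding errors do not accumulate over time
				// (Bresenham-style accumulation).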
				int num_samples_times_timebase = OUTPUT_FREQUENCY * card->new_frame_length + card->fractional_samples;
				num_samples[card_index] = num_samples_times_timebase / TIMEBASE;
				card->fractional_samples = num_samples_times_timebase % TIMEBASE;
				assert(num_samples[card_index] >= 0);
			}
		}

		// Resample the audio as needed, including from previously dropped frames.
		assert(num_cards > 0);
		for (unsigned frame_num = 0; frame_num < card_copy[0].dropped_frames + 1; ++frame_num) {
			{
				// Signal to the audio thread to process this frame.
				unique_lock<mutex> lock(audio_mutex);
				audio_task_queue.push(AudioTask{pts_int, num_samples[0]});
				audio_task_queue_changed.notify_one();
			}
			if (frame_num != card_copy[0].dropped_frames) {
				// For dropped frames, increase the pts. Note that if the format changed
				// in the meantime, we have no way of detecting that; we just have to
				// assume the frame length is always the same.
				++stats_dropped_frames;
				pts_int += card_copy[0].new_frame_length;
			}
		}

		if (audio_level_callback != nullptr) {
			unique_lock<mutex> lock(compressor_mutex);
			double loudness_s = r128.loudness_S();
			double loudness_i = r128.integrated();
			double loudness_range_low = r128.range_min();
			double loudness_range_high = r128.range_max();

			audio_level_callback(loudness_s, 20.0 * log10(peak),
			                     loudness_i, loudness_range_low, loudness_range_high,
			                     gain_staging_db, 20.0 * log10(final_makeup_gain),
			                     correlation.get_correlation());
		}

		for (unsigned card_index = 1; card_index < num_cards; ++card_index) {
			if (card_copy[card_index].new_data_ready && card_copy[card_index].new_frame->len == 0) {
				++card_copy[card_index].dropped_frames;
			}
			if (card_copy[card_index].dropped_frames > 0) {
				printf("Card %u dropped %d frames before this\n",
				       card_index, int(card_copy[card_index].dropped_frames));
			}
		}

		// If the first card is reporting a corrupted or otherwise dropped frame,
		// just increase the pts (skipping over this frame) and don't try to compute anything new.
		if (card_copy[0].new_frame->len == 0) {
			++stats_dropped_frames;
			pts_int += card_copy[0].new_frame_length;
			continue;
		}

		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			CaptureCard *card = &card_copy[card_index];
			if (!card->new_data_ready || card->new_frame->len == 0)
				continue;

			assert(card->new_frame != nullptr);
			insert_new_frame(card->new_frame, card->new_frame_field, card->new_frame_interlaced, card_index, &input_state);
			check_error();

			// The new texture might still be uploading;
			// tell the GPU to wait until it's there.
			if (card->new_data_ready_fence) {
				glWaitSync(card->new_data_ready_fence, /*flags=*/0, GL_TIMEOUT_IGNORED);
				check_error();
				glDeleteSync(card->new_data_ready_fence);
				check_error();
			}
		}

		// Get the main chain from the theme, and set its state immediately.
		Theme::Chain theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT, input_state);
		EffectChain *chain = theme_main_chain.chain;
		theme_main_chain.setup_chain();
		//theme_main_chain.chain->enable_phase_timing(true);

		GLuint y_tex, cbcr_tex;
		bool got_frame = h264_encoder->begin_frame(&y_tex, &cbcr_tex);
		assert(got_frame);

		// Render main chain.
		GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT);
		GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT);  // Saves texture bandwidth, although dithering gets messed up.
		GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex);
		check_error();
		chain->render_to_fbo(fbo, WIDTH, HEIGHT);
		resource_pool->release_fbo(fbo);

		subsample_chroma(cbcr_full_tex, cbcr_tex);
		resource_pool->release_2d_texture(cbcr_full_tex);

		// Set the right state for rgba_tex.
		glBindFramebuffer(GL_FRAMEBUFFER, 0);
		glBindTexture(GL_TEXTURE_2D, rgba_tex);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

		RefCountedGLsync fence(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
		check_error();

		const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
		h264_encoder->end_frame(fence, pts_int + av_delay, theme_main_chain.input_frames);

		pts_int += card_copy[0].new_frame_length;

		// The live frame just shows the RGBA texture we just rendered.
		// It owns rgba_tex now.
		DisplayFrame live_frame;
		live_frame.chain = display_chain.get();
		live_frame.setup_chain = [this, rgba_tex]{
			display_input->set_texture_num(rgba_tex);
		};
		live_frame.ready_fence = fence;
		live_frame.input_frames = {};
		live_frame.temp_textures = { rgba_tex };
		output_channel[OUTPUT_LIVE].output_frame(live_frame);

		// Set up preview and any additional channels.
		for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
			DisplayFrame display_frame;
			Theme::Chain chain = theme->get_chain(i, pts(), WIDTH, HEIGHT, input_state);  // FIXME: dimensions
			display_frame.chain = chain.chain;
			display_frame.setup_chain = chain.setup_chain;
			display_frame.ready_fence = fence;
			display_frame.input_frames = chain.input_frames;
			display_frame.temp_textures = {};
			output_channel[i].output_frame(display_frame);
		}

		clock_gettime(CLOCK_MONOTONIC, &now);
		double elapsed = now.tv_sec - start.tv_sec +
			1e-9 * (now.tv_nsec - start.tv_nsec);
		if (frame % 100 == 0 && frame != 0) {
			printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)\n",
				frame, stats_dropped_frames, elapsed, frame / elapsed,
				1e3 * elapsed / frame);
			//	chain->print_phase_timing();
		}

		if (should_cut.exchange(false)) {  // Test and clear.
			string filename = generate_local_dump_filename(frame);
			printf("Starting new recording: %s\n", filename.c_str());
			h264_encoder->shutdown();
			httpd.close_output_file();
			httpd.open_output_file(filename.c_str());
			h264_encoder.reset(new H264Encoder(h264_encoder_surface, global_flags.va_display, WIDTH, HEIGHT, &httpd));
		}

		++frame;

		// Reset every 10000 frames, so that local variations in frame times
		// (especially for the first few frames, when the shaders are
		// compiled etc.) don't make it hard to measure for the entire
		// remaining duration of the program.
		if (frame == 10000) {
			frame = 0;
			start = now;
			stats_dropped_frames = 0;
		}
	}

	resource_pool->clean_context();
}

void Mixer::audio_thread_func()
{
	while (!should_quit) {
		AudioTask task;

		{
			unique_lock<mutex> lock(audio_mutex);
			audio_task_queue_changed.wait(lock, [this]{ return !audio_task_queue.empty(); });
			task = audio_task_queue.front();
			audio_task_queue.pop();
		}

		process_audio_one_frame(task.pts_int, task.num_samples);
	}
}

void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
{
	vector<float> samples_card;
	vector<float> samples_out;

	// TODO: Allow mixing audio from several sources.
	unsigned selected_audio_card = theme->map_signal(audio_source_channel);
	assert(selected_audio_card < num_cards);

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		samples_card.resize(num_samples * 2);
		{
			unique_lock<mutex> lock(cards[card_index].audio_mutex);
			if (!cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], num_samples)) {
				printf("Card %u reported previous underrun.\n", card_index);
			}
		}
		if (card_index == selected_audio_card) {
			samples_out = move(samples_card);
		}
	}

	// Cut away everything under 120 Hz (or whatever the cutoff is);
	// we don't need it for voice, and it will reduce headroom
	// and confuse the compressor. (In particular, any hums at 50 or 60 Hz
	// should be dampened.)
	locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);

	// Apply a level compressor to get the general level right.
	// Basically, if it's over about -40 dBFS, we squeeze it down to that level
	// (or more precisely, near it, since we don't use infinite ratio),
	// then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
	// entirely arbitrary, but from practical tests with speech, it seems to
	// put us around -23 LUFS, so it's a reasonable starting point for later use.
	{
		unique_lock<mutex> lock(compressor_mutex);
		if (level_compressor_enabled) {
			float threshold = 0.01f;   // -40 dBFS.
			float ratio = 20.0f;
			float attack_time = 0.5f;
			float release_time = 20.0f;
			float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f);  // +26 dB.
			level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
			gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
		} else {
			// Just apply the gain we already had.
			float g = pow(10.0f, gain_staging_db / 20.0f);
			for (size_t i = 0; i < samples_out.size(); ++i) {
				samples_out[i] *= g;
			}
		}
	}

#if 0
	printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n",
		level_compressor.get_level(), 20.0 * log10(level_compressor.get_level()),
		level_compressor.get_attenuation(), 20.0 * log10(level_compressor.get_attenuation()),
		20.0 * log10(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain));
#endif

	//	float limiter_att, compressor_att;

	// The real compressor.
	if (compressor_enabled) {
		float threshold = pow(10.0f, compressor_threshold_dbfs / 20.0f);
		float ratio = 20.0f;
		float attack_time = 0.005f;
		float release_time = 0.040f;
		float makeup_gain = 2.0f;  // +6 dB.
		compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
	//	compressor_att = compressor.get_attenuation();
	}

	// Finally a limiter at -4 dB (so, -10 dBFS) to take out the worst peaks only.
	// Note that since ratio is not infinite, we could go slightly higher than this.
	if (limiter_enabled) {
		float threshold = pow(10.0f, limiter_threshold_dbfs / 20.0f);
		float ratio = 30.0f;
		float attack_time = 0.0f;  // Instant.
		float release_time = 0.020f;
		float makeup_gain = 1.0f;  // 0 dB.
		limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
	//	limiter_att = limiter.get_attenuation();
	}

	//	printf("limiter=%+5.1f  compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));

	// Upsample 4x to find interpolated peak.
	peak_resampler.inp_data = samples_out.data();
	peak_resampler.inp_count = samples_out.size() / 2;

	vector<float> interpolated_samples_out;
	interpolated_samples_out.resize(samples_out.size());
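	// zita-resampler's streaming interface: each process() call consumes
	// from inp_data/inp_count and produces into out_data/out_count,
	// decrementing both counters, so we simply loop until all of the input
	// has been consumed.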
	while (peak_resampler.inp_count > 0) {  // About four iterations.
		peak_resampler.out_data = &interpolated_samples_out[0];
		peak_resampler.out_count = interpolated_samples_out.size() / 2;
		peak_resampler.process();
		size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
		peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
		peak_resampler.out_data = nullptr;
	}

	// At this point, we are most likely close to +0 LU, but all of our
	// measurements have been on raw sample values, not R128 values.
	// So we have a final makeup gain to get us to +0 LU; the gain
	// adjustments required should be relatively small, and also, the
	// offset shouldn't change much (only if the type of audio changes
	// significantly). Thus, we shoot for updating this value basically
	// “whenever we process buffers”, since the R128 calculation isn't exactly
	// something we get out per-sample.
	//
	// Note that there's a feedback loop here, so we choose a very slow filter
	// (half-time of 100 seconds).
	double target_loudness_factor, alpha;
	{
		unique_lock<mutex> lock(compressor_mutex);
		double loudness_lu = r128.loudness_M() - ref_level_lufs;
		double current_makeup_lu = 20.0f * log10(final_makeup_gain);
		target_loudness_factor = pow(10.0f, -loudness_lu / 20.0f);

		// If we're outside +/- 5 LU uncorrected, we don't count it as
		// a normal signal (probably silence) and don't change the
		// correction factor; just apply what we already have.
		if (fabs(loudness_lu - current_makeup_lu) >= 5.0 || !final_makeup_gain_auto) {
			alpha = 0.0;
		} else {
			// Formula adapted from
			// https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter.
			const double half_time_s = 100.0;
			const double fc_mul_2pi_delta_t = 1.0 / (half_time_s * OUTPUT_FREQUENCY);
			alpha = fc_mul_2pi_delta_t / (fc_mul_2pi_delta_t + 1.0);
		}
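
		// With OUTPUT_FREQUENCY at 48 kHz, a 100-second half-time gives
		// alpha = 1 / (100 * 48000 + 1), i.e., roughly 2e-7 per sample, so
		// the makeup gain crawls toward its target over minutes, not seconds.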

		double m = final_makeup_gain;
		for (size_t i = 0; i < samples_out.size(); i += 2) {
			samples_out[i + 0] *= m;
			samples_out[i + 1] *= m;
			m += (target_loudness_factor - m) * alpha;
		}
		final_makeup_gain = m;
	}

	// Find R128 levels and L/R correlation.
	vector<float> left, right;
	deinterleave_samples(samples_out, &left, &right);
	float *ptrs[] = { left.data(), right.data() };
	{
		unique_lock<mutex> lock(compressor_mutex);
		r128.process(left.size(), ptrs);
		correlation.process_samples(samples_out);
	}

	// Send the samples to the sound card.
	if (alsa) {
		alsa->write(samples_out);
	}

	// And finally add them to the output.
	h264_encoder->add_audio(frame_pts_int, move(samples_out));
}

void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
{
	GLuint vao;
	glGenVertexArrays(1, &vao);
	check_error();

	glBindVertexArray(vao);
	check_error();

	GLuint fbo = resource_pool->create_fbo(dst_tex);
	glBindFramebuffer(GL_FRAMEBUFFER, fbo);
	glViewport(0, 0, WIDTH/2, HEIGHT/2);
	check_error();

	glUseProgram(cbcr_program_num);
	check_error();

	glActiveTexture(GL_TEXTURE0);
	check_error();
	glBindTexture(GL_TEXTURE_2D, src_tex);
	check_error();
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	check_error();
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	check_error();
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	check_error();
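
	// Sample with bilinear filtering, shifted half a source texel to the
	// left: each destination pixel center would otherwise land exactly
	// between two source chroma pixels, so the shift makes every output
	// sample land on the left pixel of the pair, i.e., MPEG-style co-sited
	// chroma horizontally (vertically we keep the centered average).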
	float chroma_offset_0[] = { -0.5f / WIDTH, 0.0f };
	set_uniform_vec2(cbcr_program_num, "foo", "chroma_offset_0", chroma_offset_0);

	glBindBuffer(GL_ARRAY_BUFFER, cbcr_vbo);
	check_error();

	for (GLint attr_index : { cbcr_position_attribute_index, cbcr_texcoord_attribute_index }) {
		glEnableVertexAttribArray(attr_index);
		check_error();
		glVertexAttribPointer(attr_index, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
		check_error();
	}

	glDrawArrays(GL_TRIANGLES, 0, 3);
	check_error();

	for (GLint attr_index : { cbcr_position_attribute_index, cbcr_texcoord_attribute_index }) {
		glDisableVertexAttribArray(attr_index);
		check_error();
	}

	glUseProgram(0);
	check_error();

	glBindFramebuffer(GL_FRAMEBUFFER, 0);
	check_error();
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	check_error();

	resource_pool->release_fbo(fbo);
	glDeleteVertexArrays(1, &vao);
}

void Mixer::release_display_frame(DisplayFrame *frame)
{
	for (GLuint texnum : frame->temp_textures) {
		resource_pool->release_2d_texture(texnum);
	}
	frame->temp_textures.clear();
	frame->ready_fence.reset();
	frame->input_frames.clear();
}

void Mixer::start()
{
	mixer_thread = thread(&Mixer::thread_func, this);
	audio_thread = thread(&Mixer::audio_thread_func, this);
}

void Mixer::transition_clicked(int transition_num)
{
	theme->transition_clicked(transition_num, pts());
}

void Mixer::channel_clicked(int preview_num)
{
	theme->channel_clicked(preview_num);
}

void Mixer::reset_meters()
{
	peak_resampler.reset();
	peak = 0.0f;
	r128.reset();
	r128.integr_start();
	correlation.reset();
}

Mixer::OutputChannel::~OutputChannel()
{
	if (has_current_frame) {
		parent->release_display_frame(&current_frame);
	}
	if (has_ready_frame) {
		parent->release_display_frame(&ready_frame);
	}
}

void Mixer::OutputChannel::output_frame(DisplayFrame frame)
{
	// Store this frame for display. Remove the ready frame if any
	// (it was seemingly never used).
	{
		unique_lock<mutex> lock(frame_mutex);
		if (has_ready_frame) {
			parent->release_display_frame(&ready_frame);
		}
		ready_frame = frame;
		has_ready_frame = true;
	}

	if (has_new_frame_ready_callback) {
		new_frame_ready_callback();
	}
}
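
// Display frames are double-buffered per channel: output_frame() above
// publishes into the "ready" slot, and get_display_frame() below promotes
// ready -> current when the UI asks for something to draw. If the UI never
// picked up the previous ready frame, it is simply dropped.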
bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame)
{
	unique_lock<mutex> lock(frame_mutex);
	if (!has_current_frame && !has_ready_frame) {
		return false;
	}

	if (has_current_frame && has_ready_frame) {
		// We have a new ready frame. Toss the current one.
		parent->release_display_frame(&current_frame);
		has_current_frame = false;
	}
	if (has_ready_frame) {
		assert(!has_current_frame);
		current_frame = ready_frame;
		ready_frame.ready_fence.reset();  // Drop the refcount.
		ready_frame.input_frames.clear();  // Drop the refcounts.
		has_current_frame = true;
		has_ready_frame = false;
	}

	*frame = current_frame;
	return true;
}

void Mixer::OutputChannel::set_frame_ready_callback(Mixer::new_frame_ready_callback_t callback)
{
	new_frame_ready_callback = callback;
	has_new_frame_ready_callback = true;
}