#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <movit/flat_input.h>
#include <movit/image_format.h>
#include <movit/resource_pool.h>
#include <condition_variable>
#include "bmusb/bmusb.h"
#include "h264encode.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
using namespace movit;
using namespace std;
using namespace std::placeholders;

Mixer *global_mixer = nullptr;
void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
	for (size_t i = 0; i < num_samples; ++i) {
		for (size_t j = 0; j < out_channels; ++j) {
			uint32_t s1 = *src++;
			uint32_t s2 = *src++;
			uint32_t s3 = *src++;
			uint32_t s = s1 | (s1 << 8) | (s2 << 16) | (s3 << 24);
			dst[i * out_channels + j] = int(s) * (1.0f / 4294967296.0f);
		}
		src += 3 * (in_channels - out_channels);
	}
}
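
// A worked example of the packing above: the three bytes form a little-endian
// 24-bit sample, placed in the top three bytes of a 32-bit word (with the low
// byte replicated into bits 0-7 instead of zero-padding). The int() cast then
// gives correct sign handling for free. E.g., the bytes (s1, s2, s3) =
// (0x00, 0x00, 0x40) pack to s = 0x40000000, and 0x40000000 * 2^-32 = 0.25.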

Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
	: httpd(LOCAL_DUMP_FILE_NAME, WIDTH, HEIGHT),
	  mixer_surface(create_surface(format)),
	  h264_encoder_surface(create_surface(format)),
	  level_compressor(OUTPUT_FREQUENCY),
	  limiter(OUTPUT_FREQUENCY),
	  compressor(OUTPUT_FREQUENCY)
{
	CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));

	// Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision
	// will be halved when sampling them, and we need to compensate here.
	movit_texel_subpixel_precision /= 2.0;

	resource_pool.reset(new ResourcePool);
	theme.reset(new Theme("theme.lua", resource_pool.get(), num_cards));
	for (unsigned i = 0; i < NUM_OUTPUTS; ++i) {
		output_channel[i].parent = this;
	}

	ImageFormat inout_format;
	inout_format.color_space = COLORSPACE_sRGB;
	inout_format.gamma_curve = GAMMA_sRGB;

	// Display chain; shows the live output produced by the main chain (its RGBA version).
	display_chain.reset(new EffectChain(WIDTH, HEIGHT, resource_pool.get()));
	display_input = new FlatInput(inout_format, FORMAT_RGB, GL_UNSIGNED_BYTE, WIDTH, HEIGHT);  // FIXME: GL_UNSIGNED_BYTE is really wrong.
	display_chain->add_input(display_input);
	display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
	display_chain->set_dither_bits(0);  // Don't bother.
	display_chain->finalize();

	h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, &httpd));

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		printf("Configuring card %d...\n", card_index);
		CaptureCard *card = &cards[card_index];
		card->usb = new BMUSBCapture(card_index);
		card->usb->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
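		// Each 8-bit 4:2:2 pixel takes two bytes (one luma byte plus a shared
		// Cb/Cr byte), and the card delivers EXTRAHEIGHT extra lines outside the
		// active picture, hence WIDTH * (HEIGHT+EXTRAHEIGHT) * 2 below; the
		// trailing "+ 44 + 1" is presumably slack for header bytes in front of
		// the frame data.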
		card->frame_allocator.reset(new PBOFrameAllocator(WIDTH * (HEIGHT+EXTRAHEIGHT) * 2 + 44 + 1, WIDTH, HEIGHT));
		card->usb->set_video_frame_allocator(card->frame_allocator.get());
		card->surface = create_surface(format);
		card->usb->set_dequeue_thread_callbacks(
			[card]{
				eglBindAPI(EGL_OPENGL_API);
				card->context = create_context(card->surface);
				if (!make_current(card->context, card->surface)) {
					printf("failed to create bmusb context\n");
					exit(1);
				}
			},
			[this]{
				resource_pool->clean_context();
			});
		card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
		card->usb->configure_card();
	}

	BMUSBCapture::start_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		cards[card_index].usb->start_bm_capture();
	}

	//chain->enable_phase_timing(true);

	// Set up stuff for NV12 conversion.
	string cbcr_vert_shader = read_file("vs-cbcr.130.vert");
	string cbcr_frag_shader =
		"#version 130 \n"
		"in vec2 tc0; \n"
		"uniform sampler2D cbcr_tex; \n"
		"void main() { \n"
		"	gl_FragColor = texture2D(cbcr_tex, tc0); \n"
		"} \n";
	cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader);
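
	// Note that this fragment shader is just a passthrough of the full-resolution
	// CbCr texture; the actual 2x horizontal and vertical downsampling happens in
	// subsample_chroma(), which renders into a half-size viewport with GL_LINEAR
	// filtering so that neighboring chroma texels get averaged by the sampler.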

	r128.init(2, OUTPUT_FREQUENCY);

	locut.init(FILTER_HPF, 2);

	// hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
	// and there's a limit to how important the peak meter is.
	peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16);

	alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
}

Mixer::~Mixer()
{
	resource_pool->release_glsl_program(cbcr_program_num);
	BMUSBCapture::stop_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		{
			unique_lock<mutex> lock(bmusb_mutex);
			cards[card_index].should_quit = true;  // Unblock thread.
			cards[card_index].new_data_ready_changed.notify_all();
		}
		cards[card_index].usb->stop_dequeue_thread();
	}

	h264_encoder.reset(nullptr);
}

int unwrap_timecode(uint16_t current_wrapped, int last)
{
	uint16_t last_wrapped = last & 0xffff;
	if (current_wrapped > last_wrapped) {
		return (last & ~0xffff) | current_wrapped;
	} else {
		return 0x10000 + ((last & ~0xffff) | current_wrapped);
	}
}
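
// Example: if last == 0x1fffe (wrapped part 0xfffe) and the card now reports
// current_wrapped == 0x0003, the timecode has wrapped past 0xffff, so the
// second branch returns 0x10000 + 0x10003 = 0x20003. The caller thus sees a
// monotonically increasing timecode across 16-bit wraparound.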

float find_peak(const float *samples, size_t num_samples)
{
	float m = fabs(samples[0]);
	for (size_t i = 1; i < num_samples; ++i) {
		m = std::max(m, fabs(samples[i]));
	}
	return m;
}

void deinterleave_samples(const vector<float> &in, vector<float> *out_l, vector<float> *out_r)
{
	size_t num_samples = in.size() / 2;
	out_l->resize(num_samples);
	out_r->resize(num_samples);

	const float *inptr = in.data();
	float *lptr = &(*out_l)[0];
	float *rptr = &(*out_r)[0];
	for (size_t i = 0; i < num_samples; ++i) {
		*lptr++ = *inptr++;
		*rptr++ = *inptr++;
	}
}
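
// The input is interleaved stereo ([L0, R0, L1, R1, ...]); the loop above splits
// it into the two planar channel buffers that the R128 loudness measurement in
// process_audio_one_frame() consumes.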

void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                     FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
                     FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format)
{
	CaptureCard *card = &cards[card_index];

	int width, height, frame_rate_nom, frame_rate_den, extra_lines_top, extra_lines_bottom;
	bool interlaced;
	decode_video_format(video_format, &width, &height, &extra_lines_top, &extra_lines_bottom,
	                    &frame_rate_nom, &frame_rate_den, &interlaced);  // Ignore return value for now.
	int64_t frame_length = TIMEBASE * frame_rate_den / frame_rate_nom;
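
	// frame_length is measured in TIMEBASE units per frame; e.g., a 60000/1001 fps
	// signal gives frame_length = TIMEBASE * 1001 / 60000. The embedded audio that
	// follows is 8 channels of 24-bit (3-byte) samples, which is where the
	// "/ 8 / 3" below comes from; more than OUTPUT_FREQUENCY / 10 samples
	// (100 ms) in one frame is treated as a corrupted frame and dropped.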
	size_t num_samples = (audio_frame.len >= audio_offset) ? (audio_frame.len - audio_offset) / 8 / 3 : 0;
	if (num_samples > OUTPUT_FREQUENCY / 10) {
		printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x]\n",
			card_index, int(audio_frame.len), int(audio_offset),
			timecode, int(video_frame.len), int(video_offset), video_format);
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}
		if (audio_frame.owner) {
			audio_frame.owner->release_frame(audio_frame);
		}
		return;
	}

	int64_t local_pts = card->next_local_pts;
	int dropped_frames = 0;
	if (card->last_timecode != -1) {
		dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
	}

	// Convert the audio to stereo fp32 and add it.
	vector<float> audio;
	audio.resize(num_samples * 2);
	convert_fixed24_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, 8, num_samples);

	{
		unique_lock<mutex> lock(card->audio_mutex);

		// Number of samples per frame if we need to insert silence.
		// (Could be nonintegral, but resampling will save us then.)
		int silence_samples = OUTPUT_FREQUENCY * frame_rate_den / frame_rate_nom;
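
		// E.g., at a 48 kHz OUTPUT_FREQUENCY and a 60000/1001 fps input, this is
		// 48000 * 1001 / 60000 = 800.8, truncated to 800 samples of silence per
		// missing frame; the leftover fraction is smoothed out by the resampler.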

		if (dropped_frames > MAX_FPS * 2) {
			fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
				card_index, card->last_timecode, timecode);
			card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
		} else if (dropped_frames > 0) {
			// Insert silence as needed.
			fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
				card_index, dropped_frames, timecode);
			vector<float> silence;
			silence.resize(silence_samples * 2);
			for (int i = 0; i < dropped_frames; ++i) {
				card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence_samples);
				// Note that if the format changed in the meantime, we have
				// no way of detecting that; we just have to assume the frame length
				// is always the same.
				local_pts += frame_length;
			}
		}

		if (num_samples == 0) {
			audio.resize(silence_samples * 2);
			num_samples = silence_samples;
		}

		card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
		card->next_local_pts = local_pts + frame_length;
	}

	card->last_timecode = timecode;

	// Done with the audio, so release it.
	if (audio_frame.owner) {
		audio_frame.owner->release_frame(audio_frame);
	}

	{
		// Wait until the previous frame was consumed.
		unique_lock<mutex> lock(bmusb_mutex);
		card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; });
		if (card->should_quit) return;
	}

	if (video_frame.len - video_offset != WIDTH * (HEIGHT+EXTRAHEIGHT) * 2) {
		if (video_frame.len != 0) {
			printf("Card %d: Dropping video frame with wrong length (%ld)\n",
				card_index, video_frame.len - video_offset);
		}
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}

		// Still send on the information that we _had_ a frame, even though it's corrupted,
		// so that pts can go up accordingly.
		{
			unique_lock<mutex> lock(bmusb_mutex);
			card->new_data_ready = true;
			card->new_frame = RefCountedFrame(FrameAllocator::Frame());
			card->new_frame_length = frame_length;
			card->new_data_ready_fence = nullptr;
			card->dropped_frames = dropped_frames;
			card->new_data_ready_changed.notify_all();
		}
		return;
	}

	const PBOFrameAllocator::Userdata *userdata = (const PBOFrameAllocator::Userdata *)video_frame.userdata;
	GLuint pbo = userdata->pbo;
	glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
	glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, video_frame.size);
	//glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);

	// Upload the textures.
	size_t skipped_lines = 25;
	size_t cbcr_width = WIDTH / 2;
	size_t cbcr_offset = video_offset / 2;
	size_t y_offset = cbcr_offset + cbcr_width * (HEIGHT + EXTRAHEIGHT) * sizeof(uint16_t) + video_offset / 2;

	glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr);
	glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, HEIGHT, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(cbcr_offset + cbcr_width * skipped_lines * sizeof(uint16_t)));
	glBindTexture(GL_TEXTURE_2D, userdata->tex_y);
	glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, WIDTH, HEIGHT, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(y_offset + WIDTH * skipped_lines));
	glBindTexture(GL_TEXTURE_2D, 0);

	GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	assert(fence != nullptr);

	{
		unique_lock<mutex> lock(bmusb_mutex);
		card->new_data_ready = true;
		card->new_frame = RefCountedFrame(video_frame);
		card->new_frame_length = frame_length;
		card->new_data_ready_fence = fence;
		card->dropped_frames = dropped_frames;
		card->new_data_ready_changed.notify_all();
	}
}

void Mixer::thread_func()
{
	eglBindAPI(EGL_OPENGL_API);
	QOpenGLContext *context = create_context(mixer_surface);
	if (!make_current(context, mixer_surface)) {
		exit(1);  // Cannot continue without a GL context.
	}

	struct timespec start, now;
	clock_gettime(CLOCK_MONOTONIC, &start);

	int frame = 0;
	int stats_dropped_frames = 0;

	while (!should_quit) {
		CaptureCard card_copy[MAX_CARDS];
		int num_samples[MAX_CARDS];

		{
			unique_lock<mutex> lock(bmusb_mutex);

			// The first card is the master timer, so wait for it to have a new frame.
			// TODO: Make configurable, and with a timeout.
			cards[0].new_data_ready_changed.wait(lock, [this]{ return cards[0].new_data_ready; });

			for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
				CaptureCard *card = &cards[card_index];
				card_copy[card_index].usb = card->usb;
				card_copy[card_index].new_data_ready = card->new_data_ready;
				card_copy[card_index].new_frame = card->new_frame;
				card_copy[card_index].new_frame_length = card->new_frame_length;
				card_copy[card_index].new_data_ready_fence = card->new_data_ready_fence;
				card_copy[card_index].dropped_frames = card->dropped_frames;
				card->new_data_ready = false;
				card->new_data_ready_changed.notify_all();

				int num_samples_times_timebase = OUTPUT_FREQUENCY * card->new_frame_length + card->fractional_samples;
				num_samples[card_index] = num_samples_times_timebase / TIMEBASE;
				card->fractional_samples = num_samples_times_timebase % TIMEBASE;
				assert(num_samples[card_index] >= 0);
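
				// Integer division truncates, so the leftover fraction is carried in
				// fractional_samples and added back next frame; over time the number
				// of samples requested per card then averages out to exactly
				// OUTPUT_FREQUENCY instead of drifting by a fraction of a sample per frame.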
			}
		}

		// Resample the audio as needed, including from previously dropped frames.
		for (unsigned frame_num = 0; frame_num < card_copy[0].dropped_frames + 1; ++frame_num) {
			{
				// Signal to the audio thread to process this frame.
				unique_lock<mutex> lock(audio_mutex);
				audio_task_queue.push(AudioTask{pts_int, num_samples[0]});
				audio_task_queue_changed.notify_one();
			}
			if (frame_num != card_copy[0].dropped_frames) {
				// For dropped frames, increase the pts. Note that if the format changed
				// in the meantime, we have no way of detecting that; we just have to
				// assume the frame length is always the same.
				++stats_dropped_frames;
				pts_int += card_copy[0].new_frame_length;
			}
		}

		if (audio_level_callback != nullptr) {
			double loudness_s = r128.loudness_S();
			double loudness_i = r128.integrated();
			double loudness_range_low = r128.range_min();
			double loudness_range_high = r128.range_max();

			audio_level_callback(loudness_s, 20.0 * log10(peak),
				loudness_i, loudness_range_low, loudness_range_high,
				last_gain_staging_db);
		}

		for (unsigned card_index = 1; card_index < num_cards; ++card_index) {
			if (card_copy[card_index].new_data_ready && card_copy[card_index].new_frame->len == 0) {
				++card_copy[card_index].dropped_frames;
			}
			if (card_copy[card_index].dropped_frames > 0) {
				printf("Card %u dropped %d frames before this\n",
					card_index, int(card_copy[card_index].dropped_frames));
			}
		}

		// If the first card is reporting a corrupted or otherwise dropped frame,
		// just increase the pts (skipping over this frame) and don't try to compute anything new.
		if (card_copy[0].new_frame->len == 0) {
			++stats_dropped_frames;
			pts_int += card_copy[0].new_frame_length;
			continue;
		}

		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			CaptureCard *card = &card_copy[card_index];
			if (!card->new_data_ready || card->new_frame->len == 0)
				continue;

			assert(card->new_frame != nullptr);
			bmusb_current_rendering_frame[card_index] = card->new_frame;

			// The new texture might still be uploading;
			// tell the GPU to wait until it's there.
			if (card->new_data_ready_fence) {
				glWaitSync(card->new_data_ready_fence, /*flags=*/0, GL_TIMEOUT_IGNORED);
				glDeleteSync(card->new_data_ready_fence);
			}
			const PBOFrameAllocator::Userdata *userdata = (const PBOFrameAllocator::Userdata *)card->new_frame->userdata;
			theme->set_input_textures(card_index, userdata->tex_y, userdata->tex_cbcr);
		}

		// Get the main chain from the theme, and set its state immediately.
		pair<EffectChain *, function<void()>> theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT);
		EffectChain *chain = theme_main_chain.first;
		theme_main_chain.second();

		GLuint y_tex, cbcr_tex;
		bool got_frame = h264_encoder->begin_frame(&y_tex, &cbcr_tex);
		assert(got_frame);

		// Render main chain.
		GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT);
		GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT);  // Saves texture bandwidth, although dithering gets messed up.
		GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex);
		chain->render_to_fbo(fbo, WIDTH, HEIGHT);
		resource_pool->release_fbo(fbo);

		subsample_chroma(cbcr_full_tex, cbcr_tex);
		resource_pool->release_2d_texture(cbcr_full_tex);

		// Set the right state for rgba_tex.
		glBindFramebuffer(GL_FRAMEBUFFER, 0);
		glBindTexture(GL_TEXTURE_2D, rgba_tex);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

		RefCountedGLsync fence(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);

		// Make sure the H.264 encoder gets a reference to all the
		// input frames needed, so that they are not released back
		// until the rendering is done.
		vector<RefCountedFrame> input_frames;
		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			input_frames.push_back(bmusb_current_rendering_frame[card_index]);
		}
		const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
		h264_encoder->end_frame(fence, pts_int + av_delay, input_frames);

		pts_int += card_copy[0].new_frame_length;

		// The live frame just shows the RGBA texture we just rendered.
		// It owns rgba_tex now.
		DisplayFrame live_frame;
		live_frame.chain = display_chain.get();
		live_frame.setup_chain = [this, rgba_tex]{
			display_input->set_texture_num(rgba_tex);
		};
		live_frame.ready_fence = fence;
		live_frame.input_frames = {};
		live_frame.temp_textures = { rgba_tex };
		output_channel[OUTPUT_LIVE].output_frame(live_frame);

		// Set up preview and any additional channels.
		for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
			DisplayFrame display_frame;
			pair<EffectChain *, function<void()>> chain = theme->get_chain(i, pts(), WIDTH, HEIGHT);  // FIXME: dimensions
			display_frame.chain = chain.first;
			display_frame.setup_chain = chain.second;
			display_frame.ready_fence = fence;

			// FIXME: possible to do better?
			for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
				display_frame.input_frames.push_back(bmusb_current_rendering_frame[card_index]);
			}
			display_frame.temp_textures = {};
			output_channel[i].output_frame(display_frame);
		}

		clock_gettime(CLOCK_MONOTONIC, &now);
		double elapsed = now.tv_sec - start.tv_sec +
			1e-9 * (now.tv_nsec - start.tv_nsec);
		if (frame % 100 == 0) {
			printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)\n",
				frame, stats_dropped_frames, elapsed, frame / elapsed,
				1e3 * elapsed / frame);
			// chain->print_phase_timing();
		}

		// Reset every 10000 frames, so that local variations in frame times
		// (especially for the first few frames, when the shaders are
		// compiled etc.) don't make it hard to measure for the entire
		// remaining duration of the program.
		if (frame == 10000) {
			frame = 0;
			start = now;
		}
	}

	resource_pool->clean_context();
}

void Mixer::audio_thread_func()
{
	while (!should_quit) {
		AudioTask task;

		{
			unique_lock<mutex> lock(audio_mutex);
			audio_task_queue_changed.wait(lock, [this]{ return !audio_task_queue.empty(); });
			task = audio_task_queue.front();
			audio_task_queue.pop();
		}

		process_audio_one_frame(task.pts_int, task.num_samples);
	}
}

void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
{
	vector<float> samples_card;
	vector<float> samples_out;
	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		samples_card.resize(num_samples * 2);
		{
			unique_lock<mutex> lock(cards[card_index].audio_mutex);
			if (!cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], num_samples)) {
				printf("Card %d reported previous underrun.\n", card_index);
			}
		}
		// TODO: Allow using audio from the other card(s) as well.
		if (card_index == 0) {
			samples_out = move(samples_card);
		}
	}

	// Cut away everything under 120 Hz (or whatever the cutoff is);
	// we don't need it for voice, and it will reduce headroom
	// and confuse the compressor. (In particular, any hums at 50 or 60 Hz
	// should be dampened.)
	locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);

	// Apply a level compressor to get the general level right.
	// Basically, if it's over about -40 dBFS, we squeeze it down to that level
	// (or more precisely, near it, since we don't use infinite ratio),
	// then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
	// entirely arbitrary, but from practical tests with speech, it seems to
	// put us around -23 LUFS, so it's a reasonable starting point for later use.
	float ref_level_dbfs = -14.0f;
	float threshold = 0.01f;  // -40 dBFS.
	float attack_time = 0.5f;
	float release_time = 20.0f;
	float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f);  // +26 dB.
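	// The makeup gain works out to pow(10, (-14 - (-40)) / 20) = pow(10, 1.3),
	// which is about 19.95, i.e., roughly a 20x linear gain (the +26 dB above).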
	level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
	last_gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);

	printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n",
		level_compressor.get_level(), 20.0 * log10(level_compressor.get_level()),
		level_compressor.get_attenuation(), 20.0 * log10(level_compressor.get_attenuation()),
		20.0 * log10(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain));

	// float limiter_att, compressor_att;

	// The real compressor.
	if (compressor_enabled) {
		float threshold = pow(10.0f, compressor_threshold_dbfs / 20.0f);
		float attack_time = 0.005f;
		float release_time = 0.040f;
		float makeup_gain = 2.0f;  // +6 dB.
		compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
		// compressor_att = compressor.get_attenuation();
	}

	// Finally a limiter at -4 dB (so, -10 dBFS) to take out the worst peaks only.
	// Note that since ratio is not infinite, we could go slightly higher than this.
	if (limiter_enabled) {
		float threshold = pow(10.0f, limiter_threshold_dbfs / 20.0f);
		float attack_time = 0.0f;  // Instant.
		float release_time = 0.020f;
		float makeup_gain = 1.0f;  // 0 dB.
		limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
		// limiter_att = limiter.get_attenuation();
	}
	// printf("limiter=%+5.1f compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));

	// Upsample 4x to find interpolated peak.
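	// Peaks can fall between samples; upsampling by 4x first gives a better
	// approximation of the continuous waveform, so the peak meter reports
	// something closer to the true inter-sample peak rather than just the
	// largest stored sample value.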
	peak_resampler.inp_data = samples_out.data();
	peak_resampler.inp_count = samples_out.size() / 2;

	vector<float> interpolated_samples_out;
	interpolated_samples_out.resize(samples_out.size());
	while (peak_resampler.inp_count > 0) {  // About four iterations.
		peak_resampler.out_data = &interpolated_samples_out[0];
		peak_resampler.out_count = interpolated_samples_out.size() / 2;
		peak_resampler.process();
		size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
		peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
	}

	vector<float> left, right;
	deinterleave_samples(samples_out, &left, &right);
	float *ptrs[] = { left.data(), right.data() };
	r128.process(left.size(), ptrs);

	// Send the samples to the sound card.
	alsa->write(samples_out);

	// And finally add them to the output.
	h264_encoder->add_audio(frame_pts_int, move(samples_out));
}

void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
{
	GLuint vao;
	glGenVertexArrays(1, &vao);
	glBindVertexArray(vao);

	GLuint fbo = resource_pool->create_fbo(dst_tex);
	glBindFramebuffer(GL_FRAMEBUFFER, fbo);
	glViewport(0, 0, WIDTH/2, HEIGHT/2);

	glUseProgram(cbcr_program_num);

	glActiveTexture(GL_TEXTURE0);
	glBindTexture(GL_TEXTURE_2D, src_tex);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	float chroma_offset_0[] = { -0.5f / WIDTH, 0.0f };
	set_uniform_vec2(cbcr_program_num, "foo", "chroma_offset_0", chroma_offset_0);
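
	// chroma_offset_0 is presumably added to the texture coordinate in
	// vs-cbcr.130.vert; shifting half a full-resolution texel to the left makes
	// each output chroma sample land on the even source column instead of
	// between two columns, i.e., horizontally co-sited chroma (with vertical
	// averaging coming from the half-height viewport and GL_LINEAR filtering),
	// which matches the siting H.264 4:2:0 content conventionally assumes.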

	GLuint position_vbo = fill_vertex_attribute(cbcr_program_num, "position", 2, GL_FLOAT, sizeof(vertices), vertices);
	GLuint texcoord_vbo = fill_vertex_attribute(cbcr_program_num, "texcoord", 2, GL_FLOAT, sizeof(vertices), vertices);  // Same as vertices.

	glDrawArrays(GL_TRIANGLES, 0, 3);
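	// Only three vertices: a single triangle, presumably sized to cover the
	// whole viewport, stands in for a full-screen quad; the position data
	// doubles as texture coordinates (hence the "Same as vertices" note above).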

	cleanup_vertex_attribute(cbcr_program_num, "position", position_vbo);
	cleanup_vertex_attribute(cbcr_program_num, "texcoord", texcoord_vbo);

	resource_pool->release_fbo(fbo);
	glDeleteVertexArrays(1, &vao);
}

void Mixer::release_display_frame(DisplayFrame *frame)
{
	for (GLuint texnum : frame->temp_textures) {
		resource_pool->release_2d_texture(texnum);
	}
	frame->temp_textures.clear();
	frame->ready_fence.reset();
	frame->input_frames.clear();
}

void Mixer::start()
{
	mixer_thread = thread(&Mixer::thread_func, this);
	audio_thread = thread(&Mixer::audio_thread_func, this);
}

void Mixer::transition_clicked(int transition_num)
{
	theme->transition_clicked(transition_num, pts());
}

void Mixer::channel_clicked(int preview_num)
{
	theme->channel_clicked(preview_num);
}

void Mixer::reset_meters()
{
	peak_resampler.reset();
	peak = 0.0f;
}

Mixer::OutputChannel::~OutputChannel()
{
	if (has_current_frame) {
		parent->release_display_frame(&current_frame);
	}
	if (has_ready_frame) {
		parent->release_display_frame(&ready_frame);
	}
}

void Mixer::OutputChannel::output_frame(DisplayFrame frame)
{
	// Store this frame for display. Remove the ready frame if any
	// (it was seemingly never used).
	{
		unique_lock<mutex> lock(frame_mutex);
		if (has_ready_frame) {
			parent->release_display_frame(&ready_frame);
		}
		ready_frame = frame;
		has_ready_frame = true;
	}

	if (has_new_frame_ready_callback) {
		new_frame_ready_callback();
	}
}
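
// Each OutputChannel double-buffers its output: "ready_frame" is the most
// recently rendered frame waiting to be picked up by the GUI, and
// "current_frame" is the one the GUI is showing right now. output_frame()
// above always replaces an unclaimed ready frame so the GUI gets the newest
// one, and get_display_frame() below promotes ready_frame to current_frame.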

bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame)
{
	unique_lock<mutex> lock(frame_mutex);
	if (!has_current_frame && !has_ready_frame) {
		return false;
	}

	if (has_current_frame && has_ready_frame) {
		// We have a new ready frame. Toss the current one.
		parent->release_display_frame(&current_frame);
		has_current_frame = false;
	}
	if (has_ready_frame) {
		assert(!has_current_frame);
		current_frame = ready_frame;
		ready_frame.ready_fence.reset();  // Drop the refcount.
		ready_frame.input_frames.clear();  // Drop the refcounts.
		has_current_frame = true;
		has_ready_frame = false;
	}

	*frame = current_frame;
	return true;
}

void Mixer::OutputChannel::set_frame_ready_callback(Mixer::new_frame_ready_callback_t callback)
{
	new_frame_ready_callback = callback;
	has_new_frame_ready_callback = true;
}