#undef Success

#include "mixer.h"

#include <assert.h>
#include <epoxy/egl.h>
#include <init.h>
#include <movit/effect_chain.h>
#include <movit/effect_util.h>
#include <movit/flat_input.h>
#include <movit/image_format.h>
#include <movit/resource_pool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <util.h>
#include <algorithm>
#include <cmath>
#include <condition_variable>
#include <cstddef>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "bmusb/bmusb.h"
#include "context.h"
#include "defs.h"
#include "h264encode.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_gl_sync.h"
#include "timebase.h"

class QOpenGLContext;

using namespace movit;
using namespace std;
using namespace std::placeholders;

Mixer *global_mixer = nullptr;

namespace {

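// Converts packed signed 24-bit little-endian PCM with in_channels channels
// per sample frame into 32-bit float, keeping only the first out_channels
// channels of each frame.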
void convert_fixed24_to_fp32(float *dst, size_t out_channels, const uint8_t *src, size_t in_channels, size_t num_samples)
{
	for (size_t i = 0; i < num_samples; ++i) {
		for (size_t j = 0; j < out_channels; ++j) {
			uint32_t s1 = *src++;
			uint32_t s2 = *src++;
			uint32_t s3 = *src++;
			uint32_t s = s1 | (s1 << 8) | (s2 << 16) | (s3 << 24);
			dst[i * out_channels + j] = int(s) * (1.0f / 4294967296.0f);
		}
		src += 3 * (in_channels - out_channels);
	}
}

}  // namespace

Mixer::Mixer(const QSurfaceFormat &format, unsigned num_cards)
	: httpd(LOCAL_DUMP_FILE_NAME, WIDTH, HEIGHT),
	  num_cards(num_cards),
	  mixer_surface(create_surface(format)),
	  h264_encoder_surface(create_surface(format)),
	  level_compressor(OUTPUT_FREQUENCY),
	  limiter(OUTPUT_FREQUENCY),
	  compressor(OUTPUT_FREQUENCY)
{
	httpd.start(9095);

	CHECK(init_movit(MOVIT_SHADER_DIR, MOVIT_DEBUG_OFF));
	check_error();

	// Since we allow non-bouncing 4:2:2 YCbCrInputs, effective subpixel precision
	// will be halved when sampling them, and we need to compensate here.
	movit_texel_subpixel_precision /= 2.0;

	resource_pool.reset(new ResourcePool);
	theme.reset(new Theme("theme.lua", resource_pool.get(), num_cards));
	for (unsigned i = 0; i < NUM_OUTPUTS; ++i) {
		output_channel[i].parent = this;
	}

	ImageFormat inout_format;
	inout_format.color_space = COLORSPACE_sRGB;
	inout_format.gamma_curve = GAMMA_sRGB;

	// Display chain; shows the live output produced by the main chain (its RGBA version).
	display_chain.reset(new EffectChain(WIDTH, HEIGHT, resource_pool.get()));
	check_error();
	display_input = new FlatInput(inout_format, FORMAT_RGB, GL_UNSIGNED_BYTE, WIDTH, HEIGHT);  // FIXME: GL_UNSIGNED_BYTE is really wrong.
	display_chain->add_input(display_input);
	display_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
	display_chain->set_dither_bits(0);  // Don't bother.
	display_chain->finalize();

	h264_encoder.reset(new H264Encoder(h264_encoder_surface, WIDTH, HEIGHT, &httpd));

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		printf("Configuring card %d...\n", card_index);
		CaptureCard *card = &cards[card_index];
		card->usb = new BMUSBCapture(card_index);
		card->usb->set_frame_callback(bind(&Mixer::bm_frame, this, card_index, _1, _2, _3, _4, _5, _6, _7));
		card->frame_allocator.reset(new PBOFrameAllocator(8 << 20, WIDTH, HEIGHT));  // 8 MB.
		card->usb->set_video_frame_allocator(card->frame_allocator.get());
		card->surface = create_surface(format);
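		// Give the card's dequeue thread its own GL context when it starts,
		// so that the frame callback (bm_frame) can upload textures from
		// that thread without touching the mixer's context.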
		card->usb->set_dequeue_thread_callbacks(
			[card]{
				eglBindAPI(EGL_OPENGL_API);
				card->context = create_context(card->surface);
				if (!make_current(card->context, card->surface)) {
					printf("failed to create bmusb context\n");
					exit(1);
				}
			},
			[this]{
				resource_pool->clean_context();
			});
		card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
		card->usb->configure_card();
	}

	BMUSBCapture::start_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		cards[card_index].usb->start_bm_capture();
	}

	//chain->enable_phase_timing(true);

	// Set up stuff for NV12 conversion.

	// Cb/Cr shader.
	string cbcr_vert_shader = read_file("vs-cbcr.130.vert");
	string cbcr_frag_shader =
		"#version 130 \n"
		"in vec2 tc0; \n"
		"uniform sampler2D cbcr_tex; \n"
		"void main() { \n"
		"    gl_FragColor = texture2D(cbcr_tex, tc0); \n"
		"} \n";
	cbcr_program_num = resource_pool->compile_glsl_program(cbcr_vert_shader, cbcr_frag_shader);

	r128.init(2, OUTPUT_FREQUENCY);
	r128.integr_start();

	locut.init(FILTER_HPF, 2);

	// hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
	// and there's a limit to how important the peak meter is.
	peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16);

	alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
}

Mixer::~Mixer()
{
	resource_pool->release_glsl_program(cbcr_program_num);
	BMUSBCapture::stop_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		{
			unique_lock<mutex> lock(bmusb_mutex);
			cards[card_index].should_quit = true;  // Unblock thread.
			cards[card_index].new_data_ready_changed.notify_all();
		}
		cards[card_index].usb->stop_dequeue_thread();
	}

	h264_encoder.reset(nullptr);
}

namespace {

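// Extends the card's 16-bit wrapping timecode into a monotonically increasing
// value, assuming it has wrapped at most once since <last>.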
int unwrap_timecode(uint16_t current_wrapped, int last)
{
	uint16_t last_wrapped = last & 0xffff;
	if (current_wrapped > last_wrapped) {
		return (last & ~0xffff) | current_wrapped;
	} else {
		return 0x10000 + ((last & ~0xffff) | current_wrapped);
	}
}

float find_peak(const float *samples, size_t num_samples)
{
	float m = fabs(samples[0]);
	for (size_t i = 1; i < num_samples; ++i) {
		m = std::max(m, fabs(samples[i]));
	}
	return m;
}

void deinterleave_samples(const vector<float> &in, vector<float> *out_l, vector<float> *out_r)
{
	size_t num_samples = in.size() / 2;
	out_l->resize(num_samples);
	out_r->resize(num_samples);

	const float *inptr = in.data();
	float *lptr = &(*out_l)[0];
	float *rptr = &(*out_r)[0];
	for (size_t i = 0; i < num_samples; ++i) {
		*lptr++ = *inptr++;
		*rptr++ = *inptr++;
	}
}

}  // namespace

void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                     FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
                     FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format)
{
	CaptureCard *card = &cards[card_index];

	unsigned width, height, frame_rate_nom, frame_rate_den, extra_lines_top, extra_lines_bottom;
	bool interlaced;

	decode_video_format(video_format, &width, &height, &extra_lines_top, &extra_lines_bottom,
	                    &frame_rate_nom, &frame_rate_den, &interlaced);  // Ignore return value for now.
	int64_t frame_length = TIMEBASE * frame_rate_den / frame_rate_nom;

	size_t num_samples = (audio_frame.len >= audio_offset) ? (audio_frame.len - audio_offset) / 8 / 3 : 0;
	if (num_samples > OUTPUT_FREQUENCY / 10) {
		printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x)\n",
			card_index, int(audio_frame.len), int(audio_offset),
			timecode, int(video_frame.len), int(video_offset), video_format);
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}
		if (audio_frame.owner) {
			audio_frame.owner->release_frame(audio_frame);
		}
		return;
	}

	int64_t local_pts = card->next_local_pts;
	int dropped_frames = 0;
	if (card->last_timecode != -1) {
		dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
	}

	// Convert the audio to stereo fp32 and add it.
	vector<float> audio;
	audio.resize(num_samples * 2);
	convert_fixed24_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, 8, num_samples);

	// Add the audio.
	{
		unique_lock<mutex> lock(card->audio_mutex);

		// Number of samples per frame if we need to insert silence.
		// (Could be nonintegral, but resampling will save us then.)
		int silence_samples = OUTPUT_FREQUENCY * frame_rate_den / frame_rate_nom;

		if (dropped_frames > MAX_FPS * 2) {
			fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
				card_index, card->last_timecode, timecode);
			card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
			dropped_frames = 0;
		} else if (dropped_frames > 0) {
			// Insert silence as needed.
			fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
				card_index, dropped_frames, timecode);
			vector<float> silence;
			silence.resize(silence_samples * 2);
			for (int i = 0; i < dropped_frames; ++i) {
				card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence_samples);
				// Note that if the format changed in the meantime, we have
				// no way of detecting that; we just have to assume the frame length
				// is always the same.
				local_pts += frame_length;
			}
		}
		if (num_samples == 0) {
			audio.resize(silence_samples * 2);
			num_samples = silence_samples;
		}
		card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
		card->next_local_pts = local_pts + frame_length;
	}

	card->last_timecode = timecode;

	// Done with the audio, so release it.
	if (audio_frame.owner) {
		audio_frame.owner->release_frame(audio_frame);
	}

	{
		// Wait until the previous frame was consumed.
		unique_lock<mutex> lock(bmusb_mutex);
		card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; });
		if (card->should_quit) return;
	}

	if (video_frame.len - video_offset == 0 ||
	    video_frame.len - video_offset != size_t(width * (height + extra_lines_top + extra_lines_bottom) * 2)) {
		if (video_frame.len != 0) {
			printf("Card %d: Dropping video frame with wrong length (%ld)\n",
				card_index, video_frame.len - video_offset);
		}
		if (video_frame.owner) {
			video_frame.owner->release_frame(video_frame);
		}

		// Still send on the information that we _had_ a frame, even though it's corrupted,
		// so that pts can go up accordingly.
		{
			unique_lock<mutex> lock(bmusb_mutex);
			card->new_data_ready = true;
			card->new_frame = RefCountedFrame(FrameAllocator::Frame());
			card->new_frame_length = frame_length;
			card->new_data_ready_fence = nullptr;
			card->dropped_frames = dropped_frames;
			card->new_data_ready_changed.notify_all();
		}
		return;
	}

	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;

	// Upload the textures.
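	// Byte offsets into the frame's PBO: the interleaved Cb/Cr plane is read
	// from the first half of the buffer and the Y plane from the second half,
	// skipping any extra (non-picture) lines at the top.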
	size_t cbcr_width = width / 2;
	size_t cbcr_offset = video_offset / 2;
	size_t y_offset = video_frame.size / 2 + video_offset / 2;

	if (width != userdata->last_width || height != userdata->last_height) {
		// We changed resolution since the last use of this texture, so we need to create
		// a new object. Note that since each card has its own PBOFrameAllocator,
		// we don't need to worry about these flip-flopping between resolutions.
		glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr);
		check_error();
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr);
		check_error();
		glBindTexture(GL_TEXTURE_2D, userdata->tex_y);
		check_error();
		glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
		check_error();
		userdata->last_width = width;
		userdata->last_height = height;
	}

	GLuint pbo = userdata->pbo;
	check_error();
	glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
	check_error();
	glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, video_frame.size);
	check_error();
	//glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	//check_error();

	glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr);
	check_error();
	glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, height, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(cbcr_offset + cbcr_width * extra_lines_top * sizeof(uint16_t)));
	check_error();
	glBindTexture(GL_TEXTURE_2D, userdata->tex_y);
	check_error();
	glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(y_offset + width * extra_lines_top));
	check_error();
	glBindTexture(GL_TEXTURE_2D, 0);
	check_error();
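	// Insert a fence into the command stream, so that the mixer thread can
	// wait for the upload to finish before sampling from these textures.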
	GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	check_error();
	assert(fence != nullptr);

	{
		unique_lock<mutex> lock(bmusb_mutex);
		card->new_data_ready = true;
		card->new_frame = RefCountedFrame(video_frame);
		card->new_frame_length = frame_length;
		card->new_data_ready_fence = fence;
		card->dropped_frames = dropped_frames;
		card->new_data_ready_changed.notify_all();
	}
}

void Mixer::thread_func()
{
	eglBindAPI(EGL_OPENGL_API);
	QOpenGLContext *context = create_context(mixer_surface);
	if (!make_current(context, mixer_surface)) {
		printf("oops\n");
		exit(1);
	}

	struct timespec start, now;
	clock_gettime(CLOCK_MONOTONIC, &start);

	int frame = 0;
	int stats_dropped_frames = 0;

	while (!should_quit) {
		CaptureCard card_copy[MAX_CARDS];
		int num_samples[MAX_CARDS];

		{
			unique_lock<mutex> lock(bmusb_mutex);

			// The first card is the master timer, so wait for it to have a new frame.
			// TODO: Make configurable, and with a timeout.
			cards[0].new_data_ready_changed.wait(lock, [this]{ return cards[0].new_data_ready; });

			for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
				CaptureCard *card = &cards[card_index];
				card_copy[card_index].usb = card->usb;
				card_copy[card_index].new_data_ready = card->new_data_ready;
				card_copy[card_index].new_frame = card->new_frame;
				card_copy[card_index].new_frame_length = card->new_frame_length;
				card_copy[card_index].new_data_ready_fence = card->new_data_ready_fence;
				card_copy[card_index].dropped_frames = card->dropped_frames;
				card->new_data_ready = false;
				card->new_data_ready_changed.notify_all();

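				// Compute how many audio samples this frame corresponds to,
				// carrying the fractional remainder (in TIMEBASE units) over
				// to the next frame so that we don't drift.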
				int num_samples_times_timebase = OUTPUT_FREQUENCY * card->new_frame_length + card->fractional_samples;
				num_samples[card_index] = num_samples_times_timebase / TIMEBASE;
				card->fractional_samples = num_samples_times_timebase % TIMEBASE;
				assert(num_samples[card_index] >= 0);
			}
		}

		// Resample the audio as needed, including from previously dropped frames.
		for (unsigned frame_num = 0; frame_num < card_copy[0].dropped_frames + 1; ++frame_num) {
			{
				// Signal to the audio thread to process this frame.
				unique_lock<mutex> lock(audio_mutex);
				audio_task_queue.push(AudioTask{pts_int, num_samples[0]});
				audio_task_queue_changed.notify_one();
			}
			if (frame_num != card_copy[0].dropped_frames) {
				// For dropped frames, increase the pts. Note that if the format changed
				// in the meantime, we have no way of detecting that; we just have to
				// assume the frame length is always the same.
				++stats_dropped_frames;
				pts_int += card_copy[0].new_frame_length;
			}
		}

		if (audio_level_callback != nullptr) {
			double loudness_s = r128.loudness_S();
			double loudness_i = r128.integrated();
			double loudness_range_low = r128.range_min();
			double loudness_range_high = r128.range_max();

			audio_level_callback(loudness_s, 20.0 * log10(peak),
			                     loudness_i, loudness_range_low, loudness_range_high,
			                     last_gain_staging_db);
		}

		for (unsigned card_index = 1; card_index < num_cards; ++card_index) {
			if (card_copy[card_index].new_data_ready && card_copy[card_index].new_frame->len == 0) {
				++card_copy[card_index].dropped_frames;
			}
			if (card_copy[card_index].dropped_frames > 0) {
				printf("Card %u dropped %d frames before this\n",
					card_index, int(card_copy[card_index].dropped_frames));
			}
		}

		// If the first card is reporting a corrupted or otherwise dropped frame,
		// just increase the pts (skipping over this frame) and don't try to compute anything new.
		if (card_copy[0].new_frame->len == 0) {
			++stats_dropped_frames;
			pts_int += card_copy[0].new_frame_length;
			continue;
		}

		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			CaptureCard *card = &card_copy[card_index];
			if (!card->new_data_ready || card->new_frame->len == 0)
				continue;

			assert(card->new_frame != nullptr);
			bmusb_current_rendering_frame[card_index] = card->new_frame;
			check_error();

			// The new texture might not be fully uploaded yet;
			// tell the GPU to wait until it's there.
			if (card->new_data_ready_fence) {
				glWaitSync(card->new_data_ready_fence, /*flags=*/0, GL_TIMEOUT_IGNORED);
				check_error();
				glDeleteSync(card->new_data_ready_fence);
				check_error();
			}
			const PBOFrameAllocator::Userdata *userdata = (const PBOFrameAllocator::Userdata *)card->new_frame->userdata;
			theme->set_input_textures(card_index, userdata->tex_y, userdata->tex_cbcr, userdata->last_width, userdata->last_height);
		}

		// Get the main chain from the theme, and set its state immediately.
		pair<EffectChain *, function<void()>> theme_main_chain = theme->get_chain(0, pts(), WIDTH, HEIGHT);
		EffectChain *chain = theme_main_chain.first;
		theme_main_chain.second();

		GLuint y_tex, cbcr_tex;
		bool got_frame = h264_encoder->begin_frame(&y_tex, &cbcr_tex);
		assert(got_frame);

		// Render main chain.
		GLuint cbcr_full_tex = resource_pool->create_2d_texture(GL_RG8, WIDTH, HEIGHT);
		GLuint rgba_tex = resource_pool->create_2d_texture(GL_RGB565, WIDTH, HEIGHT);  // Saves texture bandwidth, although dithering gets messed up.
		GLuint fbo = resource_pool->create_fbo(y_tex, cbcr_full_tex, rgba_tex);
		check_error();
		chain->render_to_fbo(fbo, WIDTH, HEIGHT);
		resource_pool->release_fbo(fbo);

		subsample_chroma(cbcr_full_tex, cbcr_tex);
		resource_pool->release_2d_texture(cbcr_full_tex);

		// Set the right state for rgba_tex.
		glBindFramebuffer(GL_FRAMEBUFFER, 0);
		glBindTexture(GL_TEXTURE_2D, rgba_tex);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

		RefCountedGLsync fence(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
		check_error();

		// Make sure the H.264 encoder gets a reference to all the
		// input frames needed, so that they are not released back
		// until the rendering is done.
		vector<RefCountedFrame> input_frames;
		for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
			input_frames.push_back(bmusb_current_rendering_frame[card_index]);
		}
		const int64_t av_delay = TIMEBASE / 10;  // Corresponds to the fixed delay in resampling_queue.h. TODO: Make less hard-coded.
		h264_encoder->end_frame(fence, pts_int + av_delay, input_frames);
		++frame;
		pts_int += card_copy[0].new_frame_length;

		// The live frame just shows the RGBA texture we just rendered.
		// It owns rgba_tex now.
		DisplayFrame live_frame;
		live_frame.chain = display_chain.get();
		live_frame.setup_chain = [this, rgba_tex]{
			display_input->set_texture_num(rgba_tex);
		};
		live_frame.ready_fence = fence;
		live_frame.input_frames = {};
		live_frame.temp_textures = { rgba_tex };
		output_channel[OUTPUT_LIVE].output_frame(live_frame);

		// Set up preview and any additional channels.
		for (int i = 1; i < theme->get_num_channels() + 2; ++i) {
			DisplayFrame display_frame;
			pair<EffectChain *, function<void()>> chain = theme->get_chain(i, pts(), WIDTH, HEIGHT);  // FIXME: dimensions
			display_frame.chain = chain.first;
			display_frame.setup_chain = chain.second;
			display_frame.ready_fence = fence;

			// FIXME: possible to do better?
			for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
				display_frame.input_frames.push_back(bmusb_current_rendering_frame[card_index]);
			}
			display_frame.temp_textures = {};
			output_channel[i].output_frame(display_frame);
		}

		clock_gettime(CLOCK_MONOTONIC, &now);
		double elapsed = now.tv_sec - start.tv_sec +
			1e-9 * (now.tv_nsec - start.tv_nsec);
		if (frame % 100 == 0) {
			printf("%d frames (%d dropped) in %.3f seconds = %.1f fps (%.1f ms/frame)\n",
				frame, stats_dropped_frames, elapsed, frame / elapsed,
				1e3 * elapsed / frame);
	//	chain->print_phase_timing();
		}

#if 0
		// Reset every 10000 frames, so that local variations in frame times
		// (especially for the first few frames, when the shaders are
		// compiled etc.) don't make it hard to measure for the entire
		// remaining duration of the program.
		if (frame == 10000) {
			frame = 0;
			start = now;
		}
#endif
		check_error();
	}

	resource_pool->clean_context();
}

void Mixer::audio_thread_func()
{
	while (!should_quit) {
		AudioTask task;

		{
			unique_lock<mutex> lock(audio_mutex);
			audio_task_queue_changed.wait(lock, [this]{ return !audio_task_queue.empty(); });
			task = audio_task_queue.front();
			audio_task_queue.pop();
		}

		process_audio_one_frame(task.pts_int, task.num_samples);
	}
}

void Mixer::process_audio_one_frame(int64_t frame_pts_int, int num_samples)
{
	vector<float> samples_card;
	vector<float> samples_out;
	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		samples_card.resize(num_samples * 2);
		{
			unique_lock<mutex> lock(cards[card_index].audio_mutex);
			if (!cards[card_index].resampling_queue->get_output_samples(double(frame_pts_int) / TIMEBASE, &samples_card[0], num_samples)) {
				printf("Card %d reported previous underrun.\n", card_index);
			}
		}
		// TODO: Allow using audio from the other card(s) as well.
		if (card_index == 0) {
			samples_out = move(samples_card);
		}
	}

	// Cut away everything under 120 Hz (or whatever the cutoff is);
	// we don't need it for voice, and it will reduce headroom
	// and confuse the compressor. (In particular, any hums at 50 or 60 Hz
	// should be dampened.)
	locut.render(samples_out.data(), samples_out.size() / 2, locut_cutoff_hz * 2.0 * M_PI / OUTPUT_FREQUENCY, 0.5f);

	// Apply a level compressor to get the general level right.
	// Basically, if it's over about -40 dBFS, we squeeze it down to that level
	// (or more precisely, near it, since we don't use infinite ratio),
	// then apply a makeup gain to get it to -14 dBFS. -14 dBFS is, of course,
	// entirely arbitrary, but from practical tests with speech, it seems to
	// put us around -23 LUFS, so it's a reasonable starting point for later use.
	float ref_level_dbfs = -14.0f;
	{
		float threshold = 0.01f;   // -40 dBFS.
		float ratio = 20.0f;
		float attack_time = 0.5f;
		float release_time = 20.0f;
		float makeup_gain = pow(10.0f, (ref_level_dbfs - (-40.0f)) / 20.0f);  // +26 dB.
		level_compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
		last_gain_staging_db = 20.0 * log10(level_compressor.get_attenuation() * makeup_gain);
	}

#if 0
	printf("level=%f (%+5.2f dBFS) attenuation=%f (%+5.2f dB) end_result=%+5.2f dB\n",
		level_compressor.get_level(), 20.0 * log10(level_compressor.get_level()),
		level_compressor.get_attenuation(), 20.0 * log10(level_compressor.get_attenuation()),
		20.0 * log10(level_compressor.get_level() * level_compressor.get_attenuation() * makeup_gain));
#endif

//	float limiter_att, compressor_att;

	// The real compressor.
	if (compressor_enabled) {
		float threshold = pow(10.0f, compressor_threshold_dbfs / 20.0f);
		float ratio = 20.0f;
		float attack_time = 0.005f;
		float release_time = 0.040f;
		float makeup_gain = 2.0f;  // +6 dB.
		compressor.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
//		compressor_att = compressor.get_attenuation();
	}

	// Finally a limiter at -4 dB (so, -10 dBFS) to take out the worst peaks only.
	// Note that since ratio is not infinite, we could go slightly higher than this.
	if (limiter_enabled) {
		float threshold = pow(10.0f, limiter_threshold_dbfs / 20.0f);
		float ratio = 30.0f;
		float attack_time = 0.0f;  // Instant.
		float release_time = 0.020f;
		float makeup_gain = 1.0f;  // 0 dB.
		limiter.process(samples_out.data(), samples_out.size() / 2, threshold, ratio, attack_time, release_time, makeup_gain);
//		limiter_att = limiter.get_attenuation();
	}

//	printf("limiter=%+5.1f  compressor=%+5.1f\n", 20.0*log10(limiter_att), 20.0*log10(compressor_att));

	// Upsample 4x to find interpolated peak.
	peak_resampler.inp_data = samples_out.data();
	peak_resampler.inp_count = samples_out.size() / 2;

	vector<float> interpolated_samples_out;
	interpolated_samples_out.resize(samples_out.size());
	while (peak_resampler.inp_count > 0) {  // About four iterations.
		peak_resampler.out_data = &interpolated_samples_out[0];
		peak_resampler.out_count = interpolated_samples_out.size() / 2;
		peak_resampler.process();
		size_t out_stereo_samples = interpolated_samples_out.size() / 2 - peak_resampler.out_count;
		peak = max<float>(peak, find_peak(interpolated_samples_out.data(), out_stereo_samples * 2));
	}

	// Find R128 levels.
	vector<float> left, right;
	deinterleave_samples(samples_out, &left, &right);
	float *ptrs[] = { left.data(), right.data() };
	r128.process(left.size(), ptrs);

	// Send the samples to the sound card.
	if (alsa) {
		alsa->write(samples_out);
	}

	// And finally add them to the output.
	h264_encoder->add_audio(frame_pts_int, move(samples_out));
}

void Mixer::subsample_chroma(GLuint src_tex, GLuint dst_tex)
{
	GLuint vao;
	glGenVertexArrays(1, &vao);
	check_error();

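	// A single oversized triangle covering the entire output (the usual
	// full-screen-triangle trick); the same array also doubles as texture
	// coordinates (see the fill_vertex_attribute() calls below).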
	float vertices[] = {
		0.0f, 2.0f,
		0.0f, 0.0f,
		2.0f, 0.0f
	};

	glBindVertexArray(vao);
	check_error();

	// Extract Cb/Cr.
	GLuint fbo = resource_pool->create_fbo(dst_tex);
	glBindFramebuffer(GL_FRAMEBUFFER, fbo);
	glViewport(0, 0, WIDTH/2, HEIGHT/2);
	check_error();

	glUseProgram(cbcr_program_num);
	check_error();

	glActiveTexture(GL_TEXTURE0);
	check_error();
	glBindTexture(GL_TEXTURE_2D, src_tex);
	check_error();
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	check_error();
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	check_error();
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	check_error();

	float chroma_offset_0[] = { -0.5f / WIDTH, 0.0f };
	set_uniform_vec2(cbcr_program_num, "foo", "chroma_offset_0", chroma_offset_0);

	GLuint position_vbo = fill_vertex_attribute(cbcr_program_num, "position", 2, GL_FLOAT, sizeof(vertices), vertices);
	GLuint texcoord_vbo = fill_vertex_attribute(cbcr_program_num, "texcoord", 2, GL_FLOAT, sizeof(vertices), vertices);  // Same as vertices.

	glDrawArrays(GL_TRIANGLES, 0, 3);
	check_error();

	cleanup_vertex_attribute(cbcr_program_num, "position", position_vbo);
	cleanup_vertex_attribute(cbcr_program_num, "texcoord", texcoord_vbo);

	glUseProgram(0);
	check_error();

	resource_pool->release_fbo(fbo);
	glDeleteVertexArrays(1, &vao);
}

void Mixer::release_display_frame(DisplayFrame *frame)
{
	for (GLuint texnum : frame->temp_textures) {
		resource_pool->release_2d_texture(texnum);
	}
	frame->temp_textures.clear();
	frame->ready_fence.reset();
	frame->input_frames.clear();
}

void Mixer::start()
{
	mixer_thread = thread(&Mixer::thread_func, this);
	audio_thread = thread(&Mixer::audio_thread_func, this);
}

void Mixer::quit()
{
	should_quit = true;
	mixer_thread.join();
	audio_thread.join();
}

void Mixer::transition_clicked(int transition_num)
{
	theme->transition_clicked(transition_num, pts());
}

void Mixer::channel_clicked(int preview_num)
{
	theme->channel_clicked(preview_num);
}

void Mixer::reset_meters()
{
	peak_resampler.reset();
	peak = 0.0f;
	r128.reset();
	r128.integr_start();
}

Mixer::OutputChannel::~OutputChannel()
{
	if (has_current_frame) {
		parent->release_display_frame(&current_frame);
	}
	if (has_ready_frame) {
		parent->release_display_frame(&ready_frame);
	}
}

void Mixer::OutputChannel::output_frame(DisplayFrame frame)
{
	// Store this frame for display. Remove the ready frame if any
	// (it was seemingly never used).
	{
		unique_lock<mutex> lock(frame_mutex);
		if (has_ready_frame) {
			parent->release_display_frame(&ready_frame);
		}
		ready_frame = frame;
		has_ready_frame = true;
	}

	if (has_new_frame_ready_callback) {
		new_frame_ready_callback();
	}
}

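// Returns the most recent frame available for display: if a newer "ready"
// frame has arrived since last time, it is promoted to the current frame
// (and the old current frame is released) before being handed out.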
bool Mixer::OutputChannel::get_display_frame(DisplayFrame *frame)
{
	unique_lock<mutex> lock(frame_mutex);
	if (!has_current_frame && !has_ready_frame) {
		return false;
	}

	if (has_current_frame && has_ready_frame) {
		// We have a new ready frame. Toss the current one.
		parent->release_display_frame(&current_frame);
		has_current_frame = false;
	}
	if (has_ready_frame) {
		assert(!has_current_frame);
		current_frame = ready_frame;
		ready_frame.ready_fence.reset();  // Drop the refcount.
		ready_frame.input_frames.clear();  // Drop the refcounts.
		has_current_frame = true;
		has_ready_frame = false;
	}

	*frame = current_frame;
	return true;
}

void Mixer::OutputChannel::set_frame_ready_callback(Mixer::new_frame_ready_callback_t callback)
{
	new_frame_ready_callback = callback;
	has_new_frame_ready_callback = true;
}