+ r128.init(2, OUTPUT_FREQUENCY);
+ r128.integr_start();
+
+ locut.init(FILTER_HPF, 2);
+
+ // hlen=16 is pretty low quality, but we use quite a bit of CPU otherwise,
+ // and there's a limit to how important the peak meter is.
+ peak_resampler.setup(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY * 4, /*num_channels=*/2, /*hlen=*/16);
+
+ alsa.reset(new ALSAOutput(OUTPUT_FREQUENCY, /*num_channels=*/2));
+}
+
Mixer::~Mixer()
{
	// Give back the GLSL program we allocated from the shared resource pool.
	resource_pool->release_glsl_program(cbcr_program_num);
	// Stop the global bmusb USB handling thread before touching the cards.
	BMUSBCapture::stop_bm_thread();

	for (unsigned card_index = 0; card_index < num_cards; ++card_index) {
		{
			// The card's frame-delivery callback (bm_frame) can be blocked
			// waiting on new_data_ready_changed; set should_quit and notify
			// under bmusb_mutex so it wakes up and returns instead of
			// deadlocking against stop_dequeue_thread() below.
			unique_lock<mutex> lock(bmusb_mutex);
			cards[card_index].should_quit = true;  // Unblock thread.
			cards[card_index].new_data_ready_changed.notify_all();
		}
		cards[card_index].usb->stop_dequeue_thread();
	}

	// Destroy the encoder last; presumably this flushes any pending
	// frames before the rest of the mixer state goes away — TODO confirm.
	h264_encoder.reset(nullptr);
}
+
+namespace {
+
// Extend a freshly received 16-bit timecode into the 32-bit space of the
// previous (already unwrapped) timecode. If the new value is not strictly
// greater than the low 16 bits of the previous one, the hardware counter is
// assumed to have wrapped around, so we advance to the next 0x10000 epoch.
int unwrap_timecode(uint16_t current_wrapped, int last)
{
	const int epoch = last & ~0xffff;
	const uint16_t prev_low = last & 0xffff;
	int unwrapped = epoch | current_wrapped;
	if (current_wrapped <= prev_low) {
		unwrapped += 0x10000;  // Counter wrapped (or stood still).
	}
	return unwrapped;
}
+
// Returns the largest absolute sample value in the buffer.
//
// samples:     pointer to num_samples float samples (may be null if
//              num_samples == 0).
// num_samples: number of samples to scan.
//
// Fix: the previous version read samples[0] unconditionally, which was
// undefined behavior for an empty buffer; an empty buffer now yields 0.0f.
// std::abs is used instead of unqualified fabs so the float overload is
// picked portably (no silent float->double round trip).
float find_peak(const float *samples, size_t num_samples)
{
	float peak = 0.0f;
	for (size_t i = 0; i < num_samples; ++i) {
		peak = std::max(peak, std::abs(samples[i]));
	}
	return peak;
}
+
// Splits an interleaved stereo buffer (L R L R ...) into separate left- and
// right-channel buffers. Both output vectors are resized to in.size() / 2;
// a trailing odd sample, if any, is ignored.
void deinterleave_samples(const vector<float> &in, vector<float> *out_l, vector<float> *out_r)
{
	const size_t num_samples = in.size() / 2;
	out_l->resize(num_samples);
	out_r->resize(num_samples);

	for (size_t i = 0; i < num_samples; ++i) {
		(*out_l)[i] = in[i * 2 + 0];
		(*out_r)[i] = in[i * 2 + 1];
	}
}
+
+} // namespace
+
+void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
+ FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
+ FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format)
+{
+ CaptureCard *card = &cards[card_index];
+
+ int width, height, frame_rate_nom, frame_rate_den, extra_lines_top, extra_lines_bottom;
+ bool interlaced;
+
+ decode_video_format(video_format, &width, &height, &extra_lines_top, &extra_lines_bottom,
+ &frame_rate_nom, &frame_rate_den, &interlaced); // Ignore return value for now.
+ int64_t frame_length = TIMEBASE * frame_rate_den / frame_rate_nom;
+
+ size_t num_samples = (audio_frame.len >= audio_offset) ? (audio_frame.len - audio_offset) / 8 / 3 : 0;
+ if (num_samples > OUTPUT_FREQUENCY / 10) {
+ printf("Card %d: Dropping frame with implausible audio length (len=%d, offset=%d) [timecode=0x%04x video_len=%d video_offset=%d video_format=%x)\n",
+ card_index, int(audio_frame.len), int(audio_offset),
+ timecode, int(video_frame.len), int(video_offset), video_format);
+ if (video_frame.owner) {
+ video_frame.owner->release_frame(video_frame);
+ }
+ if (audio_frame.owner) {
+ audio_frame.owner->release_frame(audio_frame);
+ }
+ return;
+ }
+
+ int64_t local_pts = card->next_local_pts;
+ int dropped_frames = 0;
+ if (card->last_timecode != -1) {
+ dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
+ }
+
+ // Convert the audio to stereo fp32 and add it.
+ vector<float> audio;
+ audio.resize(num_samples * 2);
+ convert_fixed24_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, 8, num_samples);
+
+ // Add the audio.
+ {
+ unique_lock<mutex> lock(card->audio_mutex);
+
+ // Number of samples per frame if we need to insert silence.
+ // (Could be nonintegral, but resampling will save us then.)
+ int silence_samples = OUTPUT_FREQUENCY * frame_rate_den / frame_rate_nom;
+
+ if (dropped_frames > MAX_FPS * 2) {
+ fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
+ card_index, card->last_timecode, timecode);
+ card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
+ dropped_frames = 0;
+ } else if (dropped_frames > 0) {
+ // Insert silence as needed.
+ fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
+ card_index, dropped_frames, timecode);
+ vector<float> silence;
+ silence.resize(silence_samples * 2);
+ for (int i = 0; i < dropped_frames; ++i) {
+ card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence_samples);
+ // Note that if the format changed in the meantime, we have
+ // no way of detecting that; we just have to assume the frame length
+ // is always the same.
+ local_pts += frame_length;
+ }
+ }
+ if (num_samples == 0) {
+ audio.resize(silence_samples * 2);
+ num_samples = silence_samples;
+ }
+ card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
+ card->next_local_pts = local_pts + frame_length;
+ }
+
+ card->last_timecode = timecode;
+
+ // Done with the audio, so release it.
+ if (audio_frame.owner) {
+ audio_frame.owner->release_frame(audio_frame);
+ }
+
+ {
+ // Wait until the previous frame was consumed.
+ unique_lock<mutex> lock(bmusb_mutex);
+ card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; });
+ if (card->should_quit) return;
+ }
+
+ if (video_frame.len - video_offset == 0 ||
+ video_frame.len - video_offset != size_t(width * (height + extra_lines_top + extra_lines_bottom) * 2) ||
+ width != WIDTH || height != HEIGHT) { // TODO: Remove this once the rest of the infrastructure is in place.
+ if (video_frame.len != 0) {
+ printf("Card %d: Dropping video frame with wrong length (%ld)\n",
+ card_index, video_frame.len - video_offset);
+ }
+ if (video_frame.owner) {
+ video_frame.owner->release_frame(video_frame);
+ }
+
+ // Still send on the information that we _had_ a frame, even though it's corrupted,
+ // so that pts can go up accordingly.
+ {
+ unique_lock<mutex> lock(bmusb_mutex);
+ card->new_data_ready = true;
+ card->new_frame = RefCountedFrame(FrameAllocator::Frame());
+ card->new_frame_length = frame_length;
+ card->new_data_ready_fence = nullptr;
+ card->dropped_frames = dropped_frames;
+ card->new_data_ready_changed.notify_all();
+ }
+ return;
+ }
+
+ const PBOFrameAllocator::Userdata *userdata = (const PBOFrameAllocator::Userdata *)video_frame.userdata;
+ GLuint pbo = userdata->pbo;