+ return;
+ }
+
+ int64_t local_pts = card->next_local_pts;
+ int dropped_frames = 0;
+ if (card->last_timecode != -1) {
+ dropped_frames = unwrap_timecode(timecode, card->last_timecode) - card->last_timecode - 1;
+ }
+
+ // Convert the audio to stereo fp32 and add it.
+ vector<float> audio;
+ audio.resize(num_samples * 2);
+ convert_fixed24_to_fp32(&audio[0], 2, audio_frame.data + audio_offset, 8, num_samples);
+
+ // Add the audio.
+ {
+ unique_lock<mutex> lock(card->audio_mutex);
+
+ // Number of samples per frame if we need to insert silence.
+ // (Could be nonintegral, but resampling will save us then.)
+ int silence_samples = OUTPUT_FREQUENCY * frame_rate_den / frame_rate_nom;
+
+ if (dropped_frames > MAX_FPS * 2) {
+ fprintf(stderr, "Card %d lost more than two seconds (or time code jumping around; from 0x%04x to 0x%04x), resetting resampler\n",
+ card_index, card->last_timecode, timecode);
+ card->resampling_queue.reset(new ResamplingQueue(OUTPUT_FREQUENCY, OUTPUT_FREQUENCY, 2));
+ dropped_frames = 0;
+ } else if (dropped_frames > 0) {
+ // Insert silence as needed.
+ fprintf(stderr, "Card %d dropped %d frame(s) (before timecode 0x%04x), inserting silence.\n",
+ card_index, dropped_frames, timecode);
+ vector<float> silence;
+ silence.resize(silence_samples * 2);
+ for (int i = 0; i < dropped_frames; ++i) {
+ card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), silence.data(), silence_samples);
+ // Note that if the format changed in the meantime, we have
+ // no way of detecting that; we just have to assume the frame length
+ // is always the same.
+ local_pts += frame_length;
+ }
+ }
+ if (num_samples == 0) {
+ audio.resize(silence_samples * 2);
+ num_samples = silence_samples;
+ }
+ card->resampling_queue->add_input_samples(local_pts / double(TIMEBASE), audio.data(), num_samples);
+ card->next_local_pts = local_pts + frame_length;
+ }
+
+ card->last_timecode = timecode;
+
+ // Done with the audio, so release it.
+ if (audio_frame.owner) {
+ audio_frame.owner->release_frame(audio_frame);
+ }
+
+ {
+ // Wait until the previous frame was consumed.
+ unique_lock<mutex> lock(bmusb_mutex);
+ card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; });
+ if (card->should_quit) return;
+ }
+
+ if (video_frame.len - video_offset == 0 ||
+ video_frame.len - video_offset != size_t(width * (height + extra_lines_top + extra_lines_bottom) * 2)) {
+ if (video_frame.len != 0) {
+ printf("Card %d: Dropping video frame with wrong length (%ld)\n",
+ card_index, video_frame.len - video_offset);
+ }
+ if (video_frame.owner) {
+ video_frame.owner->release_frame(video_frame);
+ }
+
+ // Still send on the information that we _had_ a frame, even though it's corrupted,
+ // so that pts can go up accordingly.
+ {
+ unique_lock<mutex> lock(bmusb_mutex);
+ card->new_data_ready = true;
+ card->new_frame = RefCountedFrame(FrameAllocator::Frame());
+ card->new_frame_length = frame_length;
+ card->new_data_ready_fence = nullptr;
+ card->dropped_frames = dropped_frames;
+ card->new_data_ready_changed.notify_all();
+ }
+ return;
+ }
+
+ PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;
+ GLuint pbo = userdata->pbo;
+ check_error();
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
+ check_error();
+ glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, video_frame.size);
+ check_error();
+ //glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+ //check_error();
+
+ // Upload the textures.
+ size_t cbcr_width = width / 2;
+ size_t cbcr_offset = video_offset / 2;
+ size_t y_offset = video_frame.size / 2 + video_offset / 2;
+
+ if (width != userdata->last_width || height != userdata->last_height) {
+ // We changed resolution since last use of this texture, so we need to create
+	// a new object. Note that since each card has its own PBOFrameAllocator,
+ // we don't need to worry about these flip-flopping between resolutions.
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr);
+ check_error();
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, height, 0, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(cbcr_offset + cbcr_width * extra_lines_top * sizeof(uint16_t)));
+ check_error();
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_y);
+ check_error();
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(y_offset + width * extra_lines_top));
+ check_error();
+ userdata->last_width = width;
+ userdata->last_height = height;
+ } else {
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr);
+ check_error();
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, height, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(cbcr_offset + cbcr_width * extra_lines_top * sizeof(uint16_t)));
+ check_error();
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_y);
+ check_error();
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(y_offset + width * extra_lines_top));
+ check_error();
+ }
+ glBindTexture(GL_TEXTURE_2D, 0);
+ check_error();
+ GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+ check_error();
+ assert(fence != nullptr);
+
+ {
+ unique_lock<mutex> lock(bmusb_mutex);
+ card->new_data_ready = true;
+ card->new_frame = RefCountedFrame(video_frame);
+ card->new_frame_length = frame_length;
+ card->new_data_ready_fence = fence;
+ card->dropped_frames = dropped_frames;
+ card->new_data_ready_changed.notify_all();
+ }
+}
+
+void Mixer::thread_func()
+{
+ eglBindAPI(EGL_OPENGL_API);
+ QOpenGLContext *context = create_context(mixer_surface);
+ if (!make_current(context, mixer_surface)) {
+ printf("oops\n");
+ exit(1);
+ }