- // Video frame will be released when last user of card->new_frame goes out of scope.
- card->usb->get_audio_frame_allocator()->release_frame(audio_frame);
-}
-
-void Mixer::place_rectangle(Effect *resample_effect, Effect *padding_effect, float x0, float y0, float x1, float y1)
-{
- float srcx0 = 0.0f;
- float srcx1 = 1.0f;
- float srcy0 = 0.0f;
- float srcy1 = 1.0f;
-
- // Cull.
- if (x0 > 1280.0 || x1 < 0.0 || y0 > 720.0 || y1 < 0.0) {
- CHECK(resample_effect->set_int("width", 1));
- CHECK(resample_effect->set_int("height", 1));
- CHECK(resample_effect->set_float("zoom_x", 1280.0));
- CHECK(resample_effect->set_float("zoom_y", 720.0));
- CHECK(padding_effect->set_int("left", 2000));
- CHECK(padding_effect->set_int("top", 2000));
- return;
- }
-
- // Clip. (TODO: Clip on upper/left sides, too.)
- if (x1 > 1280.0) {
- srcx1 = (1280.0 - x0) / (x1 - x0);
- x1 = 1280.0;
- }
- if (y1 > 720.0) {
- srcy1 = (720.0 - y0) / (y1 - y0);
- y1 = 720.0;
- }
-
- float x_subpixel_offset = x0 - floor(x0);
- float y_subpixel_offset = y0 - floor(y0);
-
- // Resampling must be to an integral number of pixels. Round up,
- // and then add an extra pixel so we have some leeway for the border.
- int width = int(ceil(x1 - x0)) + 1;
- int height = int(ceil(y1 - y0)) + 1;
- CHECK(resample_effect->set_int("width", width));
- CHECK(resample_effect->set_int("height", height));
-
- // Correct the discrepancy with zoom. (This will leave a small
- // excess edge of pixels and subpixels, which we'll correct for soon.)
- float zoom_x = (x1 - x0) / (width * (srcx1 - srcx0));
- float zoom_y = (y1 - y0) / (height * (srcy1 - srcy0));
- CHECK(resample_effect->set_float("zoom_x", zoom_x));
- CHECK(resample_effect->set_float("zoom_y", zoom_y));
- CHECK(resample_effect->set_float("zoom_center_x", 0.0f));
- CHECK(resample_effect->set_float("zoom_center_y", 0.0f));
-
- // Padding must also be to a whole-pixel offset.
- CHECK(padding_effect->set_int("left", floor(x0)));
- CHECK(padding_effect->set_int("top", floor(y0)));
-
- // Correct _that_ discrepancy by subpixel offset in the resampling.
- CHECK(resample_effect->set_float("left", -x_subpixel_offset / zoom_x));
- CHECK(resample_effect->set_float("top", -y_subpixel_offset / zoom_y));
-
- // Finally, adjust the border so it is exactly where we want it.
- CHECK(padding_effect->set_float("border_offset_left", x_subpixel_offset));
- CHECK(padding_effect->set_float("border_offset_right", x1 - (floor(x0) + width)));
- CHECK(padding_effect->set_float("border_offset_top", y_subpixel_offset));
- CHECK(padding_effect->set_float("border_offset_bottom", y1 - (floor(y0) + height)));
+ if (video_frame.len - video_offset == 0 ||
+ video_frame.len - video_offset != size_t(width * (height + extra_lines_top + extra_lines_bottom) * 2)) {
+ if (video_frame.len != 0) {
+ printf("Card %d: Dropping video frame with wrong length (%ld)\n",
+ card_index, video_frame.len - video_offset);
+ }
+ if (video_frame.owner) {
+ video_frame.owner->release_frame(video_frame);
+ }
+
+ // Still send on the information that we _had_ a frame, even though it's corrupted,
+ // so that pts can go up accordingly.
+ {
+ unique_lock<mutex> lock(bmusb_mutex);
+ card->new_data_ready = true;
+ card->new_frame = RefCountedFrame(FrameAllocator::Frame());
+ card->new_frame_length = frame_length;
+ card->new_frame_interlaced = false;
+ card->new_data_ready_fence = nullptr;
+ card->dropped_frames = dropped_frames;
+ card->new_data_ready_changed.notify_all();
+ }
+ return;
+ }
+
+ PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;
+
+ unsigned num_fields = interlaced ? 2 : 1;
+ timespec frame_upload_start;
+ if (interlaced) {
+ // NOTE: This isn't deinterlacing. This is just sending the two fields along
+ // as separate frames without considering anything like the half-field offset.
+ // We'll need to add a proper deinterlacer on the receiving side to get this right.
+ assert(height % 2 == 0);
+ height /= 2;
+ assert(frame_length % 2 == 0);
+ frame_length /= 2;
+ num_fields = 2;
+ clock_gettime(CLOCK_MONOTONIC, &frame_upload_start);
+ }
+ RefCountedFrame new_frame(video_frame);
+
+ // Upload the textures.
+ size_t cbcr_width = width / 2;
+ size_t cbcr_offset = video_offset / 2;
+ size_t y_offset = video_frame.size / 2 + video_offset / 2;
+
+ for (unsigned field = 0; field < num_fields; ++field) {
+ unsigned field_start_line = (field == 1) ? second_field_start : extra_lines_top + field * (height + 22);
+
+ if (userdata->tex_y[field] == 0 ||
+ userdata->tex_cbcr[field] == 0 ||
+ width != userdata->last_width[field] ||
+ height != userdata->last_height[field]) {
+ // We changed resolution since last use of this texture, so we need to create
+ // a new object. Note that since each card has its own PBOFrameAllocator,
+ // we don't need to worry about these flip-flopping between resolutions.
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
+ check_error();
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, cbcr_width, height, 0, GL_RG, GL_UNSIGNED_BYTE, nullptr);
+ check_error();
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
+ check_error();
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
+ check_error();
+ userdata->last_width[field] = width;
+ userdata->last_height[field] = height;
+ }
+
+ GLuint pbo = userdata->pbo;
+ check_error();
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
+ check_error();
+ glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, video_frame.size);
+ check_error();
+ //glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+ //check_error();
+
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_cbcr[field]);
+ check_error();
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, cbcr_width, height, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(cbcr_offset + cbcr_width * field_start_line * sizeof(uint16_t)));
+ check_error();
+ glBindTexture(GL_TEXTURE_2D, userdata->tex_y[field]);
+ check_error();
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(y_offset + width * field_start_line));
+ check_error();
+ glBindTexture(GL_TEXTURE_2D, 0);
+ check_error();
+ GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+ check_error();
+ assert(fence != nullptr);
+
+ if (field == 1) {
+ // Don't upload the second field as fast as we can; wait until
+ // the field time has approximately passed. (Otherwise, we could
+ // get timing jitter against the other sources, and possibly also
+ // against the video display, although the latter is not as critical.)
+ // This requires our system clock to be reasonably close to the
+ // video clock, but that's not an unreasonable assumption.
+ timespec second_field_start;
+ second_field_start.tv_nsec = frame_upload_start.tv_nsec +
+ frame_length * 1000000000 / TIMEBASE;
+ second_field_start.tv_sec = frame_upload_start.tv_sec +
+ second_field_start.tv_nsec / 1000000000;
+ second_field_start.tv_nsec %= 1000000000;
+
+ while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
+ &second_field_start, nullptr) == -1 &&
+ errno == EINTR) ;
+ }
+
+ {
+ unique_lock<mutex> lock(bmusb_mutex);
+ card->new_data_ready = true;
+ card->new_frame = new_frame;
+ card->new_frame_length = frame_length;
+ card->new_frame_field = field;
+ card->new_frame_interlaced = interlaced;
+ card->new_data_ready_fence = fence;
+ card->dropped_frames = dropped_frames;
+ card->new_data_ready_changed.notify_all();
+
+ if (field != num_fields - 1) {
+ // Wait until the previous frame was consumed.
+ card->new_data_ready_changed.wait(lock, [card]{ return !card->new_data_ready || card->should_quit; });
+ if (card->should_quit) return;
+ }
+ }
+ }