return true;
}
-void H264Encoder::end_frame(GLsync fence)
+void H264Encoder::end_frame(GLsync fence, const std::vector<FrameAllocator::Frame> &input_frames_to_release)
{
{
unique_lock<mutex> lock(frame_queue_mutex);
- pending_frames[current_storage_frame++] = fence;
+ pending_frames[current_storage_frame++] = PendingFrame{ fence, input_frames_to_release };
}
frame_queue_nonempty.notify_one();
}
void H264Encoder::copy_thread_func()
{
for ( ;; ) {
- GLsync fence;
+ PendingFrame frame;
encoding2display_order(current_frame_encoding, intra_period, intra_idr_period, ip_period,
¤t_frame_display, ¤t_frame_type);
if (current_frame_type == FRAME_IDR) {
unique_lock<mutex> lock(frame_queue_mutex);
frame_queue_nonempty.wait(lock, [this]{ return copy_thread_should_quit || pending_frames.count(current_frame_display) != 0; });
if (copy_thread_should_quit) return;
- fence = pending_frames[current_frame_display];
+ frame = pending_frames[current_frame_display];
pending_frames.erase(current_frame_display);
}
// Wait for the GPU to be done with the frame.
- glClientWaitSync(fence, 0, 0);
- glDeleteSync(fence);
+ glClientWaitSync(frame.fence, 0, 0);
+ glDeleteSync(frame.fence);
+
+	// Release the input frames that were used to render this output
+	// frame back to their allocators. (More precisely, the ones that
+	// were needed one output frame ago; see the producer side.)
+ for (FrameAllocator::Frame input_frame : frame.input_frames_to_release) {
+ input_frame.owner->release_frame(input_frame);
+ }
// Unmap the image.
GLSurface *surf = &gl_surfaces[current_frame_display % SURFACE_NUM];
void
#endif
bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
- void end_frame(GLsync fence);
+ void end_frame(GLsync fence, const std::vector<FrameAllocator::Frame> &input_frames_to_release);
private:
struct storage_task {
//int frame_width, frame_height;
//int ;
int current_storage_frame;
-#if 0
- std::map<int, std::pair<FrameAllocator::Frame, GLsync>> pending_frames;
-#endif
- std::map<int, GLsync> pending_frames;
+
+ struct PendingFrame {
+ GLsync fence;
+ std::vector<FrameAllocator::Frame> input_frames_to_release;
+ };
+ std::map<int, PendingFrame> pending_frames;
QSurface *surface;
AVFormatContext *avctx;
card->new_data_ready_changed.notify_all();
}
}
+
+ vector<FrameAllocator::Frame> input_frames_to_release;
for (int card_index = 0; card_index < NUM_CARDS; ++card_index) {
CaptureCard *card = &card_copy[card_index];
if (!card->new_data_ready)
continue;
- // FIXME: We could still be rendering from it!
- card->usb->get_video_frame_allocator()->release_frame(bmusb_current_rendering_frame[card_index]);
+		// Now that a new frame has arrived, the previous frame will no
+		// longer be used for any new renders, so it can be released once
+		// this frame is done rendering. (In principle we could release it
+		// one output frame earlier, but until a new frame arrives there is
+		// no way to know when the current one stops being needed.)
+ input_frames_to_release.push_back(bmusb_current_rendering_frame[card_index]);
bmusb_current_rendering_frame[card_index] = card->new_frame;
// The new texture might still be uploaded,
check_error();
glDeleteSync(card->new_data_ready_fence);
check_error();
- GLint input_tex_pbo = (GLint)(intptr_t)bmusb_current_rendering_frame[card_index].userdata;
+ GLint input_tex_pbo = (GLint)(intptr_t)card->new_frame.userdata;
input[card_index]->set_pixel_data(0, (unsigned char *)BUFFER_OFFSET((1280 * 750 * 2 + 44) / 2 + 1280 * 25 + 22), input_tex_pbo);
input[card_index]->set_pixel_data(1, (unsigned char *)BUFFER_OFFSET(1280 * 25 + 22), input_tex_pbo);
resource_pool->release_fbo(cbcr_fbo);
}
- h264_encoder.end_frame(fence);
+ h264_encoder.end_frame(fence, input_frames_to_release);
#if 1