X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=ffmpeg_capture.cpp;h=fb1b223c326f4e789193da4e078c65a48d729ec5;hb=4db1223f01b0d1731ff571f20c677a7675b645ec;hp=0fe74a7a95b46e3a891425ed60f7dbb6408ff95d;hpb=8cefe0ef1926be7931d4a9bbfed93ee6e85f3540;p=nageru

diff --git a/ffmpeg_capture.cpp b/ffmpeg_capture.cpp
index 0fe74a7..fb1b223 100644
--- a/ffmpeg_capture.cpp
+++ b/ffmpeg_capture.cpp
@@ -200,7 +200,7 @@ YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *f
 }  // namespace
 
 FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
-	: filename(filename), width(width), height(height)
+	: filename(filename), width(width), height(height), video_timebase{1, 1}
 {
 	// Not really used for anything.
 	description = "Video: " + filename;
@@ -351,7 +351,7 @@ bool FFmpegCapture::play_video(const string &pathname)
 	}
 
 	const AVCodecParameters *codecpar = format_ctx->streams[video_stream_index]->codecpar;
-	AVRational video_timebase = format_ctx->streams[video_stream_index]->time_base;
+	video_timebase = format_ctx->streams[video_stream_index]->time_base;
 	AVCodecContextWithDeleter codec_ctx = avcodec_alloc_context3_unique(nullptr);
 	if (avcodec_parameters_to_context(codec_ctx.get(), codecpar) < 0) {
 		fprintf(stderr, "%s: Cannot fill codec parameters\n", pathname.c_str());
@@ -370,79 +370,19 @@ bool FFmpegCapture::play_video(const string &pathname)
 		codec_ctx.get(), avcodec_close);
 
 	internal_rewind();
-	double rate = 1.0;
-
-	unique_ptr<SwsContext, decltype(sws_freeContext)*> sws_ctx(nullptr, sws_freeContext);
-	int sws_last_width = -1, sws_last_height = -1, sws_last_src_format = -1;
-	AVPixelFormat sws_dst_format = AVPixelFormat(-1);  // In practice, always initialized.
 
 	// Main loop.
 	while (!producer_thread_should_quit.should_quit()) {
-		// Process any queued commands from other threads.
-		vector<QueuedCommand> commands;
-		{
-			lock_guard<mutex> lock(queue_mu);
-			swap(commands, command_queue);
+		if (process_queued_commands(format_ctx.get(), pathname, last_modified, /*rewound=*/nullptr)) {
+			return true;
 		}
-		for (const QueuedCommand &cmd : commands) {
-			switch (cmd.command) {
-			case QueuedCommand::REWIND:
-				if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
-					fprintf(stderr, "%s: Rewind failed, stopping play.\n", pathname.c_str());
-				}
-				// If the file has changed since last time, return to get it reloaded.
-				// Note that depending on how you move the file into place, you might
-				// end up corrupting the one you're already playing, so this path
-				// might not trigger.
-				if (changed_since(pathname, last_modified)) {
-					return true;
-				}
-				internal_rewind();
-				break;
-			case QueuedCommand::CHANGE_RATE:
-				start = next_frame_start;
-				pts_origin = last_pts;
-				rate = cmd.new_rate;
-				break;
-			}
+		bool error;
+		AVFrameWithDeleter frame = decode_frame(format_ctx.get(), codec_ctx.get(), pathname, video_stream_index, &error);
+		if (error) {
+			return false;
 		}
-
-		// Read packets until we have a frame or there are none left.
-		int frame_finished = 0;
-		AVFrameWithDeleter frame = av_frame_alloc_unique();
-		bool eof = false;
-		do {
-			AVPacket pkt;
-			unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
-				&pkt, av_packet_unref);
-			av_init_packet(&pkt);
-			pkt.data = nullptr;
-			pkt.size = 0;
-			if (av_read_frame(format_ctx.get(), &pkt) == 0) {
-				if (pkt.stream_index != video_stream_index) {
-					// Ignore audio for now.
-					continue;
-				}
-				if (avcodec_send_packet(codec_ctx.get(), &pkt) < 0) {
-					fprintf(stderr, "%s: Cannot send packet to codec.\n", pathname.c_str());
-					return false;
-				}
-			} else {
-				eof = true;  // Or error, but ignore that for the time being.
-			}
-
-			int err = avcodec_receive_frame(codec_ctx.get(), frame.get());
-			if (err == 0) {
-				frame_finished = true;
-				break;
-			} else if (err != AVERROR(EAGAIN)) {
-				fprintf(stderr, "%s: Cannot receive frame from codec.\n", pathname.c_str());
-				return false;
-			}
-		} while (!eof);
-
-		if (!frame_finished) {
+		if (frame == nullptr) {
 			// EOF. Loop back to the start if we can.
 			if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
 				fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
@@ -459,93 +399,222 @@ bool FFmpegCapture::play_video(const string &pathname)
 			continue;
 		}
 
-		if (sws_ctx == nullptr ||
-		    sws_last_width != frame->width ||
-		    sws_last_height != frame->height ||
-		    sws_last_src_format != frame->format) {
-			sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
-			sws_ctx.reset(
-				sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
-					width, height, sws_dst_format,
-					SWS_BICUBIC, nullptr, nullptr, nullptr));
-			sws_last_width = frame->width;
-			sws_last_height = frame->height;
-			sws_last_src_format = frame->format;
-		}
-		if (sws_ctx == nullptr) {
-			fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
+		VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
+		FrameAllocator::Frame video_frame = make_video_frame(frame.get(), pathname, &error);
+		if (error) {
 			return false;
 		}
 
-		VideoFormat video_format;
-		video_format.width = width;
-		video_format.height = height;
-		if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
-			video_format.stride = width * 4;
+		FrameAllocator::Frame audio_frame;
+		AudioFormat audio_format;
+		audio_format.bits_per_sample = 32;
+		audio_format.num_channels = 8;
+
+		for ( ;; ) {
+			next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
+			video_frame.received_timestamp = next_frame_start;
+			bool finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
+			if (finished_wakeup) {
+				frame_callback(timecode++,
+					video_frame, 0, video_format,
+					audio_frame, 0, audio_format);
+				break;
+			} else {
+				if (producer_thread_should_quit.should_quit()) break;
+
+				bool rewound = false;
+				if (process_queued_commands(format_ctx.get(), pathname, last_modified, &rewound)) {
+					return true;
+				}
+				// If we just rewound, drop this frame on the floor and be done.
+				if (rewound) {
+					video_frame_allocator->release_frame(video_frame);
+					break;
+				}
+				// OK, we didn't, so probably a rate change. Recalculate next_frame_start,
+				// but if it's now in the past, we'll reset the origin, so that we don't
+				// generate a huge backlog of frames that we need to run through quickly.
+				next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
+				steady_clock::time_point now = steady_clock::now();
+				if (next_frame_start < now) {
+					pts_origin = frame->pts;
+					start = next_frame_start = now;
+				}
+			}
+		}
+		last_pts = frame->pts;
+	}
+	return true;
+}
+
+void FFmpegCapture::internal_rewind()
+{
+	pts_origin = last_pts = 0;
+	start = next_frame_start = steady_clock::now();
+}
+
+bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound)
+{
+	// Process any queued commands from other threads.
+	vector<QueuedCommand> commands;
+	{
+		lock_guard<mutex> lock(queue_mu);
+		swap(commands, command_queue);
+	}
+	for (const QueuedCommand &cmd : commands) {
+		switch (cmd.command) {
+		case QueuedCommand::REWIND:
+			if (av_seek_frame(format_ctx, /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
+				fprintf(stderr, "%s: Rewind failed, stopping play.\n", pathname.c_str());
+			}
+			// If the file has changed since last time, return to get it reloaded.
+			// Note that depending on how you move the file into place, you might
+			// end up corrupting the one you're already playing, so this path
+			// might not trigger.
+			if (changed_since(pathname, last_modified)) {
+				return true;
+			}
+			internal_rewind();
+			if (rewound != nullptr) {
+				*rewound = true;
+			}
+			break;
+
+		case QueuedCommand::CHANGE_RATE:
+			// Change the origin to the last played frame.
+			start = compute_frame_start(last_pts, pts_origin, video_timebase, start, rate);
+			pts_origin = last_pts;
+			rate = cmd.new_rate;
+			break;
+		}
+	}
+	return false;
+}
+
+AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *codec_ctx, const std::string &pathname, int video_stream_index, bool *error)
+{
+	*error = false;
+
+	// Read packets until we have a frame or there are none left.
+	bool frame_finished = false;
+	AVFrameWithDeleter frame = av_frame_alloc_unique();
+	bool eof = false;
+	do {
+		AVPacket pkt;
+		unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
+			&pkt, av_packet_unref);
+		av_init_packet(&pkt);
+		pkt.data = nullptr;
+		pkt.size = 0;
+		if (av_read_frame(format_ctx, &pkt) == 0) {
+			if (pkt.stream_index != video_stream_index) {
+				// Ignore audio for now.
+				continue;
+			}
+			if (avcodec_send_packet(codec_ctx, &pkt) < 0) {
+				fprintf(stderr, "%s: Cannot send packet to codec.\n", pathname.c_str());
+				*error = true;
+				return AVFrameWithDeleter(nullptr);
+			}
 		} else {
-			assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
-			video_format.stride = width;
+			eof = true;  // Or error, but ignore that for the time being.
 		}
-		video_format.frame_rate_nom = video_timebase.den;
-		video_format.frame_rate_den = av_frame_get_pkt_duration(frame.get()) * video_timebase.num;
-		if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
-			// Invalid frame rate.
-			video_format.frame_rate_nom = 60;
-			video_format.frame_rate_den = 1;
+		int err = avcodec_receive_frame(codec_ctx, frame.get());
+		if (err == 0) {
+			frame_finished = true;
+			break;
+		} else if (err != AVERROR(EAGAIN)) {
+			fprintf(stderr, "%s: Cannot receive frame from codec.\n", pathname.c_str());
+			*error = true;
+			return AVFrameWithDeleter(nullptr);
 		}
-		video_format.has_signal = true;
-		video_format.is_connected = true;
+	} while (!eof);
 
-		next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
-		last_pts = frame->pts;
+	if (frame_finished)
+		return frame;
+	else
+		return AVFrameWithDeleter(nullptr);
+}
 
-		FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
-		if (video_frame.data != nullptr) {
-			uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
-			int linesizes[4] = { 0, 0, 0, 0 };
-			if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
-				pic_data[0] = video_frame.data;
-				linesizes[0] = video_format.stride;
-				video_frame.len = video_format.stride * height;
-			} else {
-				assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
-				const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
+VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
+{
+	VideoFormat video_format;
+	video_format.width = width;
+	video_format.height = height;
+	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
+		video_format.stride = width * 4;
+	} else {
+		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
+		video_format.stride = width;
+	}
+	video_format.frame_rate_nom = video_timebase.den;
+	video_format.frame_rate_den = av_frame_get_pkt_duration(frame) * video_timebase.num;
+	if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
+		// Invalid frame rate.
+		video_format.frame_rate_nom = 60;
+		video_format.frame_rate_den = 1;
+	}
+	video_format.has_signal = true;
+	video_format.is_connected = true;
+	return video_format;
+}
 
-				int chroma_width = AV_CEIL_RSHIFT(int(width), desc->log2_chroma_w);
-				int chroma_height = AV_CEIL_RSHIFT(int(height), desc->log2_chroma_h);
+FrameAllocator::Frame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &pathname, bool *error)
+{
+	*error = false;
 
-				pic_data[0] = video_frame.data;
-				linesizes[0] = width;
+	FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
+	if (video_frame.data == nullptr) {
+		return video_frame;
+	}
 
-				pic_data[1] = pic_data[0] + width * height;
-				linesizes[1] = chroma_width;
+	if (sws_ctx == nullptr ||
+	    sws_last_width != frame->width ||
+	    sws_last_height != frame->height ||
+	    sws_last_src_format != frame->format) {
+		sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
+		sws_ctx.reset(
+			sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
+				width, height, sws_dst_format,
+				SWS_BICUBIC, nullptr, nullptr, nullptr));
+		sws_last_width = frame->width;
+		sws_last_height = frame->height;
+		sws_last_src_format = frame->format;
+	}
+	if (sws_ctx == nullptr) {
+		fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
+		*error = true;
+		return video_frame;
+	}
 
-				pic_data[2] = pic_data[1] + chroma_width * chroma_height;
-				linesizes[2] = chroma_width;
+	uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
+	int linesizes[4] = { 0, 0, 0, 0 };
+	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
+		pic_data[0] = video_frame.data;
+		linesizes[0] = width * 4;
+		video_frame.len = (width * 4) * height;
+	} else {
+		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
+		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
 
-				video_frame.len = width * height + 2 * chroma_width * chroma_height;
+		int chroma_width = AV_CEIL_RSHIFT(int(width), desc->log2_chroma_w);
+		int chroma_height = AV_CEIL_RSHIFT(int(height), desc->log2_chroma_h);
 
-				current_frame_ycbcr_format = decode_ycbcr_format(desc, frame.get());
-			}
-			sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
-			video_frame.received_timestamp = next_frame_start;
-		}
+		pic_data[0] = video_frame.data;
+		linesizes[0] = width;
 
-		FrameAllocator::Frame audio_frame;
-		AudioFormat audio_format;
-		audio_format.bits_per_sample = 32;
-		audio_format.num_channels = 8;
+		pic_data[1] = pic_data[0] + width * height;
+		linesizes[1] = chroma_width;
 
-		producer_thread_should_quit.sleep_until(next_frame_start);
-		frame_callback(timecode++,
-			video_frame, 0, video_format,
-			audio_frame, 0, audio_format);
+		pic_data[2] = pic_data[1] + chroma_width * chroma_height;
+		linesizes[2] = chroma_width;
+
+		video_frame.len = width * height + 2 * chroma_width * chroma_height;
+
+		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame);
 	}
-	return true;
-}
+	sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
 
-void FFmpegCapture::internal_rewind()
-{
-	pts_origin = last_pts = 0;
-	start = next_frame_start = steady_clock::now();
+	return video_frame;
 }
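
Note: both play_video() and the new helpers lean on compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate), which is not part of this diff. A minimal sketch of what such a helper presumably does, converting the pts delta to seconds via the stream time base, dividing by the playback rate, and anchoring it to the wall-clock origin; the parameter names and exact rounding here are assumptions, not taken from the diff:

// Sketch only (assumed implementation; relies on <chrono> and libavutil's AVRational).
// Turns a stream pts into the wall-clock time at which the frame should be shown.
std::chrono::steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin,
                                                          AVRational video_timebase,
                                                          std::chrono::steady_clock::time_point origin,
                                                          double rate)
{
	// Seconds elapsed in stream time since the chosen origin pts.
	const std::chrono::duration<double> pts_elapsed(
		(frame_pts - pts_origin) * double(video_timebase.num) / double(video_timebase.den));
	// Scale by the playback rate and add to the wall-clock origin.
	return origin + std::chrono::duration_cast<std::chrono::steady_clock::duration>(pts_elapsed / rate);
}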
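
construct_video_format() derives the frame rate by inverting the per-frame duration: frame_rate_nom/frame_rate_den = time_base.den / (pkt_duration * time_base.num). A worked example with hypothetical numbers (not taken from the diff):

// Hypothetical values, for illustration only.
AVRational time_base = { 1, 15360 };  // stream time base: 1/15360 s per tick
int64_t pkt_duration = 256;           // one frame lasts 256 ticks
// frame rate = time_base.den / (pkt_duration * time_base.num)
//            = 15360 / (256 * 1) = 60/1, i.e. 60 fps.
// If the container reports a zero duration, the code falls back to 60/1.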
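
The diff only shows ffmpeg_capture.cpp, but the refactor implies matching changes in ffmpeg_capture.h: the new helpers become member functions, and the former play_video() locals (rate, the SwsContext cache, video_timebase) become members so the helpers can share them. A sketch of what those declarations might look like; the function signatures are copied from the definitions above, while the member list and initializers are assumptions:

// Sketch only (assumed header additions; not shown in this diff).
private:
	bool process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound);
	AVFrameWithDeleter decode_frame(AVFormatContext *format_ctx, AVCodecContext *codec_ctx, const std::string &pathname, int video_stream_index, bool *error);
	bmusb::VideoFormat construct_video_format(const AVFrame *frame, AVRational video_timebase);
	bmusb::FrameAllocator::Frame make_video_frame(const AVFrame *frame, const std::string &pathname, bool *error);

	// Former locals of play_video(), now shared with the helpers.
	AVRational video_timebase;  // initialized to {1, 1} in the constructor
	double rate = 1.0;
	std::unique_ptr<SwsContext, decltype(sws_freeContext)*> sws_ctx { nullptr, sws_freeContext };
	int sws_last_width = -1, sws_last_height = -1, sws_last_src_format = -1;
	AVPixelFormat sws_dst_format = AVPixelFormat(-1);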