X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=nageru%2Fffmpeg_capture.cpp;h=1a9a295cd28d988233821294116ebf9ff2f50abf;hb=b22b0f8e945c5a0b9e738eb982b61aa70b88ed1d;hp=0dea5598555921c401d8e3d463796dc3edaad9f6;hpb=7092d222bb356549ef453f9c6f0b21123fd8a2ce;p=nageru

diff --git a/nageru/ffmpeg_capture.cpp b/nageru/ffmpeg_capture.cpp
index 0dea559..1a9a295 100644
--- a/nageru/ffmpeg_capture.cpp
+++ b/nageru/ffmpeg_capture.cpp
@@ -26,6 +26,7 @@ extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
 #include <libavutil/avutil.h>
+#include <libavutil/channel_layout.h>
 #include <libavutil/error.h>
 #include <libavutil/frame.h>
 #include <libavutil/imgutils.h>
@@ -35,7 +36,6 @@ extern "C" {
 
 #include "shared/ffmpeg_raii.h"
 #include "ffmpeg_util.h"
 #include "flags.h"
-#include "image_input.h"
 #include "ref_counted_frame.h"
 #include "shared/timebase.h"
@@ -43,14 +43,17 @@ extern "C" {
 #include <srt/srt.h>
 #endif
 
-#define FRAME_SIZE (8 << 20)  // 8 MB.
-
 using namespace std;
 using namespace std::chrono;
 using namespace bmusb;
 using namespace movit;
 using namespace Eigen;
 
+// Avoid deprecation warnings, but we don't want to drop FFmpeg 5.1 support just yet.
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 30, 100)
+#define pkt_duration duration
+#endif
+
 namespace {
 
 steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin, const AVRational &video_timebase, const steady_clock::time_point &origin, double rate)
 {
@@ -124,8 +127,8 @@ AVPixelFormat decide_dst_format(AVPixelFormat src_format, bmusb::PixelFormat dst
 		if (desc->comp[0].depth != 8) continue;
 		// Same or better chroma resolution only.
-		int chroma_w_diff = desc->log2_chroma_w - src_desc->log2_chroma_w;
-		int chroma_h_diff = desc->log2_chroma_h - src_desc->log2_chroma_h;
+		int chroma_w_diff = src_desc->log2_chroma_w - desc->log2_chroma_w;
+		int chroma_h_diff = src_desc->log2_chroma_h - desc->log2_chroma_h;
 		if (chroma_w_diff < 0 || chroma_h_diff < 0)
 			continue;
@@ -457,22 +460,60 @@ void FFmpegCapture::send_disconnected_frame()
 template <AVHWDeviceType type>
 AVPixelFormat get_hw_format(AVCodecContext *ctx, const AVPixelFormat *fmt)
 {
-	for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
-		for (int i = 0;; ++i) {  // Termination condition inside loop.
-			const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
-			if (config == nullptr) {  // End of list.
-				fprintf(stderr, "Decoder %s does not support device.\n", ctx->codec->name);
-				break;
-			}
-			if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
-			    config->device_type == type &&
-			    config->pix_fmt == *fmt_ptr) {
+	bool found_config_of_right_type = false;
+	for (int i = 0;; ++i) {  // Termination condition inside loop.
+		const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
+		if (config == nullptr) {  // End of list.
+			break;
+		}
+		if (!(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) ||
+		    config->device_type != type) {
+			// Not interesting for us.
+			continue;
+		}
+
+		// We have a config of the right type, but does it actually support
+		// the pixel format we want? (Seemingly, FFmpeg's way of signaling errors
+		// is to just replace the pixel format with a software-decoded one,
+		// such as yuv420p.)
+		found_config_of_right_type = true;
+		for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
+			if (config->pix_fmt == *fmt_ptr) {
+				fprintf(stderr, "Initialized '%s' hardware decoding for codec '%s'.\n",
+					av_hwdevice_get_type_name(type), ctx->codec->name);
+				if (ctx->profile == FF_PROFILE_H264_BASELINE) {
+					fprintf(stderr, "WARNING: Stream claims to be H.264 Baseline, which is generally poorly supported in hardware decoders.\n");
+					fprintf(stderr, "         Consider encoding it as Constrained Baseline, Main or High instead.\n");
+					fprintf(stderr, "         Decoding might fail and fall back to software.\n");
+				}
 				return config->pix_fmt;
 			}
 		}
+		fprintf(stderr, "Decoder '%s' supports only these pixel formats:", ctx->codec->name);
+		unordered_set<AVPixelFormat> seen;
+		for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
+			if (!seen.count(*fmt_ptr)) {
+				fprintf(stderr, " %s", av_get_pix_fmt_name(*fmt_ptr));
+				seen.insert(*fmt_ptr);
+			}
+		}
+		fprintf(stderr, " (wanted %s for hardware acceleration)\n", av_get_pix_fmt_name(config->pix_fmt));
+
+	}
+
+	if (!found_config_of_right_type) {
+		fprintf(stderr, "Decoder '%s' does not support device type '%s'.\n", ctx->codec->name, av_hwdevice_get_type_name(type));
+	}
+
+	// We found no usable hardware formats, so take the first software format.
+	for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
+		if ((av_pix_fmt_desc_get(*fmt_ptr)->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
+			fprintf(stderr, "Falling back to software format %s.\n", av_get_pix_fmt_name(*fmt_ptr));
+			return *fmt_ptr;
+		}
 	}
-	// We found no VA-API formats, so take the best software format.
+	// Fallback: Just return anything. (Should never really happen.)
 	return fmt[0];
 }
@@ -494,10 +535,13 @@ bool FFmpegCapture::play_video(const string &pathname)
 	AVFormatContextWithCloser format_ctx;
 	if (srt_sock == -1) {
-		// Regular file.
+		// Regular file (or stream).
+		frame_timeout_started = steady_clock::now();
+		frame_timeout_valid = true;
 		format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
 			/*options=*/nullptr,
 			AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
+		frame_timeout_valid = false;
 	} else {
 #ifdef HAVE_SRT
 		// SRT socket, already opened.
@@ -616,11 +660,18 @@ bool FFmpegCapture::play_video(const string &pathname)
 		int64_t audio_pts;
 		bool error;
+		frame_timeout_started = steady_clock::now();
+		frame_timeout_valid = true;
 		AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(),
 			audio_codec_ctx.get(), pathname, video_stream_index, audio_stream_index,
 			subtitle_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
+		frame_timeout_valid = false;
+		if (should_interrupt.load()) {
+			// Abort no matter whether we got a frame or not.
+			return false;
+		}
 		if (error) {
 			if (++consecutive_errors >= 100) {
-				fprintf(stderr, "More than 100 consecutive video frames, aborting playback.\n");
+				fprintf(stderr, "More than 100 consecutive video frame errors, aborting playback.\n");
 				return false;
 			} else {
 				continue;
 			}
@@ -854,34 +905,31 @@ AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCo
 	*audio_pts = -1;
 	bool has_audio = false;
 	do {
-		AVPacket pkt;
-		unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
-			&pkt, av_packet_unref);
-		av_init_packet(&pkt);
-		pkt.data = nullptr;
-		pkt.size = 0;
-		if (av_read_frame(format_ctx, &pkt) == 0) {
-			if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
-				audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
+		AVPacketWithDeleter pkt = av_packet_alloc_unique();
+		pkt->data = nullptr;
+		pkt->size = 0;
+		if (av_read_frame(format_ctx, pkt.get()) == 0) {
+			if (pkt->stream_index == audio_stream_index && audio_callback != nullptr) {
+				audio_callback(pkt.get(), format_ctx->streams[audio_stream_index]->time_base);
 			}
-			if (pkt.stream_index == video_stream_index && video_callback != nullptr) {
-				video_callback(&pkt, format_ctx->streams[video_stream_index]->time_base);
+			if (pkt->stream_index == video_stream_index && video_callback != nullptr) {
+				video_callback(pkt.get(), format_ctx->streams[video_stream_index]->time_base);
 			}
-			if (pkt.stream_index == video_stream_index && global_flags.transcode_video) {
-				if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
+			if (pkt->stream_index == video_stream_index && global_flags.transcode_video) {
+				if (avcodec_send_packet(video_codec_ctx, pkt.get()) < 0) {
 					fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
 					*error = true;
 					return AVFrameWithDeleter(nullptr);
 				}
-			} else if (pkt.stream_index == audio_stream_index && global_flags.transcode_audio) {
+			} else if (pkt->stream_index == audio_stream_index && global_flags.transcode_audio) {
 				has_audio = true;
-				if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) {
+				if (avcodec_send_packet(audio_codec_ctx, pkt.get()) < 0) {
 					fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
 					*error = true;
 					return AVFrameWithDeleter(nullptr);
 				}
-			} else if (pkt.stream_index == subtitle_stream_index) {
-				last_subtitle = string(reinterpret_cast<const char *>(pkt.data), pkt.size);
+			} else if (pkt->stream_index == subtitle_stream_index) {
+				last_subtitle = string(reinterpret_cast<const char *>(pkt->data), pkt->size);
 				has_last_subtitle = true;
 			}
 		} else {
@@ -971,28 +1019,36 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::
 	}
 	audio_format->num_channels = 2;
 
-	int64_t channel_layout = audio_avframe->channel_layout;
-	if (channel_layout == 0) {
-		channel_layout = av_get_default_channel_layout(audio_avframe->channels);
+	AVChannelLayout channel_layout = audio_avframe->ch_layout;
+	if (!av_channel_layout_check(&channel_layout) ||
+	    channel_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
+		av_channel_layout_default(&channel_layout, audio_avframe->ch_layout.nb_channels);
 	}
 
 	if (resampler == nullptr ||
 	    audio_avframe->format != last_src_format ||
 	    dst_format != last_dst_format ||
-	    channel_layout != last_channel_layout ||
+	    av_channel_layout_compare(&channel_layout, &last_channel_layout) != 0 ||
 	    audio_avframe->sample_rate != last_sample_rate) {
+		// TODO: When we get C++20, use AV_CHANNEL_LAYOUT_STEREO_DOWNMIX.
+		AVChannelLayout stereo_downmix;
+		stereo_downmix.order = AV_CHANNEL_ORDER_NATIVE;
+		stereo_downmix.nb_channels = 2;
+		stereo_downmix.u.mask = AV_CH_LAYOUT_STEREO_DOWNMIX;
+
 		swr_free(&resampler);
-		resampler = swr_alloc_set_opts(nullptr,
-		                               /*out_ch_layout=*/AV_CH_LAYOUT_STEREO_DOWNMIX,
-		                               /*out_sample_fmt=*/dst_format,
-		                               /*out_sample_rate=*/OUTPUT_FREQUENCY,
-		                               /*in_ch_layout=*/channel_layout,
-		                               /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
-		                               /*in_sample_rate=*/audio_avframe->sample_rate,
-		                               /*log_offset=*/0,
-		                               /*log_ctx=*/nullptr);
-
-		if (resampler == nullptr) {
+		resampler = nullptr;
+		int err = swr_alloc_set_opts2(&resampler,
+		                              /*out_ch_layout=*/&stereo_downmix,
+		                              /*out_sample_fmt=*/dst_format,
+		                              /*out_sample_rate=*/OUTPUT_FREQUENCY,
+		                              /*in_ch_layout=*/&channel_layout,
+		                              /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
+		                              /*in_sample_rate=*/audio_avframe->sample_rate,
+		                              /*log_offset=*/0,
+		                              /*log_ctx=*/nullptr);
+
+		if (err != 0 || resampler == nullptr) {
 			fprintf(stderr, "Allocating resampler failed.\n");
 			abort();
 		}
@@ -1107,6 +1163,16 @@ UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &
 		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
 	}
+
+	// FIXME: Currently, if the video is too high-res for one of the allocated
+	// frames, we simply refuse to scale it here to avoid crashes. It would be better
+	// if we could somehow signal getting larger frames, especially as 4K is a thing now.
+	if (video_frame->len > FRAME_SIZE) {
+		fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
+		*error = true;
+		return video_frame;
+	}
+
 	sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
 
 	return video_frame;
@@ -1119,6 +1185,21 @@ int FFmpegCapture::interrupt_cb_thunk(void *opaque)
 
 int FFmpegCapture::interrupt_cb()
 {
+	// If ten seconds have gone by without anything happening, we assume that
+	// we are in a network stream that died and FFmpeg just didn't
+	// pick it up (or perhaps it just hung, keeping the connection open).
+	// We are called back approximately every 100 ms if something is hanging,
+	// so we get more than enough accuracy for our purposes.
+	if (!should_interrupt && frame_timeout_valid &&
+	    duration<double>(steady_clock::now() - frame_timeout_started).count() >= 10.0) {
+		string filename_copy;
+		{
+			lock_guard<mutex> lock(filename_mu);
+			filename_copy = filename;
+		}
+		fprintf(stderr, "%s: No frame for more than 10 seconds, restarting stream.\n", filename_copy.c_str());
+		should_interrupt = true;
+	}
 	return should_interrupt.load();
 }
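
Notes on the techniques this patch uses, with small standalone sketches.

The version-gated alias at the top of the patch is a compatibility shim: libavutil 57.30.100 renamed AVFrame::pkt_duration to AVFrame::duration, but FFmpeg 5.1 ships an older libavutil that only has the old name. A minimal sketch of the same trick (the frame_duration helper is illustrative, not from the patch):

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/version.h>
}

// On new libavutil, make the old field name expand to the new one, so the
// same source compiles both against FFmpeg 5.1 and against newer releases
// where pkt_duration is deprecated or gone.
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 30, 100)
#define pkt_duration duration
#endif

int64_t frame_duration(const AVFrame *frame)
{
	return frame->pkt_duration;  // Expands to frame->duration on new FFmpeg.
}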
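The rewritten get_hw_format() follows FFmpeg's standard get_format negotiation protocol: the decoder hands the callback a list of candidate pixel formats terminated by AV_PIX_FMT_NONE (-1), and the format the callback returns decides between hardware and software decoding. A condensed sketch of that protocol with VA-API hard-coded for brevity (pick_vaapi_or_software and open_vaapi_decoder are illustrative names, not Nageru code):

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
#include <libavutil/pixdesc.h>
}
#include <cstdio>

// FFmpeg calls this with an AV_PIX_FMT_NONE-terminated list of candidates.
// Returning a hardware surface format keeps hardware decoding; returning a
// software format (as the patch's fallback path does) switches to software.
static AVPixelFormat pick_vaapi_or_software(AVCodecContext *ctx, const AVPixelFormat *fmt)
{
	for (const AVPixelFormat *p = fmt; *p != AV_PIX_FMT_NONE; ++p) {
		if (*p == AV_PIX_FMT_VAAPI) {
			return *p;  // Hardware surface format; frames live on the GPU.
		}
	}
	for (const AVPixelFormat *p = fmt; *p != AV_PIX_FMT_NONE; ++p) {
		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
		if ((desc->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
			fprintf(stderr, "%s: falling back to software format %s.\n",
			        ctx->codec->name, av_get_pix_fmt_name(*p));
			return *p;
		}
	}
	return fmt[0];  // Should not happen; return anything.
}

static bool open_vaapi_decoder(AVCodecContext *ctx)
{
	ctx->get_format = pick_vaapi_or_software;
	// Attach a VA-API device; FFmpeg will then offer AV_PIX_FMT_VAAPI
	// among the candidates passed to the callback above.
	return av_hwdevice_ctx_create(&ctx->hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
	                              /*device=*/nullptr, /*opts=*/nullptr, /*flags=*/0) >= 0;
}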
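The convert_audio() changes port the resampler setup from the old mask-based channel-layout API (swr_alloc_set_opts() with int64_t masks) to the AVChannelLayout API introduced in FFmpeg 5.1. A standalone sketch of the new-style setup under the same assumptions as the patch; 48000 stands in for Nageru's OUTPUT_FREQUENCY, and make_stereo_resampler is an illustrative name:

extern "C" {
#include <libavutil/channel_layout.h>
#include <libswresample/swresample.h>
}
#include <cstdio>
#include <cstdlib>

SwrContext *make_stereo_resampler(const AVChannelLayout &in_layout_arg,
                                  AVSampleFormat in_fmt, int in_rate)
{
	// Shallow copy, as in the patch; synthesize a default (native-order)
	// layout if the source only knows its channel count.
	AVChannelLayout in_layout = in_layout_arg;
	if (!av_channel_layout_check(&in_layout) || in_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
		av_channel_layout_default(&in_layout, in_layout_arg.nb_channels);
	}

	// Pre-C++20 stand-in for AV_CHANNEL_LAYOUT_STEREO_DOWNMIX (see the
	// TODO in the patch).
	AVChannelLayout stereo_downmix;
	stereo_downmix.order = AV_CHANNEL_ORDER_NATIVE;
	stereo_downmix.nb_channels = 2;
	stereo_downmix.u.mask = AV_CH_LAYOUT_STEREO_DOWNMIX;

	// swr_alloc_set_opts2() takes AVChannelLayout pointers and returns an
	// error code, unlike the removed swr_alloc_set_opts().
	SwrContext *resampler = nullptr;
	int err = swr_alloc_set_opts2(&resampler,
	                              /*out_ch_layout=*/&stereo_downmix,
	                              /*out_sample_fmt=*/AV_SAMPLE_FMT_S32,
	                              /*out_sample_rate=*/48000,
	                              /*in_ch_layout=*/&in_layout,
	                              /*in_sample_fmt=*/in_fmt,
	                              /*in_sample_rate=*/in_rate,
	                              /*log_offset=*/0,
	                              /*log_ctx=*/nullptr);
	if (err < 0 || resampler == nullptr || swr_init(resampler) < 0) {
		fprintf(stderr, "Allocating resampler failed.\n");
		abort();
	}
	return resampler;
}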
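Finally, the frame_timeout_started/frame_timeout_valid bookkeeping feeds FFmpeg's AVIOInterruptCB mechanism: FFmpeg polls the callback during blocking I/O, and a nonzero return aborts the operation, which is what lets the ten-second watchdog restart a dead network stream. A minimal sketch of the same pattern (Watchdog and open_with_timeout are illustrative, not Nageru's types; the ten-second budget matches the patch):

extern "C" {
#include <libavformat/avformat.h>
}
#include <atomic>
#include <chrono>

struct Watchdog {
	std::atomic<bool> should_interrupt{false};
	std::chrono::steady_clock::time_point started;

	// FFmpeg polls this while blocking I/O is in progress (per the patch's
	// comment, roughly every 100 ms); returning nonzero aborts the operation.
	static int thunk(void *opaque)
	{
		Watchdog *self = static_cast<Watchdog *>(opaque);
		using namespace std::chrono;
		if (duration<double>(steady_clock::now() - self->started).count() >= 10.0) {
			self->should_interrupt = true;
		}
		return self->should_interrupt.load();
	}
};

bool open_with_timeout(const char *url, AVFormatContext **ctx, Watchdog *dog)
{
	dog->started = std::chrono::steady_clock::now();
	*ctx = avformat_alloc_context();
	(*ctx)->interrupt_callback = AVIOInterruptCB{ &Watchdog::thunk, dog };
	// On failure, avformat_open_input() frees the context and nulls *ctx.
	return avformat_open_input(ctx, url, nullptr, nullptr) == 0;
}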