X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=ffmpeg_capture.cpp;h=b8010d08219b52e30479b299822550ac8a850519;hb=e284d1c7a2e18ee7e4aea082c4a57a3504a0f5e8;hp=9a151b60d044c9eb038cdcc7a8c74b5a98e7bbd5;hpb=804bfa72263942d9beebe749ef0c8dc07a811413;p=nageru diff --git a/ffmpeg_capture.cpp b/ffmpeg_capture.cpp index 9a151b6..b8010d0 100644 --- a/ffmpeg_capture.cpp +++ b/ffmpeg_capture.cpp @@ -313,12 +313,20 @@ void FFmpegCapture::send_disconnected_frame() VideoFormat video_format; video_format.width = width; video_format.height = height; - video_format.stride = width * 4; video_format.frame_rate_nom = 60; video_format.frame_rate_den = 1; video_format.is_connected = false; - - video_frame.len = width * height * 4; + if (pixel_format == bmusb::PixelFormat_8BitBGRA) { + video_format.stride = width * 4; + video_frame.len = width * height * 4; + } else { + video_format.stride = width; + current_frame_ycbcr_format.full_range = true; + current_frame_ycbcr_format.num_levels = 256; + current_frame_ycbcr_format.chroma_subsampling_x = 2; + current_frame_ycbcr_format.chroma_subsampling_y = 2; + video_frame.len = width * height * 2; + } memset(video_frame.data, 0, video_frame.len); frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++, @@ -381,8 +389,9 @@ bool FFmpegCapture::play_video(const string &pathname) video_codec_ctx.get(), avcodec_close); // Open audio decoder, if we have audio. 
- AVCodecContextWithDeleter audio_codec_ctx = avcodec_alloc_context3_unique(nullptr); + AVCodecContextWithDeleter audio_codec_ctx; if (audio_stream_index != -1) { + audio_codec_ctx = avcodec_alloc_context3_unique(nullptr); const AVCodecParameters *audio_codecpar = format_ctx->streams[audio_stream_index]->codecpar; audio_timebase = format_ctx->streams[audio_stream_index]->time_base; if (avcodec_parameters_to_context(audio_codec_ctx.get(), audio_codecpar) < 0) { @@ -645,10 +654,15 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator:: } audio_format->num_channels = 2; + int64_t channel_layout = audio_avframe->channel_layout; + if (channel_layout == 0) { + channel_layout = av_get_default_channel_layout(audio_avframe->channels); + } + if (resampler == nullptr || audio_avframe->format != last_src_format || dst_format != last_dst_format || - av_frame_get_channel_layout(audio_avframe) != last_channel_layout || + channel_layout != last_channel_layout || av_frame_get_sample_rate(audio_avframe) != last_sample_rate) { avresample_free(&resampler); resampler = avresample_alloc_context(); @@ -657,7 +671,7 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator:: exit(1); } - av_opt_set_int(resampler, "in_channel_layout", av_frame_get_channel_layout(audio_avframe), 0); + av_opt_set_int(resampler, "in_channel_layout", channel_layout, 0); av_opt_set_int(resampler, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(resampler, "in_sample_rate", av_frame_get_sample_rate(audio_avframe), 0); av_opt_set_int(resampler, "out_sample_rate", OUTPUT_FREQUENCY, 0); @@ -671,7 +685,7 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator:: last_src_format = AVSampleFormat(audio_avframe->format); last_dst_format = dst_format; - last_channel_layout = av_frame_get_channel_layout(audio_avframe); + last_channel_layout = channel_layout; last_sample_rate = av_frame_get_sample_rate(audio_avframe); } @@ -680,7 
+694,7 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator:: uint8_t *data = audio_frame->data + audio_frame->len; int out_samples = avresample_convert(resampler, &data, 0, num_samples_room, - audio_avframe->data, audio_avframe->linesize[0], audio_avframe->nb_samples); + const_cast<uint8_t **>(audio_avframe->data), audio_avframe->linesize[0], audio_avframe->nb_samples); if (out_samples < 0) { fprintf(stderr, "Audio conversion failed.\n"); exit(1);