X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=modules%2Fffmpeg%2Fproducer%2Futil%2Futil.cpp;h=04a237415003678a83680473a4024bdd67be76c1;hb=009816de6e071c6a35c74b0954d04cf61005b971;hp=d57b9b60da5a4aa60568587daebad2d1da780ef4;hpb=4439ad71cff8cd80c1eeabc059b6f86da6067980;p=casparcg

diff --git a/modules/ffmpeg/producer/util/util.cpp b/modules/ffmpeg/producer/util/util.cpp
index d57b9b60d..04a237415 100644
--- a/modules/ffmpeg/producer/util/util.cpp
+++ b/modules/ffmpeg/producer/util/util.cpp
@@ -58,7 +58,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C"
+extern "C"
 {
 	#include
 	#include
@@ -69,7 +69,7 @@ extern "C"
 #endif
 
 namespace caspar { namespace ffmpeg {
-
+
 core::field_mode get_mode(const AVFrame& frame)
 {
 	if(!frame.interlaced_frame)
@@ -102,23 +102,23 @@ core::pixel_format get_pixel_format(PixelFormat pix_fmt)
 core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int height)
 {
 	// Get linesizes
-	AVPicture dummy_pict;
+	AVPicture dummy_pict;
 	avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);
 
 	core::pixel_format_desc desc = get_pixel_format(pix_fmt);
-
+
 	switch(desc.format)
 	{
 	case core::pixel_format::gray:
 	case core::pixel_format::luma:
 		{
-			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
+			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
 			return desc;
 		}
 	case core::pixel_format::bgr:
 	case core::pixel_format::rgb:
 		{
-			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/3, height, 3));
+			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/3, height, 3));
 			return desc;
 		}
 	case core::pixel_format::bgra:
@@ -126,41 +126,41 @@ core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int he
 	case core::pixel_format::rgba:
 	case core::pixel_format::abgr:
 		{
-			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));
+			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));
 			return desc;
 		}
 	case core::pixel_format::ycbcr:
 	case core::pixel_format::ycbcra:
-		{
+		{
 			// Find chroma height
 			int size2 = static_cast(dummy_pict.data[2] - dummy_pict.data[1]);
-			int h2 = size2/dummy_pict.linesize[1];
+			int h2 = size2/dummy_pict.linesize[1];
 
 			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
 			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));
 			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));
 
-			if(desc.format == core::pixel_format::ycbcra)
-				desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));
+			if(desc.format == core::pixel_format::ycbcra)
+				desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));
 			return desc;
-		}
-	default:
+		}
+	default:
 		desc.format = core::pixel_format::invalid;
 		return desc;
 	}
 }
 
 core::mutable_frame make_frame(const void* tag, const spl::shared_ptr& decoded_frame, core::frame_factory& frame_factory, const core::audio_channel_layout& channel_layout)
-{
+{
 	static tbb::concurrent_unordered_map>> sws_contvalid_exts_;
-
+
 	if(decoded_frame->width < 1 || decoded_frame->height < 1)
 		return frame_factory.create_frame(tag, core::pixel_format_desc(core::pixel_format::invalid), core::audio_channel_layout::invalid());
 
 	const auto width = decoded_frame->width;
 	const auto height = decoded_frame->height;
 	auto desc = pixel_format_desc(static_cast(decoded_frame->format), width, height);
-
+
 	if(desc.format == core::pixel_format::invalid)
 	{
 		auto pix_fmt = static_cast(decoded_frame->format);
@@ -178,7 +178,7 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr&
 			target_pix_fmt = PIX_FMT_YUV422P;
 		else if(pix_fmt == PIX_FMT_YUV444P10)
 			target_pix_fmt = PIX_FMT_YUV444P;
-
+
 		auto target_desc = pixel_format_desc(target_pix_fmt, width, height);
 
 		auto write = frame_factory.create_frame(tag, target_desc, channel_layout);
@@ -186,31 +186,31 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr&
 		std::shared_ptr sws_context;
 
 		//CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";
-
-		int64_t key = ((static_cast(width) << 32) & 0xFFFF00000000) |
-					  ((static_cast(height) << 16) & 0xFFFF0000) |
-					  ((static_cast(pix_fmt) << 8) & 0xFF00) |
+
+		int64_t key = ((static_cast(width) << 32) & 0xFFFF00000000) |
+					  ((static_cast(height) << 16) & 0xFFFF0000) |
+					  ((static_cast(pix_fmt) << 8) & 0xFF00) |
 					  ((static_cast(target_pix_fmt) << 0) & 0xFF);
-
+
 		auto& pool = sws_contvalid_exts_[key];
-
+
 		if(!pool.try_pop(sws_context))
 		{
 			double param;
 			sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, target_pix_fmt, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
 		}
-
+
 		if(!sws_context)
 		{
-			CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") <<
+			CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") <<
 									boost::errinfo_api_function("sws_getContext"));
-		}
-
+		}
+
 		auto av_frame = create_frame();
 		if(target_pix_fmt == PIX_FMT_BGRA)
 		{
 			auto size = avpicture_fill(reinterpret_cast(av_frame.get()), write.image_data(0).begin(), PIX_FMT_BGRA, width, height);
-			CASPAR_VERIFY(size == write.image_data(0).size());
+			CASPAR_VERIFY(size == write.image_data(0).size());
 		}
 		else
 		{
@@ -223,22 +223,22 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr&
 			}
 		}
 
-		sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);
-		pool.push(sws_context);
+		sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);
+		pool.push(sws_context);
 
 		return std::move(write);
 	}
 	else
 	{
 		auto write = frame_factory.create_frame(tag, desc, channel_layout);
-
+
 		for(int n = 0; n < static_cast(desc.planes.size()); ++n)
 		{
 			auto plane = desc.planes[n];
 			auto result = write.image_data(n).begin();
 			auto decoded = decoded_frame->data[n];
 			auto decoded_linesize = decoded_frame->linesize[n];
-
+
 			CASPAR_ASSERT(decoded);
 			CASPAR_ASSERT(write.image_data(n).begin());
 
@@ -257,7 +257,7 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr&
 				fast_memcpy(result, decoded, plane.size);
 			}
 		}
-
+
 		return std::move(write);
 	}
 }
@@ -274,16 +274,16 @@ spl::shared_ptr make_av_frame(core::mutable_frame& frame)
 spl::shared_ptr make_av_frame(std::array data, const core::pixel_format_desc& pix_desc)
 {
 	auto av_frame = create_frame();
-
+
 	auto planes = pix_desc.planes;
 	auto format = pix_desc.format;
 
 	av_frame->width = planes[0].width;
 	av_frame->height = planes[0].height;
-	for(int n = 0; n < planes.size(); ++n)
+	for(int n = 0; n < planes.size(); ++n)
 	{
 		av_frame->data[n] = data[n];
-		av_frame->linesize[n] = planes[n].linesize;
+		av_frame->linesize[n] = planes[n].linesize;
 	}
 
 	switch(format)
@@ -295,19 +295,19 @@ spl::shared_ptr make_av_frame(std::array data, const core:
 		av_frame->format = PIX_FMT_BGR24;
 		break;
 	case core::pixel_format::rgba:
-		av_frame->format = PIX_FMT_RGBA;
+		av_frame->format = PIX_FMT_RGBA;
 		break;
 	case core::pixel_format::argb:
-		av_frame->format = PIX_FMT_ARGB;
+		av_frame->format = PIX_FMT_ARGB;
 		break;
 	case core::pixel_format::bgra:
-		av_frame->format = PIX_FMT_BGRA;
+		av_frame->format = PIX_FMT_BGRA;
 		break;
 	case core::pixel_format::abgr:
-		av_frame->format = PIX_FMT_ABGR;
+		av_frame->format = PIX_FMT_ABGR;
 		break;
 	case core::pixel_format::gray:
-		av_frame->format = PIX_FMT_GRAY8;
+		av_frame->format = PIX_FMT_GRAY8;
 		break;
 	case core::pixel_format::ycbcr:
 	{
@@ -345,8 +345,8 @@ bool is_sane_fps(AVRational time_base)
 AVRational fix_time_base(AVRational time_base)
 {
 	if(time_base.num == 1)
-		time_base.num = static_cast(std::pow(10.0, static_cast(std::log10(static_cast(time_base.den)))-1));
-
+		time_base.num = static_cast(std::pow(10.0, static_cast(std::log10(static_cast(time_base.den)))-1));
+
 	if(!is_sane_fps(time_base))
 	{
 		auto tmp = time_base;
@@ -361,7 +361,7 @@ AVRational fix_time_base(AVRational time_base)
 double read_fps(AVFormatContext& context, double fail_value)
 {
 	auto framerate = read_framerate(context, boost::rational(static_cast(fail_value * 1000000.0), 1000000));
-
+
 	return static_cast(framerate.numerator()) / static_cast(framerate.denominator());
 }
 
@@ -446,7 +446,7 @@ void fix_meta_data(AVFormatContext& context)
 	{
 		auto video_stream = context.streams[video_index];
 		auto video_context = context.streams[video_index]->codec;
-
+
 		if(boost::filesystem::path(context.filename).extension().string() == ".flv")
 		{
 			try
@@ -465,7 +465,7 @@ void fix_meta_data(AVFormatContext& context)
 				auto ticks = video_context->ticks_per_frame;
 
 				if(video_stream->nb_frames == 0)
-					video_stream->nb_frames = (duration*stream_time.num*codec_time.den)/(stream_time.den*codec_time.num*ticks);
+					video_stream->nb_frames = (duration*stream_time.num*codec_time.den)/(stream_time.den*codec_time.num*ticks);
 			}
 		}
 	}
@@ -477,13 +477,13 @@ spl::shared_ptr create_packet()
 		av_free_packet(p);
 		delete p;
 	});
-
+
 	av_init_packet(packet.get());
 	return packet;
 }
 
 spl::shared_ptr create_frame()
-{
+{
 	spl::shared_ptr frame(av_frame_alloc(), [](AVFrame* p)
 	{
 		av_frame_free(&p);
@@ -516,7 +516,7 @@ std::shared_ptr empty_video()
 }
 
 spl::shared_ptr open_codec(AVFormatContext& context, enum AVMediaType type, int& index, bool single_threaded)
-{
+{
 	AVCodec* decoder;
 	index = THROW_ON_ERROR2(av_find_best_stream(&context, type, index, -1, &decoder, 0), "");
 	//if(strcmp(decoder->name, "prores") == 0 && decoder->next && strcmp(decoder->next->name, "prores_lgpl") == 0)
@@ -548,7 +548,7 @@ std::wstring print_mode(int width, int height, double fps, bool interlaced)
 }
 
 bool is_valid_file(const std::wstring& filename, bool only_video)
-{
+{
 	static const auto invalid_exts = {
 		L".png",
 		L".tga",
@@ -583,21 +583,21 @@ bool is_valid_file(const std::wstring& filename, bool only_video)
 	};
 
 	auto ext = boost::to_lower_copy(boost::filesystem::path(filename).extension().wstring());
-
+
 	if(std::find(valid_exts.begin(), valid_exts.end(), ext) != valid_exts.end())
 		return true;
 
 	if (!only_video && std::find(only_audio.begin(), only_audio.end(), ext) != only_audio.end())
 		return true;
-
+
 	if(std::find(invalid_exts.begin(), invalid_exts.end(), ext) != invalid_exts.end())
-		return false;
+		return false;
 
 	if (only_video && std::find(only_audio.begin(), only_audio.end(), ext) != only_audio.end())
 		return false;
 
 	auto u8filename = u8(filename);
-
+
 	int score = 0;
 	AVProbeData pb = {};
 	pb.filename = u8filename.c_str();
@@ -772,7 +772,7 @@ core::audio_channel_layout get_audio_channel_layout(int num_channels, std::uint6
 }
 
 // av_get_default_channel_layout does not work for layouts not predefined in ffmpeg. This is needed to support > 8 channels.
-std::int64_t create_channel_layout_bitmask(int num_channels)
+std::uint64_t create_channel_layout_bitmask(int num_channels)
 {
 	if (num_channels > 63)
 		CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"FFmpeg cannot handle more than 63 audio channels"));
@@ -782,7 +782,7 @@ std::int64_t create_channel_layout_bitmask(int num_channels)
 	auto to_shift = 63 - num_channels;
 	auto result = ALL_63_CHANNELS >> to_shift;
 
-	return static_cast(result);
+	return static_cast(result);
 }
 
 std::string to_string(const boost::rational& framerate)
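Note: apart from whitespace cleanup, the visible functional change in this diff is the switch of create_channel_layout_bitmask from std::int64_t to std::uint64_t (hunks at lines 772 and 782), presumably to line up with FFmpeg's unsigned channel-layout bitmasks. The following is a minimal standalone sketch of that bitmask pattern, not CasparCG's actual code: the ALL_63_CHANNELS constant is defined outside the hunks shown, so the constant, the lower-bound check, the exception type, and main() below are stand-ins added for illustration.

#include <cstdint>
#include <iostream>
#include <stdexcept>

// Stand-in for the ALL_63_CHANNELS constant used by the real function,
// whose definition is not part of the hunks above: the low 63 bits set.
constexpr std::uint64_t all_63_channels = 0x7FFFFFFFFFFFFFFFULL;

// Mirrors the pattern shown in the diff: build a mask with the lowest
// num_channels bits set, so channel n corresponds to bit n. The unsigned
// return type matches the change made in this commit.
std::uint64_t create_channel_layout_bitmask(int num_channels)
{
	// The real code uses CASPAR_THROW_EXCEPTION; a standard exception
	// stands in here. The lower-bound check is an addition for safety.
	if (num_channels > 63 || num_channels < 0)
		throw std::invalid_argument("FFmpeg cannot handle more than 63 audio channels");

	const auto to_shift = 63 - num_channels;
	const auto result   = all_63_channels >> to_shift;

	return result;
}

int main()
{
	// 2 channels -> 0x3, 8 channels -> 0xff, 16 channels -> 0xffff.
	std::cout << std::hex
	          << create_channel_layout_bitmask(2)  << '\n'
	          << create_channel_layout_bitmask(8)  << '\n'
	          << create_channel_layout_bitmask(16) << '\n';
}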