[ffmpeg] Remove usage of deprecated APIs to more easily support a newer version of...
author    Helge Norberg <helge.norberg@svt.se>
          Tue, 4 Apr 2017 12:47:10 +0000 (14:47 +0200)
committer Helge Norberg <helge.norberg@svt.se>
          Tue, 4 Apr 2017 12:47:10 +0000 (14:47 +0200)
accelerator/cpu/image/image_mixer.cpp
modules/decklink/producer/decklink_producer.cpp
modules/ffmpeg/StdAfx.h
modules/ffmpeg/producer/filter/audio_filter.cpp
modules/ffmpeg/producer/filter/filter.cpp
modules/ffmpeg/producer/tbb_avcodec.cpp
modules/ffmpeg/producer/util/util.cpp
modules/ffmpeg/producer/util/util.h
modules/screen/consumer/screen_consumer.cpp

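Most of the diff is a mechanical rename from the deprecated PixelFormat / PIX_FMT_* names to the AVPixelFormat / AV_PIX_FMT_* names kept by newer FFmpeg releases, plus dropping the removed <libavfilter/avcodec.h> header and the removed AVCodecContext::thread_opaque field. As a minimal sketch of the rename (not part of the commit; assumes an FFmpeg version whose libavutil/pixfmt.h ships the AV_-prefixed names):

extern "C"
{
	#include <libavutil/pixfmt.h>
}

static AVPixelFormat bgra_format()
{
	// Before this commit: return PIX_FMT_BGRA;   (deprecated alias, removed upstream)
	// After this commit:  the AV_-prefixed enumerator, optionally qualified with the enum name
	return AVPixelFormat::AV_PIX_FMT_BGRA;
}
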
diff --git a/accelerator/cpu/image/image_mixer.cpp b/accelerator/cpu/image/image_mixer.cpp
index bf69a0558ec8b98332f4adb374b0e38c9edce5e8..315d3f9b7b27ab116902e453c6c6e7109624512c 100644
@@ -58,7 +58,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #include <libswscale/swscale.h>
        #include <libavcodec/avcodec.h>
@@ -69,7 +69,7 @@ extern "C"
 #endif
 
 namespace caspar { namespace accelerator { namespace cpu {
-               
+
 struct item
 {
        core::pixel_format_desc                 pix_desc        = core::pixel_format::invalid;
@@ -91,54 +91,54 @@ bool operator!=(const item& lhs, const item& rhs)
 {
        return !(lhs == rhs);
 }
-       
+
 // 100% accurate blending with correct rounding.
 inline xmm::s8_x blend(xmm::s8_x d, xmm::s8_x s)
-{      
+{
        using namespace xmm;
-               
+
        // C(S, D) = S + D - (((T >> 8) + T) >> 8);
        // T(S, D) = S * D[A] + 0x80
 
        auto aaaa   = s8_x::shuffle(d, s8_x(15, 15, 15, 15, 11, 11, 11, 11, 7, 7, 7, 7, 3, 3, 3, 3));
        d                       = s8_x(u8_x::min(u8_x(d), u8_x(aaaa))); // Overflow guard. Some source files have color values which incorrectly exceed pre-multiplied alpha values, e.g. red(255) > alpha(254).
 
-       auto xaxa       = s16_x(aaaa) >> 8;             
-                             
-       auto t1         = s16_x::multiply_low(s16_x(s) & 0x00FF, xaxa) + 0x80;    
+       auto xaxa       = s16_x(aaaa) >> 8;
+
+       auto t1         = s16_x::multiply_low(s16_x(s) & 0x00FF, xaxa) + 0x80;
        auto t2         = s16_x::multiply_low(s16_x(s) >> 8    , xaxa) + 0x80;
-               
-       auto xyxy       = s8_x(((t1 >> 8) + t1) >> 8);      
-       auto yxyx       = s8_x((t2 >> 8) + t2);    
+
+       auto xyxy       = s8_x(((t1 >> 8) + t1) >> 8);
+       auto yxyx       = s8_x((t2 >> 8) + t2);
        auto argb   = s8_x::blend(xyxy, yxyx, s8_x(-1, 0, -1, 0));
 
        return s8_x(s) + (d - argb);
 }
-       
+
 template<typename temporal, typename alignment>
 static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
-{                      
+{
        using namespace xmm;
 
-       for(auto n = 0; n < count; n += 32)    
+       for(auto n = 0; n < count; n += 32)
        {
                auto s0 = s8_x::load<temporal_tag, alignment>(dest+n+0);
                auto s1 = s8_x::load<temporal_tag, alignment>(dest+n+16);
 
                auto d0 = s8_x::load<temporal_tag, alignment>(source+n+0);
                auto d1 = s8_x::load<temporal_tag, alignment>(source+n+16);
-               
+
                auto argb0 = blend(d0, s0);
                auto argb1 = blend(d1, s1);
 
                s8_x::store<temporal, alignment>(argb0, dest+n+0 );
                s8_x::store<temporal, alignment>(argb1, dest+n+16);
-       } 
+       }
 }
 
 template<typename temporal>
 static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
-{                      
+{
        using namespace xmm;
 
        if(reinterpret_cast<std::uint64_t>(dest) % 16 != 0 || reinterpret_cast<std::uint64_t>(source) % 16 != 0)
@@ -152,7 +152,7 @@ class image_renderer
        tbb::concurrent_unordered_map<int64_t, tbb::concurrent_bounded_queue<std::shared_ptr<SwsContext>>>      sws_devices_;
        tbb::concurrent_bounded_queue<spl::shared_ptr<buffer>>                                                                                          temp_buffers_;
        core::video_format_desc                                                                                                                                                         format_desc_;
-public:        
+public:
        std::future<array<const std::uint8_t>> operator()(std::vector<item> items, const core::video_format_desc& format_desc)
        {
                if (format_desc != format_desc_)
@@ -161,14 +161,14 @@ public:
                        sws_devices_.clear();
                }
 
-               convert(items, format_desc.width, format_desc.height);          
-                               
+               convert(items, format_desc.width, format_desc.height);
+
                // Remove first field stills.
                boost::range::remove_erase_if(items, [&](const item& item)
                {
                        return item.transform.is_still && item.transform.field_mode == format_desc.field_mode; // only us last field for stills.
                });
-               
+
                // Stills are progressive
                for (auto& item : items)
                {
@@ -178,7 +178,7 @@ public:
 
                auto result = spl::make_shared<buffer>(format_desc.size, 0);
                if(format_desc.field_mode != core::field_mode::progressive)
-               {                       
+               {
                        draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::upper);
                        draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::lower);
                }
@@ -188,17 +188,17 @@ public:
                }
 
                temp_buffers_.clear();
-               
+
                return make_ready_future(array<const std::uint8_t>(result->data(), format_desc.size, true, result));
        }
 
 private:
 
        void draw(std::vector<item> items, uint8_t* dest, std::size_t width, std::size_t height, core::field_mode field_mode)
-       {               
+       {
                for (auto& item : items)
                        item.transform.field_mode &= field_mode;
-               
+
                // Remove empty items.
                boost::range::remove_erase_if(items, [&](const item& item)
                {
@@ -207,10 +207,10 @@ private:
 
                if(items.empty())
                        return;
-               
+
                auto start = field_mode == core::field_mode::lower ? 1 : 0;
                auto step  = field_mode == core::field_mode::progressive ? 1 : 2;
-               
+
                // TODO: Add support for fill translations.
                // TODO: Add support for mask rect.
                // TODO: Add support for opacity.
@@ -226,29 +226,29 @@ private:
 
                                for(std::size_t n = 0; n < items.size()-1; ++n)
                                        kernel<xmm::temporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
-                               
-                               std::size_t n = items.size()-1;                         
+
+                               std::size_t n = items.size()-1;
                                kernel<xmm::nontemporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
                        }
 
                        _mm_mfence();
                });
        }
-               
+
        void convert(std::vector<item>& source_items, int width, int height)
        {
                std::set<std::array<const uint8_t*, 4>> buffers;
 
                for (auto& item : source_items)
                        buffers.insert(item.data);
-               
+
                auto dest_items = source_items;
 
                tbb::parallel_for_each(buffers.begin(), buffers.end(), [&](const std::array<const uint8_t*, 4>& data)
-               {                       
+               {
                        auto pix_desc = std::find_if(source_items.begin(), source_items.end(), [&](const item& item){return item.data == data;})->pix_desc;
 
-                       if(pix_desc.format == core::pixel_format::bgra && 
+                       if(pix_desc.format == core::pixel_format::bgra &&
                                pix_desc.planes.at(0).width == width &&
                                pix_desc.planes.at(0).height == height)
                                return;
@@ -259,9 +259,9 @@ private:
 
                        auto input_av_frame = ffmpeg::make_av_frame(data2, pix_desc);
 
-               
-                       int64_t key = ((static_cast<int64_t>(input_av_frame->width)      << 32) & 0xFFFF00000000) | 
-                                                 ((static_cast<int64_t>(input_av_frame->height) << 16) & 0xFFFF0000) | 
+
+                       int64_t key = ((static_cast<int64_t>(input_av_frame->width)      << 32) & 0xFFFF00000000) |
+                                                 ((static_cast<int64_t>(input_av_frame->height) << 16) & 0xFFFF0000) |
                                                  ((static_cast<int64_t>(input_av_frame->format) <<  8) & 0xFF00);
 
                        auto& pool = sws_devices_[key];
@@ -270,23 +270,23 @@ private:
                        if(!pool.try_pop(sws_device))
                        {
                                double param;
-                               sws_device.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<PixelFormat>(input_av_frame->format), width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
+                               sws_device.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<AVPixelFormat>(input_av_frame->format), width, height, AVPixelFormat::AV_PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
                        }
-                       
-                       if(!sws_device)                         
-                               CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling device.") << boost::errinfo_api_function("sws_getContext"));                           
-               
+
+                       if(!sws_device)
+                               CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling device.") << boost::errinfo_api_function("sws_getContext"));
+
                        auto dest_frame = spl::make_shared<buffer>(width*height*4);
                        temp_buffers_.push(dest_frame);
 
                        {
                                auto dest_av_frame = ffmpeg::create_frame();
-                               avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), PIX_FMT_BGRA, width, height);
-                               
-                               sws_scale(sws_device.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);                           
+                               avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), AVPixelFormat::AV_PIX_FMT_BGRA, width, height);
+
+                               sws_scale(sws_device.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);
                                pool.push(sws_device);
                        }
-                                       
+
                        for(std::size_t n = 0; n < source_items.size(); ++n)
                        {
                                if(source_items[n].data == data)
@@ -298,37 +298,37 @@ private:
                                        dest_items[n].transform                 = source_items[n].transform;
                                }
                        }
-               });     
+               });
 
                source_items = std::move(dest_items);
        }
 };
-               
+
 struct image_mixer::impl : boost::noncopyable
-{      
+{
        image_renderer                                          renderer_;
        std::vector<core::image_transform>      transform_stack_;
        std::vector<item>                                       items_; // layer/stream/items
 public:
        impl(int channel_id)
-               : transform_stack_(1)   
+               : transform_stack_(1)
        {
                CASPAR_LOG(info) << L"Initialized Streaming SIMD Extensions Accelerated CPU Image Mixer for channel " << channel_id;
        }
-               
+
        void push(const core::frame_transform& transform)
        {
                transform_stack_.push_back(transform_stack_.back()*transform.image_transform);
        }
-               
+
        void visit(const core::const_frame& frame)
-       {                       
+       {
                if(frame.pixel_format_desc().format == core::pixel_format::invalid)
                        return;
 
                if(frame.pixel_format_desc().planes.empty())
                        return;
-               
+
                if(frame.pixel_format_desc().planes.at(0).size < 16)
                        return;
 
@@ -339,7 +339,7 @@ public:
                item.pix_desc   = frame.pixel_format_desc();
                item.transform  = transform_stack_.back();
                for(int n = 0; n < item.pix_desc.planes.size(); ++n)
-                       item.data.at(n) = frame.image_data(n).begin();          
+                       item.data.at(n) = frame.image_data(n).begin();
 
                items_.push_back(item);
        }
@@ -348,12 +348,12 @@ public:
        {
                transform_stack_.pop_back();
        }
-       
+
        std::future<array<const std::uint8_t>> render(const core::video_format_desc& format_desc)
        {
                return renderer_(std::move(items_), format_desc);
        }
-       
+
        core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout)
        {
                std::vector<array<std::uint8_t>> buffers;
diff --git a/modules/decklink/producer/decklink_producer.cpp b/modules/decklink/producer/decklink_producer.cpp
index cdc6a63fa06988f9c13a330c84193cc15d2fe78a..a6ead70661933b9bf96cc44f518ccd0ff0bc96da 100644
@@ -234,7 +234,7 @@ public:
 
                        video_frame->data[0]                    = reinterpret_cast<uint8_t*>(video_bytes);
                        video_frame->linesize[0]                = video->GetRowBytes();
-                       video_frame->format                             = PIX_FMT_UYVY422;
+                       video_frame->format                             = AVPixelFormat::AV_PIX_FMT_UYVY422;
                        video_frame->width                              = video->GetWidth();
                        video_frame->height                             = video->GetHeight();
                        video_frame->interlaced_frame   = in_format_desc_.field_mode != core::field_mode::progressive;
diff --git a/modules/ffmpeg/StdAfx.h b/modules/ffmpeg/StdAfx.h
index f7fb536ae90c8c5d638caed16f56e390d6410b20..96b404275a4b41ffe5d4ae2e5a9c8ffe93983109 100644
 
 #pragma warning(push, 1)
 
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
        #include <libavcodec/avcodec.h>
-       #include <libavfilter/avcodec.h>
        #include <libavfilter/avfilter.h>
        #include <libavfilter/avfiltergraph.h>
        #include <libavfilter/buffersink.h>
diff --git a/modules/ffmpeg/producer/filter/audio_filter.cpp b/modules/ffmpeg/producer/filter/audio_filter.cpp
index 1f65c1ff6880592f5d3fbf207a0fb520717e53f3..b9059309a334a19277e6e71ae02160912f5c9245 100644
@@ -50,7 +50,6 @@ extern "C"
        #include <libavutil/imgutils.h>
        #include <libavutil/opt.h>
        #include <libavfilter/avfilter.h>
-       #include <libavfilter/avcodec.h>
        #include <libavfilter/buffersink.h>
        #include <libavfilter/buffersrc.h>
 }
diff --git a/modules/ffmpeg/producer/filter/filter.cpp b/modules/ffmpeg/producer/filter/filter.cpp
index aa83cf36662584a583fd758a55b3850c03d0a53b..1b59b8e350b537f15bd1a4b694132b08b03b53d5 100644
@@ -50,7 +50,6 @@ extern "C"
        #include <libavutil/imgutils.h>
        #include <libavutil/opt.h>
        #include <libavfilter/avfilter.h>
-       #include <libavfilter/avcodec.h>
        #include <libavfilter/buffersink.h>
        #include <libavfilter/buffersrc.h>
 }
diff --git a/modules/ffmpeg/producer/tbb_avcodec.cpp b/modules/ffmpeg/producer/tbb_avcodec.cpp
index ac412b462fb6ad992e142132c85c7f0acc64b91e..44857ccf3e93d96347b501f2af0b4e37138e9832 100644
@@ -36,7 +36,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -47,7 +47,7 @@ extern "C"
 #endif
 
 namespace caspar {
-               
+
 static const int MAX_THREADS = 16; // See mpegvideo.h
 
 int thread_execute(AVCodecContext* s, int (*func)(AVCodecContext *c2, void *arg2), void* arg, int* ret, int count, int size)
@@ -55,7 +55,7 @@ int thread_execute(AVCodecContext* s, int (*func)(AVCodecContext *c2, void *arg2
        tbb::parallel_for(0, count, 1, [&](int i)
        {
         int r = func(s, (char*)arg + i*size);
-        if(ret) 
+        if(ret)
                        ret[i] = r;
     });
 
@@ -63,44 +63,44 @@ int thread_execute(AVCodecContext* s, int (*func)(AVCodecContext *c2, void *arg2
 }
 
 int thread_execute2(AVCodecContext* s, int (*func)(AVCodecContext* c2, void* arg2, int, int), void* arg, int* ret, int count)
-{         
+{
        // TODO: Micro-optimize...
 
        std::array<std::vector<int>, 16> jobs;
-       
-       for(int n = 0; n < count; ++n)  
-               jobs[(n*MAX_THREADS) / count].push_back(n);     
-       
-       tbb::parallel_for(0, MAX_THREADS, [&](int n)    
-    {   
+
+       for(int n = 0; n < count; ++n)
+               jobs[(n*MAX_THREADS) / count].push_back(n);
+
+       tbb::parallel_for(0, MAX_THREADS, [&](int n)
+    {
                for (auto k : jobs[n])
                {
                        int r = func(s, arg, k, n);
-                       if(ret) 
+                       if(ret)
                                ret[k]= r;
                }
-    });   
+    });
 
-       return 0; 
+       return 0;
 }
 
 void thread_init(AVCodecContext* s)
 {
        static int dummy_opaque;
 
-    s->active_thread_type = FF_THREAD_SLICE;
-       s->thread_opaque          = &dummy_opaque; 
-    s->execute                   = thread_execute;
-    s->execute2                          = thread_execute2;
-    s->thread_count              = MAX_THREADS; // We are using a task-scheduler, so use as many "threads/tasks" as possible. 
+    s->active_thread_type      = FF_THREAD_SLICE;
+       s->opaque                               = &dummy_opaque;
+    s->execute                         = thread_execute;
+    s->execute2                                = thread_execute2;
+    s->thread_count                    = MAX_THREADS; // We are using a task-scheduler, so use as many "threads/tasks" as possible.
 }
 
 void thread_free(AVCodecContext* s)
 {
-       if(!s->thread_opaque)
+       if(!s->opaque)
                return;
 
-       s->thread_opaque = nullptr;
+       s->opaque = nullptr;
 }
 
 int tbb_avcodec_open(AVCodecContext* avctx, AVCodec* codec, bool single_threaded)
@@ -112,16 +112,16 @@ int tbb_avcodec_open(AVCodecContext* avctx, AVCodec* codec, bool single_threaded
 
        if(!single_threaded && codec->capabilities & CODEC_CAP_SLICE_THREADS)
                thread_init(avctx);
-       
+
        // ff_thread_init will not be executed since thread_opaque != nullptr || thread_count == 1.
-       return avcodec_open2(avctx, codec, nullptr); 
+       return avcodec_open2(avctx, codec, nullptr);
 }
 
 int tbb_avcodec_close(AVCodecContext* avctx)
 {
        thread_free(avctx);
        // ff_thread_free will not be executed since thread_opaque == nullptr.
-       return avcodec_close(avctx); 
+       return avcodec_close(avctx);
 }
 
 }
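Newer libavcodec no longer has the AVCodecContext::thread_opaque field, which is why thread_init/thread_free above now keep their marker in the generic opaque pointer instead. For context, a minimal sketch (not from the commit; my_execute and my_execute2 are hypothetical callbacks with the signatures avcodec.h expects) of installing custom slice-threading callbacks on a codec context:

extern "C"
{
	#include <libavcodec/avcodec.h>
}

// Hypothetical slice-job callbacks; a real implementation would fan the jobs out to a scheduler.
static int my_execute(AVCodecContext* c, int (*func)(AVCodecContext*, void*),
                      void* arg, int* ret, int count, int size);
static int my_execute2(AVCodecContext* c, int (*func)(AVCodecContext*, void*, int, int),
                       void* arg, int* ret, int count);

static void install_slice_threading(AVCodecContext* ctx)
{
	static int marker;

	ctx->active_thread_type = FF_THREAD_SLICE;  // slice (not frame) threading
	ctx->opaque             = &marker;          // thread_opaque no longer exists upstream
	ctx->execute            = my_execute;
	ctx->execute2           = my_execute2;
	ctx->thread_count       = 16;               // one job bucket per logical "thread"
}
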
diff --git a/modules/ffmpeg/producer/util/util.cpp b/modules/ffmpeg/producer/util/util.cpp
index 91e40e36e9dd7537ccd274821e4fae21521d1427..c257a237e6f1dc33b516c81261ebfc95f7406f9e 100644
@@ -78,28 +78,28 @@ core::field_mode get_mode(const AVFrame& frame)
        return frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;
 }
 
-core::pixel_format get_pixel_format(PixelFormat pix_fmt)
+core::pixel_format get_pixel_format(AVPixelFormat pix_fmt)
 {
        switch(pix_fmt)
        {
-       case PIX_FMT_GRAY8:                     return core::pixel_format::gray;
-       case PIX_FMT_RGB24:                     return core::pixel_format::rgb;
-       case PIX_FMT_BGR24:                     return core::pixel_format::bgr;
-       case PIX_FMT_BGRA:                      return core::pixel_format::bgra;
-       case PIX_FMT_ARGB:                      return core::pixel_format::argb;
-       case PIX_FMT_RGBA:                      return core::pixel_format::rgba;
-       case PIX_FMT_ABGR:                      return core::pixel_format::abgr;
-       case PIX_FMT_YUV444P:           return core::pixel_format::ycbcr;
-       case PIX_FMT_YUV422P:           return core::pixel_format::ycbcr;
-       case PIX_FMT_YUV420P:           return core::pixel_format::ycbcr;
-       case PIX_FMT_YUV411P:           return core::pixel_format::ycbcr;
-       case PIX_FMT_YUV410P:           return core::pixel_format::ycbcr;
-       case PIX_FMT_YUVA420P:          return core::pixel_format::ycbcra;
-       default:                                        return core::pixel_format::invalid;
+       case AVPixelFormat::AV_PIX_FMT_GRAY8:           return core::pixel_format::gray;
+       case AVPixelFormat::AV_PIX_FMT_RGB24:           return core::pixel_format::rgb;
+       case AVPixelFormat::AV_PIX_FMT_BGR24:           return core::pixel_format::bgr;
+       case AVPixelFormat::AV_PIX_FMT_BGRA:            return core::pixel_format::bgra;
+       case AVPixelFormat::AV_PIX_FMT_ARGB:            return core::pixel_format::argb;
+       case AVPixelFormat::AV_PIX_FMT_RGBA:            return core::pixel_format::rgba;
+       case AVPixelFormat::AV_PIX_FMT_ABGR:            return core::pixel_format::abgr;
+       case AVPixelFormat::AV_PIX_FMT_YUV444P:         return core::pixel_format::ycbcr;
+       case AVPixelFormat::AV_PIX_FMT_YUV422P:         return core::pixel_format::ycbcr;
+       case AVPixelFormat::AV_PIX_FMT_YUV420P:         return core::pixel_format::ycbcr;
+       case AVPixelFormat::AV_PIX_FMT_YUV411P:         return core::pixel_format::ycbcr;
+       case AVPixelFormat::AV_PIX_FMT_YUV410P:         return core::pixel_format::ycbcr;
+       case AVPixelFormat::AV_PIX_FMT_YUVA420P:        return core::pixel_format::ycbcra;
+       default:                                                                        return core::pixel_format::invalid;
        }
 }
 
-core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int height)
+core::pixel_format_desc pixel_format_desc(AVPixelFormat pix_fmt, int width, int height)
 {
        // Get linesizes
        AVPicture dummy_pict;
@@ -159,25 +159,25 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>&
 
        const auto width  = decoded_frame->width;
        const auto height = decoded_frame->height;
-       auto desc                 = pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);
+       auto desc                 = pixel_format_desc(static_cast<AVPixelFormat>(decoded_frame->format), width, height);
 
        if(desc.format == core::pixel_format::invalid)
        {
-               auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);
-               auto target_pix_fmt = PIX_FMT_BGRA;
-
-               if(pix_fmt == PIX_FMT_UYVY422)
-                       target_pix_fmt = PIX_FMT_YUV422P;
-               else if(pix_fmt == PIX_FMT_YUYV422)
-                       target_pix_fmt = PIX_FMT_YUV422P;
-               else if(pix_fmt == PIX_FMT_UYYVYY411)
-                       target_pix_fmt = PIX_FMT_YUV411P;
-               else if(pix_fmt == PIX_FMT_YUV420P10)
-                       target_pix_fmt = PIX_FMT_YUV420P;
-               else if(pix_fmt == PIX_FMT_YUV422P10)
-                       target_pix_fmt = PIX_FMT_YUV422P;
-               else if(pix_fmt == PIX_FMT_YUV444P10)
-                       target_pix_fmt = PIX_FMT_YUV444P;
+               auto pix_fmt = static_cast<AVPixelFormat>(decoded_frame->format);
+               auto target_pix_fmt = AVPixelFormat::AV_PIX_FMT_BGRA;
+
+               if(pix_fmt == AVPixelFormat::AV_PIX_FMT_UYVY422)
+                       target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV422P;
+               else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_YUYV422)
+                       target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV422P;
+               else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_UYYVYY411)
+                       target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV411P;
+               else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_YUV420P10)
+                       target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV420P;
+               else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_YUV422P10)
+                       target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV422P;
+               else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_YUV444P10)
+                       target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV444P;
 
                auto target_desc = pixel_format_desc(target_pix_fmt, width, height);
 
@@ -207,9 +207,9 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>&
                }
 
                auto av_frame = create_frame();
-               if(target_pix_fmt == PIX_FMT_BGRA)
+               if(target_pix_fmt == AVPixelFormat::AV_PIX_FMT_BGRA)
                {
-                       auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write.image_data(0).begin(), PIX_FMT_BGRA, width, height);
+                       auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write.image_data(0).begin(), AVPixelFormat::AV_PIX_FMT_BGRA, width, height);
                        CASPAR_VERIFY(size == write.image_data(0).size());
                }
                else
@@ -289,25 +289,25 @@ spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core:
        switch(format)
        {
        case core::pixel_format::rgb:
-               av_frame->format = PIX_FMT_RGB24;
+               av_frame->format = AVPixelFormat::AV_PIX_FMT_RGB24;
                break;
        case core::pixel_format::bgr:
-               av_frame->format = PIX_FMT_BGR24;
+               av_frame->format = AVPixelFormat::AV_PIX_FMT_BGR24;
                break;
        case core::pixel_format::rgba:
-               av_frame->format = PIX_FMT_RGBA;
+               av_frame->format = AVPixelFormat::AV_PIX_FMT_RGBA;
                break;
        case core::pixel_format::argb:
-               av_frame->format = PIX_FMT_ARGB;
+               av_frame->format = AVPixelFormat::AV_PIX_FMT_ARGB;
                break;
        case core::pixel_format::bgra:
-               av_frame->format = PIX_FMT_BGRA;
+               av_frame->format = AVPixelFormat::AV_PIX_FMT_BGRA;
                break;
        case core::pixel_format::abgr:
-               av_frame->format = PIX_FMT_ABGR;
+               av_frame->format = AVPixelFormat::AV_PIX_FMT_ABGR;
                break;
        case core::pixel_format::gray:
-               av_frame->format = PIX_FMT_GRAY8;
+               av_frame->format = AVPixelFormat::AV_PIX_FMT_GRAY8;
                break;
        case core::pixel_format::ycbcr:
        {
@@ -317,20 +317,20 @@ spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core:
                int c_h = planes[1].height;
 
                if(c_h == y_h && c_w == y_w)
-                       av_frame->format = PIX_FMT_YUV444P;
+                       av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV444P;
                else if(c_h == y_h && c_w*2 == y_w)
-                       av_frame->format = PIX_FMT_YUV422P;
+                       av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV422P;
                else if(c_h == y_h && c_w*4 == y_w)
-                       av_frame->format = PIX_FMT_YUV411P;
+                       av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV411P;
                else if(c_h*2 == y_h && c_w*2 == y_w)
-                       av_frame->format = PIX_FMT_YUV420P;
+                       av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV420P;
                else if(c_h*2 == y_h && c_w*4 == y_w)
-                       av_frame->format = PIX_FMT_YUV410P;
+                       av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV410P;
 
                break;
        }
        case core::pixel_format::ycbcra:
-               av_frame->format = PIX_FMT_YUVA420P;
+               av_frame->format = AVPixelFormat::AV_PIX_FMT_YUVA420P;
                break;
        }
        return av_frame;
diff --git a/modules/ffmpeg/producer/util/util.h b/modules/ffmpeg/producer/util/util.h
index 4c1f45c06c51d294562a89ed3ef4409b55eab79d..999f7f163ce324f17860007eb160d235acdce9aa 100644
@@ -65,7 +65,7 @@ core::mutable_frame                                   make_frame(const void* tag, const spl::shared_ptr<AVFram
 spl::shared_ptr<AVFrame>                       make_av_frame(core::mutable_frame& frame);
 spl::shared_ptr<AVFrame>                       make_av_frame(std::array<uint8_t*, 4> data, const core::pixel_format_desc& pix_desc);
 
-core::pixel_format_desc                                pixel_format_desc(PixelFormat pix_fmt, int width, int height);
+core::pixel_format_desc                                pixel_format_desc(AVPixelFormat pix_fmt, int width, int height);
 
 spl::shared_ptr<AVPacket> create_packet();
 spl::shared_ptr<AVFrame>  create_frame();
diff --git a/modules/screen/consumer/screen_consumer.cpp b/modules/screen/consumer/screen_consumer.cpp
index 818bdf12e976c237f25faf6c7f946e444b9790c9..c095009863c4326e6f5f635b57f06855d78ed1cc 100644
@@ -429,9 +429,9 @@ public:
                auto av_frame = ffmpeg::create_frame();
 
                av_frame->linesize[0]           = format_desc_.width*4;
-               av_frame->format                        = PIX_FMT_BGRA;
+               av_frame->format                                = AVPixelFormat::AV_PIX_FMT_BGRA;
                av_frame->width                         = format_desc_.width;
-               av_frame->height                        = format_desc_.height;
+               av_frame->height                                = format_desc_.height;
                av_frame->interlaced_frame      = format_desc_.field_mode != core::field_mode::progressive;
                av_frame->top_field_first       = format_desc_.field_mode == core::field_mode::upper ? 1 : 0;
                av_frame->pts                           = pts_++;