2.0. Started with deinterlace framework.
diff --git a/modules/ffmpeg/producer/video/video_decoder.cpp b/modules/ffmpeg/producer/video/video_decoder.cpp
index e88fa67ccfc5959cf6342d9bf84cc896ef09b9fc..967dad5495937b432238ab74a0e84f3ce79d17cf 100644
--- a/modules/ffmpeg/producer/video/video_decoder.cpp
+++ b/modules/ffmpeg/producer/video/video_decoder.cpp
 \r
 #include <core/video_format.h>\r
 #include <core/producer/frame/basic_frame.h>\r
-#include <core/producer/frame/write_frame.h>\r
+#include <core/mixer/write_frame.h>\r
 #include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/pixel_format.h>\r
+#include <core/producer/frame/frame_factory.h>\r
 \r
 #include <tbb/parallel_for.h>\r
 \r
+#include <boost/range/algorithm_ext.hpp>\r
+\r
 #if defined(_MSC_VER)\r
 #pragma warning (push)\r
 #pragma warning (disable : 4244)\r
@@ -41,6 +45,7 @@ extern "C"
        #define __STDC_LIMIT_MACROS\r
        #include <libswscale/swscale.h>\r
        #include <libavformat/avformat.h>\r
+       #include <libavcodec/avcodec.h>\r
 }\r
 #if defined(_MSC_VER)\r
 #pragma warning (pop)\r
@@ -113,23 +118,27 @@ core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width,
 }\r
 \r
 struct video_decoder::implementation : boost::noncopyable\r
-{      \r
+{\r
+       input& input_;\r
        std::shared_ptr<SwsContext>                                     sws_context_;\r
        const std::shared_ptr<core::frame_factory>      frame_factory_;\r
-       AVCodecContext*                                                         codec_context_;\r
+       AVCodecContext&                                                         codec_context_;\r
        const int                                                                       width_;\r
        const int                                                                       height_;\r
        const PixelFormat                                                       pix_fmt_;\r
        core::pixel_format_desc                                         desc_;\r
+       size_t                                                                          frame_number_;\r
 \r
 public:\r
-       explicit implementation(AVCodecContext* codec_context, const safe_ptr<core::frame_factory>& frame_factory) \r
-               : frame_factory_(frame_factory)\r
-               , codec_context_(codec_context)\r
-               , width_(codec_context_->width)\r
-               , height_(codec_context_->height)\r
-               , pix_fmt_(codec_context_->pix_fmt)\r
+       explicit implementation(input& input, const safe_ptr<core::frame_factory>& frame_factory) \r
+               : input_(input)\r
+               , frame_factory_(frame_factory)\r
+               , codec_context_(*input_.get_video_codec_context())\r
+               , width_(codec_context_.width)\r
+               , height_(codec_context_.height)\r
+               , pix_fmt_(codec_context_.pix_fmt)\r
                , desc_(get_pixel_format_desc(pix_fmt_, width_, height_))\r
+               , frame_number_(0)\r
        {\r
                if(desc_.pix_fmt == core::pixel_format::invalid)\r
                {\r
@@ -144,24 +153,33 @@ public:
                                                                          boost::errinfo_api_function("sws_getContext"));\r
                }\r
        }\r
-       \r
-       std::vector<safe_ptr<core::write_frame>> execute(void* tag, const std::shared_ptr<aligned_buffer>& video_packet)\r
-       {                               \r
-               std::vector<safe_ptr<core::write_frame>> result;\r
 \r
-               if(!video_packet)\r
-                       return result;\r
+       std::deque<std::pair<int, safe_ptr<core::write_frame>>> receive()\r
+       {\r
+               std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;\r
+               \r
+               std::shared_ptr<AVPacket> pkt;\r
+               for(int n = 0; n < 32 && result.empty() && input_.try_pop_video_packet(pkt); ++n)       \r
+                       boost::range::push_back(result, decode(pkt));\r
 \r
-               if(video_packet->empty()) // Need to flush\r
-               {\r
-                       avcodec_flush_buffers(codec_context_);\r
+               return result;\r
+       }\r
+\r
+       std::deque<std::pair<int, safe_ptr<core::write_frame>>> decode(const std::shared_ptr<AVPacket>& video_packet)\r
+       {                       \r
+               std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;\r
+\r
+               if(!video_packet) // eof\r
+               {       \r
+                       avcodec_flush_buffers(&codec_context_);\r
+                       frame_number_ = 0;\r
                        return result;\r
                }\r
 \r
                safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
 \r
                int frame_finished = 0;\r
-               const int errn = avcodec_decode_video(codec_context_, decoded_frame.get(), &frame_finished, video_packet->data(), video_packet->size());\r
+               const int errn = avcodec_decode_video2(&codec_context_, decoded_frame.get(), &frame_finished, video_packet.get());\r
                \r
                if(errn < 0)\r
                {\r
@@ -172,17 +190,17 @@ public:
                                boost::errinfo_errno(AVUNERROR(errn)));\r
                }\r
                \r
-               if(frame_finished == 0)\r
-                       return result;\r
-               \r
-               result.push_back(make_write_frame(tag, decoded_frame));\r
+               if(frame_finished != 0)         \r
+                       result.push_back(std::make_pair(frame_number_++, make_write_frame(decoded_frame)));\r
 \r
                return result;\r
        }\r
 \r
-       safe_ptr<core::write_frame> make_write_frame(void* tag, safe_ptr<AVFrame> decoded_frame)\r
+       safe_ptr<core::write_frame> make_write_frame(safe_ptr<AVFrame> decoded_frame)\r
        {               \r
-               auto write = frame_factory_->create_frame(tag, desc_);\r
+               auto write = frame_factory_->create_frame(this, desc_);\r
+               write->set_is_interlaced(decoded_frame->interlaced_frame != 0);\r
+\r
                if(sws_context_ == nullptr)\r
                {\r
                        tbb::parallel_for(0, static_cast<int>(desc_.planes.size()), 1, [&](int n)\r
@@ -193,31 +211,36 @@ public:
                                auto decoded_linesize = decoded_frame->linesize[n];\r
                                \r
                                // Copy line by line since ffmpeg sometimes pads each line.\r
-                               tbb::parallel_for(0, static_cast<int>(desc_.planes[n].height), 1, [&](int y)\r
+                               tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc_.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)\r
                                {\r
-                                       fast_memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
+                                       for(size_t y = r.begin(); y != r.end(); ++y)\r
+                                               memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
                                });\r
+\r
+                               write->commit(n);\r
                        });\r
                }\r
                else\r
                {\r
-                       // Uses sws_scale when we don't support the provided colorspace.\r
+                       // Use sws_scale when provided colorspace has no hw-accel.\r
                        safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);     \r
                        avcodec_get_frame_defaults(av_frame.get());                     \r
                        avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width_, height_);\r
                 \r
                        sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height_, av_frame->data, av_frame->linesize);    \r
+\r
+                       write->commit();\r
                }       \r
 \r
                // DVVIDEO is in lower field. Make it upper field if needed.\r
-               if(codec_context_->codec_id == CODEC_ID_DVVIDEO && frame_factory_->get_video_format_desc().mode == core::video_mode::upper)\r
-                       write->get_image_transform().set_fill_translation(0.0f, 1.0/static_cast<double>(height_));\r
+               if(codec_context_.codec_id == CODEC_ID_DVVIDEO && frame_factory_->get_video_format_desc().mode == core::video_mode::upper)\r
+                       write->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(height_));\r
 \r
                return write;\r
        }\r
 };\r
 \r
-video_decoder::video_decoder(AVCodecContext* codec_context, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(codec_context, frame_factory)){}\r
-std::vector<safe_ptr<core::write_frame>> video_decoder::execute(void* tag, const std::shared_ptr<aligned_buffer>& video_packet){return impl_->execute(tag, video_packet);}\r
+video_decoder::video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(input, frame_factory)){}\r
+std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_decoder::receive(){return impl_->receive();}\r
 \r
 }
\ No newline at end of file
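

Note on the "Copy line by line since ffmpeg sometimes pads each line" block above: FFmpeg may allocate each decoded plane with linesize[n] larger than the number of visible bytes per row, so copying height*width bytes in one memcpy would pull that padding into the image. A minimal standalone sketch of the idea, outside the CasparCG API (copy_plane and its parameters are illustrative names, not part of this codebase):

    #include <cstring>
    #include <cstdint>
    #include <cstddef>

    // Copy one image plane row by row, honouring the (possibly padded) source
    // and destination strides instead of assuming rows are packed back to back.
    void copy_plane(std::uint8_t* dst, std::size_t dst_linesize,
                    const std::uint8_t* src, std::size_t src_linesize,
                    std::size_t bytes_per_row, std::size_t rows)
    {
        for(std::size_t y = 0; y < rows; ++y)
            std::memcpy(dst + y*dst_linesize, src + y*src_linesize, bytes_per_row);
    }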
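
Note on receive()/decode() above: a null packet is treated as end-of-file, so the decoder buffers are flushed and the frame counter reset, while a non-null packet goes to avcodec_decode_video2, which may legitimately yield no picture for several packets because of codec delay and B-frame reordering; that is why receive() keeps popping packets (up to 32) until a frame appears. A minimal sketch of that per-packet flow against the FFmpeg API of this era (decode_one is an illustrative name, not CasparCG code), assuming the codec context is already opened:

    #include <stdexcept>

    extern "C"
    {
        #include <libavcodec/avcodec.h>
    }

    // Decode one packet; returns true when 'frame' holds a finished picture.
    // A null packet stands for end-of-stream and only resets the decoder state.
    bool decode_one(AVCodecContext& ctx, const AVPacket* pkt, AVFrame* frame)
    {
        if(!pkt)
        {
            avcodec_flush_buffers(&ctx); // drop buffered reference frames before looping/seeking
            return false;
        }

        int frame_finished = 0;
        const int errn = avcodec_decode_video2(&ctx, frame, &frame_finished, pkt);
        if(errn < 0)
            throw std::runtime_error("avcodec_decode_video2 failed");

        return frame_finished != 0;
    }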