git.sesse.net Git - casparcg/blobdiff - modules/ffmpeg/producer/video/video_decoder.cpp
2.0: - Changed seeking syntax to SEEK [value]
[casparcg] / modules / ffmpeg / producer / video / video_decoder.cpp
index 9ce8f7e14a29ce1309e4bc44f97b61e30427b7fe..ec7c15928d743fe3851d4a0e070e32a785c3ccaa 100644 (file)
@@ -21,6 +21,8 @@
 \r
 #include "video_decoder.h"\r
 #include "../../ffmpeg_error.h"\r
+#include "../../util/util.h"\r
+#include "../../util/filter.h"\r
 \r
 #include <common/memory/memcpy.h>\r
 \r
@@ -33,6 +35,8 @@
 \r
 #include <tbb/parallel_for.h>\r
 \r
+#include <boost/range/algorithm_ext.hpp>\r
+\r
 #if defined(_MSC_VER)\r
 #pragma warning (push)\r
 #pragma warning (disable : 4244)\r
@@ -51,88 +55,29 @@ extern "C"
 \r
 namespace caspar {\r
        \r
-core::pixel_format::type get_pixel_format(PixelFormat pix_fmt)\r
-{\r
-       switch(pix_fmt)\r
-       {\r
-       case PIX_FMT_GRAY8:             return core::pixel_format::gray;\r
-       case PIX_FMT_BGRA:              return core::pixel_format::bgra;\r
-       case PIX_FMT_ARGB:              return core::pixel_format::argb;\r
-       case PIX_FMT_RGBA:              return core::pixel_format::rgba;\r
-       case PIX_FMT_ABGR:              return core::pixel_format::abgr;\r
-       case PIX_FMT_YUV444P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUV422P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUV420P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUV411P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUV410P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUVA420P:  return core::pixel_format::ycbcra;\r
-       default:                                return core::pixel_format::invalid;\r
-       }\r
-}\r
-\r
-core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)\r
-{\r
-       // Get linesizes\r
-       AVPicture dummy_pict;   \r
-       avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);\r
-\r
-       core::pixel_format_desc desc;\r
-       desc.pix_fmt = get_pixel_format(pix_fmt);\r
-               \r
-       switch(desc.pix_fmt)\r
-       {\r
-       case core::pixel_format::gray:\r
-               {\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 1));                                             \r
-                       return desc;\r
-               }\r
-       case core::pixel_format::bgra:\r
-       case core::pixel_format::argb:\r
-       case core::pixel_format::rgba:\r
-       case core::pixel_format::abgr:\r
-               {\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));                                             \r
-                       return desc;\r
-               }\r
-       case core::pixel_format::ycbcr:\r
-       case core::pixel_format::ycbcra:\r
-               {               \r
-                       // Find chroma height\r
-                       size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];\r
-                       size_t h2 = size2/dummy_pict.linesize[1];                       \r
-\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));\r
-\r
-                       if(desc.pix_fmt == core::pixel_format::ycbcra)                                          \r
-                               desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));       \r
-                       return desc;\r
-               }               \r
-       default:                \r
-               desc.pix_fmt = core::pixel_format::invalid;\r
-               return desc;\r
-       }\r
-}\r
-\r
 struct video_decoder::implementation : boost::noncopyable\r
-{      \r
+{\r
+       input& input_;\r
        std::shared_ptr<SwsContext>                                     sws_context_;\r
-       const std::shared_ptr<core::frame_factory>      frame_factory_;\r
+       const safe_ptr<core::frame_factory>                     frame_factory_;\r
        AVCodecContext&                                                         codec_context_;\r
        const int                                                                       width_;\r
        const int                                                                       height_;\r
        const PixelFormat                                                       pix_fmt_;\r
        core::pixel_format_desc                                         desc_;\r
+       size_t                                                                          frame_number_;\r
+       std::shared_ptr<filter>                                         filter_;\r
 \r
 public:\r
-       explicit implementation(AVCodecContext& codec_context, const safe_ptr<core::frame_factory>& frame_factory) \r
-               : frame_factory_(frame_factory)\r
-               , codec_context_(codec_context)\r
+       explicit implementation(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str) \r
+               : input_(input)\r
+               , frame_factory_(frame_factory)\r
+               , codec_context_(*input_.get_video_codec_context())\r
                , width_(codec_context_.width)\r
                , height_(codec_context_.height)\r
                , pix_fmt_(codec_context_.pix_fmt)\r
                , desc_(get_pixel_format_desc(pix_fmt_, width_, height_))\r
+               , frame_number_(0)\r
        {\r
                if(desc_.pix_fmt == core::pixel_format::invalid)\r
                {\r
@@ -146,35 +91,67 @@ public:
                                                                          msg_info("Could not create software scaling context.") << \r
                                                                          boost::errinfo_api_function("sws_getContext"));\r
                }\r
+\r
+               if(!filter_str.empty())\r
+                       filter_.reset(new filter(filter_str));\r
        }\r
-       \r
-       std::vector<safe_ptr<core::write_frame>> execute(packet&& video_packet)\r
-       {                               \r
-               std::vector<safe_ptr<core::write_frame>> result;\r
 \r
-               switch(video_packet.type)\r
-               {\r
-               case flush_packet:\r
+       std::deque<std::pair<int, safe_ptr<core::write_frame>>> receive()\r
+       {\r
+               std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;\r
+               \r
+               std::shared_ptr<AVPacket> pkt;\r
+               for(int n = 0; n < 32 && result.empty() && input_.try_pop_video_packet(pkt); ++n)       \r
+                       boost::range::push_back(result, decode(pkt));\r
+\r
+               return result;\r
+       }\r
+\r
+       std::deque<std::pair<int, safe_ptr<core::write_frame>>> decode(const std::shared_ptr<AVPacket>& video_packet)\r
+       {                       \r
+               std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;\r
+\r
+               if(!video_packet) // eof\r
+               {       \r
                        avcodec_flush_buffers(&codec_context_);\r
-                       break;\r
-               case data_packet:               \r
-                       safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
+                       frame_number_ = 0;\r
+                       return result;\r
+               }\r
+\r
+               safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
 \r
-                       int frame_finished = 0;\r
-                       const int errn = avcodec_decode_video2(&codec_context_, decoded_frame.get(), &frame_finished, video_packet.av_packet.get());\r
+               int frame_finished = 0;\r
+               const int errn = avcodec_decode_video2(&codec_context_, decoded_frame.get(), &frame_finished, video_packet.get());\r
                \r
-                       if(errn < 0)\r
+               if(errn < 0)\r
+               {\r
+                       BOOST_THROW_EXCEPTION(\r
+                               invalid_operation() <<\r
+                               msg_info(av_error_str(errn)) <<\r
+                               boost::errinfo_api_function("avcodec_decode_video") <<\r
+                               boost::errinfo_errno(AVUNERROR(errn)));\r
+               }\r
+               \r
+               if(frame_finished != 0)         \r
+               {\r
+                       if(filter_)\r
                        {\r
-                               BOOST_THROW_EXCEPTION(\r
-                                       invalid_operation() <<\r
-                                       msg_info(av_error_str(errn)) <<\r
-                                       boost::errinfo_api_function("avcodec_decode_video") <<\r
-                                       boost::errinfo_errno(AVUNERROR(errn)));\r
+                               filter_->push(decoded_frame);\r
+\r
+                               std::shared_ptr<AVFrame> frame;\r
+                               while(filter_->try_pop(frame))\r
+                                       result.push_back(std::make_pair(frame_number_, make_write_frame(make_safe(frame))));\r
                        }\r
-               \r
-                       if(frame_finished != 0)         \r
-                               result.push_back(make_write_frame(decoded_frame));\r
+                       else\r
+                               result.push_back(std::make_pair(frame_number_, make_write_frame(decoded_frame)));\r
+                       \r
+                       if(!result.empty())\r
+                               ++frame_number_;\r
+                       else\r
+                               CASPAR_LOG(trace) << " Filter delay.";\r
                }\r
+               else\r
+                       CASPAR_LOG(trace) << " Decoder delay.";\r
 \r
                return result;\r
        }\r
@@ -182,6 +159,8 @@ public:
        safe_ptr<core::write_frame> make_write_frame(safe_ptr<AVFrame> decoded_frame)\r
        {               \r
                auto write = frame_factory_->create_frame(this, desc_);\r
+               write->set_is_interlaced(decoded_frame->interlaced_frame != 0);\r
+\r
                if(sws_context_ == nullptr)\r
                {\r
                        tbb::parallel_for(0, static_cast<int>(desc_.planes.size()), 1, [&](int n)\r
@@ -197,6 +176,8 @@ public:
                                        for(size_t y = r.begin(); y != r.end(); ++y)\r
                                                memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
                                });\r
+\r
+                               write->commit(n);\r
                        });\r
                }\r
                else\r
@@ -207,17 +188,19 @@ public:
                        avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width_, height_);\r
                 \r
                        sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height_, av_frame->data, av_frame->linesize);    \r
+\r
+                       write->commit();\r
                }       \r
 \r
                // DVVIDEO is in lower field. Make it upper field if needed.\r
                if(codec_context_.codec_id == CODEC_ID_DVVIDEO && frame_factory_->get_video_format_desc().mode == core::video_mode::upper)\r
-                       write->get_image_transform().set_fill_translation(0.0f, 1.0/static_cast<double>(height_));\r
+                       write->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(height_));\r
 \r
                return write;\r
        }\r
 };\r
 \r
-video_decoder::video_decoder(AVCodecContext& codec_context, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(codec_context, frame_factory)){}\r
-std::vector<safe_ptr<core::write_frame>> video_decoder::execute(packet&& video_packet){return impl_->execute(std::move(video_packet));}\r
+video_decoder::video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter) : impl_(new implementation(input, frame_factory, filter)){}\r
+std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_decoder::receive(){return impl_->receive();}\r
 \r
 }
\ No newline at end of file