#include "../../stdafx.h"

#include "video_decoder.h"

#include "../util.h"
#include "../filter/filter.h"

#include "../../ffmpeg_error.h"
#include "../../tbb_avcodec.h"

#include <common/memory/memcpy.h>

#include <core/video_format.h>
#include <core/producer/frame/basic_frame.h>
#include <core/mixer/write_frame.h>
#include <core/producer/frame/image_transform.h>
#include <core/producer/frame/frame_factory.h>
#include <core/producer/color/color_producer.h>

#include <tbb/task_group.h>

#include <boost/noncopyable.hpp>
#include <boost/range/algorithm_ext.hpp>

#include <cmath>
#include <memory>
#include <queue>
#include <string>
#include <utility>
#include <vector>

#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C" 
{
	#define __STDC_CONSTANT_MACROS
	#define __STDC_LIMIT_MACROS
	#include <libavformat/avformat.h>
	#include <libavcodec/avcodec.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif
\r
namespace caspar {\r
- \r
-core::pixel_format::type get_pixel_format(PixelFormat pix_fmt)\r
+ \r
+struct video_decoder::implementation : boost::noncopyable\r
{\r
- switch(pix_fmt)\r
+ const safe_ptr<core::frame_factory> frame_factory_;\r
+ std::shared_ptr<AVCodecContext> codec_context_;\r
+ int index_;\r
+ core::video_mode::type mode_;\r
+\r
+ std::queue<std::shared_ptr<AVPacket>> packet_buffer_;\r
+\r
+ std::unique_ptr<filter> filter_;\r
+\r
+ double fps_;\r
+public:\r
+ explicit implementation(const std::shared_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter) \r
+ : frame_factory_(frame_factory)\r
+ , mode_(core::video_mode::invalid)\r
+ //, filter_(filter.empty() ? nullptr : new caspar::filter(filter))\r
{\r
- case PIX_FMT_GRAY8: return core::pixel_format::gray;\r
- case PIX_FMT_BGRA: return core::pixel_format::bgra;\r
- case PIX_FMT_ARGB: return core::pixel_format::argb;\r
- case PIX_FMT_RGBA: return core::pixel_format::rgba;\r
- case PIX_FMT_ABGR: return core::pixel_format::abgr;\r
- case PIX_FMT_YUV444P: return core::pixel_format::ycbcr;\r
- case PIX_FMT_YUV422P: return core::pixel_format::ycbcr;\r
- case PIX_FMT_YUV420P: return core::pixel_format::ycbcr;\r
- case PIX_FMT_YUV411P: return core::pixel_format::ycbcr;\r
- case PIX_FMT_YUV410P: return core::pixel_format::ycbcr;\r
- case PIX_FMT_YUVA420P: return core::pixel_format::ycbcra;\r
- default: return core::pixel_format::invalid;\r
- }\r
-}\r
+ AVCodec* dec;\r
+ index_ = av_find_best_stream(context.get(), AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);\r
+\r
+ if(index_ < 0)\r
+ return;\r
+ \r
+ int errn = tbb_avcodec_open(context->streams[index_]->codec, dec);\r
+ if(errn < 0)\r
+ return;\r
+ \r
+ codec_context_.reset(context->streams[index_]->codec, tbb_avcodec_close);\r
\r
-core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)\r
-{\r
- // Get linesizes\r
- AVPicture dummy_pict; \r
- avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);\r
+ // Some files give an invalid time_base numerator, try to fix it.\r
+ if(codec_context_ && codec_context_->time_base.num == 1)\r
+ codec_context_->time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(codec_context_->time_base.den)))-1)); \r
\r
- core::pixel_format_desc desc;\r
- desc.pix_fmt = get_pixel_format(pix_fmt);\r
+ fps_ = static_cast<double>(codec_context_->time_base.den) / static_cast<double>(codec_context_->time_base.num);\r
+ //if(double_rate(filter))\r
+ // fps_ *= 2;\r
+ }\r
\r
- switch(desc.pix_fmt)\r
+ void push(const std::shared_ptr<AVPacket>& packet)\r
{\r
- case core::pixel_format::gray:\r
- {\r
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 1)); \r
- return desc;\r
- }\r
- case core::pixel_format::bgra:\r
- case core::pixel_format::argb:\r
- case core::pixel_format::rgba:\r
- case core::pixel_format::abgr:\r
- {\r
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4)); \r
- return desc;\r
- }\r
- case core::pixel_format::ycbcr:\r
- case core::pixel_format::ycbcra:\r
- { \r
- // Find chroma height\r
- size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];\r
- size_t h2 = size2/dummy_pict.linesize[1]; \r
-\r
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));\r
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));\r
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));\r
-\r
- if(desc.pix_fmt == core::pixel_format::ycbcra) \r
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1)); \r
- return desc;\r
- } \r
- default: \r
- desc.pix_fmt = core::pixel_format::invalid;\r
- return desc;\r
- }\r
-}\r
+ if(!codec_context_)\r
+ return;\r
\r
-struct video_decoder::implementation : boost::noncopyable\r
-{ \r
- std::shared_ptr<SwsContext> sws_context_;\r
- const std::shared_ptr<core::frame_factory> frame_factory_;\r
- AVCodecContext& codec_context_;\r
- const int width_;\r
- const int height_;\r
- const PixelFormat pix_fmt_;\r
- core::pixel_format_desc desc_;\r
+ if(packet && packet->stream_index != index_)\r
+ return;\r
\r
-public:\r
- explicit implementation(AVCodecContext& codec_context, const safe_ptr<core::frame_factory>& frame_factory) \r
- : frame_factory_(frame_factory)\r
- , codec_context_(codec_context)\r
- , width_(codec_context_.width)\r
- , height_(codec_context_.height)\r
- , pix_fmt_(codec_context_.pix_fmt)\r
- , desc_(get_pixel_format_desc(pix_fmt_, width_, height_))\r
- {\r
- if(desc_.pix_fmt == core::pixel_format::invalid)\r
- {\r
- CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
-\r
- desc_ = get_pixel_format_desc(PIX_FMT_BGRA, width_, height_);\r
- double param;\r
- sws_context_.reset(sws_getContext(width_, height_, pix_fmt_, width_, height_, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);\r
- if(!sws_context_)\r
- BOOST_THROW_EXCEPTION(operation_failed() <<\r
- msg_info("Could not create software scaling context.") << \r
- boost::errinfo_api_function("sws_getContext"));\r
- }\r
+ packet_buffer_.push(packet);\r
}\r
- \r
- std::vector<safe_ptr<core::write_frame>> execute(const packet& video_packet)\r
- { \r
+\r
+ std::vector<safe_ptr<core::write_frame>> poll()\r
+ { \r
std::vector<safe_ptr<core::write_frame>> result;\r
\r
- switch(video_packet.type)\r
+ if(!codec_context_)\r
+ result.push_back(make_safe<core::write_frame>());\r
+ else if(!packet_buffer_.empty())\r
{\r
- case flush_packet:\r
- avcodec_flush_buffers(&codec_context_);\r
- break;\r
- case data_packet: \r
- safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
-\r
- int frame_finished = 0;\r
- const int errn = avcodec_decode_video(&codec_context_, decoded_frame.get(), &frame_finished, video_packet.data->data(), video_packet.data->size());\r
+ auto packet = std::move(packet_buffer_.front());\r
+ packet_buffer_.pop();\r
\r
- if(errn < 0)\r
+ if(!packet) // eof\r
+ { \r
+ if(codec_context_->codec->capabilities | CODEC_CAP_DELAY)\r
+ {\r
+ // FIXME: This might cause bad performance.\r
+ AVPacket pkt = {0};\r
+ auto frame = decode_frame(pkt);\r
+ if(frame)\r
+ result.push_back(make_write_frame(this, make_safe(frame), frame_factory_));\r
+ }\r
+\r
+ avcodec_flush_buffers(codec_context_.get());\r
+ }\r
+ else\r
{\r
- BOOST_THROW_EXCEPTION(\r
- invalid_operation() <<\r
- msg_info(av_error_str(errn)) <<\r
- boost::errinfo_api_function("avcodec_decode_video") <<\r
- boost::errinfo_errno(AVUNERROR(errn)));\r
+ auto frame = decode_frame(*packet);\r
+ if(frame)\r
+ {\r
+ auto frame2 = make_write_frame(this, make_safe(frame), frame_factory_); \r
+ mode_ = frame2->get_type();\r
+ result.push_back(std::move(frame2));\r
+ }\r
}\r
- \r
- if(frame_finished != 0) \r
- result.push_back(make_write_frame(decoded_frame));\r
}\r
-\r
+ \r
return result;\r
}\r
\r
- safe_ptr<core::write_frame> make_write_frame(safe_ptr<AVFrame> decoded_frame)\r
- { \r
- auto write = frame_factory_->create_frame(this, desc_);\r
- if(sws_context_ == nullptr)\r
+ std::shared_ptr<AVFrame> decode_frame(AVPacket& packet)\r
+ {\r
+ std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
+\r
+ int frame_finished = 0;\r
+ const int errn = avcodec_decode_video2(codec_context_.get(), decoded_frame.get(), &frame_finished, &packet);\r
+ \r
+ if(errn < 0)\r
{\r
- tbb::parallel_for(0, static_cast<int>(desc_.planes.size()), 1, [&](int n)\r
- {\r
- auto plane = desc_.planes[n];\r
- auto result = write->image_data(n).begin();\r
- auto decoded = decoded_frame->data[n];\r
- auto decoded_linesize = decoded_frame->linesize[n];\r
- \r
- // Copy line by line since ffmpeg sometimes pads each line.\r
- tbb::parallel_for(0, static_cast<int>(desc_.planes[n].height), 1, [&](int y)\r
- {\r
- fast_memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
- });\r
- });\r
+ BOOST_THROW_EXCEPTION(\r
+ invalid_operation() <<\r
+ msg_info(av_error_str(errn)) <<\r
+ boost::errinfo_api_function("avcodec_decode_video") <<\r
+ boost::errinfo_errno(AVUNERROR(errn)));\r
}\r
- else\r
- {\r
- // Use sws_scale when provided colorspace has no hw-accel.\r
- safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); \r
- avcodec_get_frame_defaults(av_frame.get()); \r
- avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width_, height_);\r
- \r
- sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height_, av_frame->data, av_frame->linesize); \r
- } \r
-\r
- // DVVIDEO is in lower field. Make it upper field if needed.\r
- if(codec_context_.codec_id == CODEC_ID_DVVIDEO && frame_factory_->get_video_format_desc().mode == core::video_mode::upper)\r
- write->get_image_transform().set_fill_translation(0.0f, 1.0/static_cast<double>(height_));\r
-\r
- return write;\r
+\r
+ if(frame_finished == 0) \r
+ decoded_frame.reset();\r
+\r
+ return decoded_frame;\r
+ }\r
+\r
+ bool ready() const\r
+ {\r
+ return !codec_context_ || !packet_buffer_.empty();\r
+ }\r
+ \r
+ core::video_mode::type mode()\r
+ {\r
+ if(!codec_context_)\r
+ return frame_factory_->get_video_format_desc().mode;\r
+\r
+ return mode_;\r
}\r
-};\r
\r
-video_decoder::video_decoder(AVCodecContext& codec_context, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(codec_context, frame_factory)){}\r
-std::vector<safe_ptr<core::write_frame>> video_decoder::execute(const packet& video_packet){return impl_->execute(video_packet);}\r
+ double fps() const\r
+ {\r
+ return fps_;\r
+ }\r
+};\r
\r
+video_decoder::video_decoder(const std::shared_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter) : impl_(new implementation(context, frame_factory, filter)){}\r
+void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
+std::vector<safe_ptr<core::write_frame>> video_decoder::poll(){return impl_->poll();}\r
+bool video_decoder::ready() const{return impl_->ready();}\r
+core::video_mode::type video_decoder::mode(){return impl_->mode();}\r
+double video_decoder::fps() const{return impl_->fps();}\r
}
\ No newline at end of file