#include <tbb/concurrent_queue.h>\r
#include <tbb/scalable_allocator.h>\r
\r
+#include <unordered_map>\r
+\r
#if defined(_MSC_VER)\r
#pragma warning (push)\r
#pragma warning (disable : 4244)\r
\r
namespace caspar { namespace core { namespace ffmpeg{\r
\r
+// Maps an FFmpeg PixelFormat onto the core pixel_format enum.\r
+// All planar YUV variants collapse to pixel_format::yuv (plane layout is\r
+// carried separately via the frame dimensions); YUVA420P keeps its alpha\r
+// plane as pixel_format::yuva. Unknown formats return invalid_pixel_format\r
+// so callers can fall back to a swscale conversion path.\r
+pixel_format get_pixel_format(PixelFormat pix_fmt)\r
+{\r
+ switch(pix_fmt)\r
+ {\r
+ case PIX_FMT_BGRA: return pixel_format::bgra;\r
+ case PIX_FMT_ARGB: return pixel_format::argb;\r
+ case PIX_FMT_RGBA: return pixel_format::rgba;\r
+ case PIX_FMT_ABGR: return pixel_format::abgr;\r
+ case PIX_FMT_YUV444P: return pixel_format::yuv;\r
+ case PIX_FMT_YUV422P: return pixel_format::yuv;\r
+ case PIX_FMT_YUV420P: return pixel_format::yuv;\r
+ case PIX_FMT_YUV411P: return pixel_format::yuv;\r
+ case PIX_FMT_YUV410P: return pixel_format::yuv;\r
+ case PIX_FMT_YUVA420P: return pixel_format::yuva;\r
+ default: return pixel_format::invalid_pixel_format;\r
+ }\r
+}\r
+\r
struct video_transformer::implementation : boost::noncopyable\r
{\r
+ // NOTE(review): hands back to the factory any frames that were created\r
+ // tagged with this transformer (see the create_frame(..., this) calls) —\r
+ // confirm release_frames' exact ownership semantics against the factory.\r
+ ~implementation()\r
+ {\r
+ if(factory_)\r
+ factory_->release_frames(this);\r
+ }\r
+\r
+ // Transforms a decoded FFmpeg frame into a render frame. RGB-family and\r
+ // planar-YUV formats are copied straight into factory frames (colorspace\r
+ // conversion deferred to the consumer); any other format is converted to\r
+ // BGRA on the CPU via swscale.\r
video_packet_ptr execute(const video_packet_ptr video_packet)\r
{ \r
assert(video_packet);\r
- size_t width = video_packet->codec_context->width;\r
- size_t height = video_packet->codec_context->height;\r
+ int width = video_packet->codec_context->width;\r
+ int height = video_packet->codec_context->height;\r
auto pix_fmt = video_packet->codec_context->pix_fmt;\r
\r
- if(!sws_context_)\r
+ switch(pix_fmt)\r
{\r
- double param;\r
- sws_context_.reset(sws_getContext(width, height, pix_fmt, width, height, \r
- PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);\r
- }\r
+ case PIX_FMT_BGRA:\r
+ case PIX_FMT_ARGB:\r
+ case PIX_FMT_RGBA:\r
+ case PIX_FMT_ABGR:\r
+ {\r
+ // Packed 4-byte-per-pixel formats: one aligned row copy per scanline.\r
+ video_packet->frame = factory_->create_frame(width, height, this);\r
+ tbb::parallel_for(0, height, 1, [&](int y)\r
+ {\r
+ common::aligned_memcpy(\r
+ video_packet->frame->data()+y*width*4, \r
+ video_packet->decoded_frame->data[0] + y*video_packet->decoded_frame->linesize[0], \r
+ width*4); \r
+ });\r
+ video_packet->frame->set_pixel_format(get_pixel_format(pix_fmt));\r
+ \r
+ break;\r
+ }\r
+ case PIX_FMT_YUV444P:\r
+ case PIX_FMT_YUV422P:\r
+ case PIX_FMT_YUV420P:\r
+ case PIX_FMT_YUV411P:\r
+ case PIX_FMT_YUV410P:\r
+ case PIX_FMT_YUVA420P:\r
+ { \r
+ // Get linesizes\r
+ AVPicture dummy_pict; \r
+ avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);\r
+ \r
+ // Find chroma height\r
+ size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];\r
+ size_t h2 = size2/dummy_pict.linesize[1];\r
\r
- //size_t pic_size = avpicture_get_size(PIX_FMT_YUV411P, width, height);\r
+ planar_frame_dimension data_size;\r
+ data_size[0] = std::make_pair(dummy_pict.linesize[0], height);\r
+ data_size[1] = std::make_pair(dummy_pict.linesize[1], h2);\r
+ data_size[2] = std::make_pair(dummy_pict.linesize[2], h2);\r
+ data_size[3] = std::make_pair(0, 0);\r
\r
- //size_t pic_size_sqr = static_cast<size_t>(sqrt(static_cast<double>(pic_size)))/4;\r
- //pic_size_sqr += pic_size_sqr % 2;\r
+ if(pix_fmt == PIX_FMT_YUVA420P) \r
+ data_size[3] = std::make_pair(dummy_pict.linesize[3], height);\r
\r
- video_packet->frame = factory_->create_frame(width, height);\r
- AVFrame av_frame; \r
- avcodec_get_frame_defaults(&av_frame);\r
- size_t size = avpicture_fill(reinterpret_cast<AVPicture*>(&av_frame), video_packet->frame->data(), PIX_FMT_BGRA, width, height);\r
+ video_packet->frame = factory_->create_frame(data_size, this);\r
+ video_packet->frame->set_pixel_format(get_pixel_format(pix_fmt));\r
+\r
+ tbb::parallel_for(0, static_cast<int>(data_size.size()), 1, [&](int n)\r
+ {\r
+ tbb::parallel_for(0, static_cast<int>(data_size[n].second), 1, [&](int y)\r
+ {\r
+ memcpy(\r
+ video_packet->frame->data(n)+y*dummy_pict.linesize[n], \r
+ video_packet->decoded_frame->data[n] + y*video_packet->decoded_frame->linesize[n], \r
+ dummy_pict.linesize[n]);\r
+ });\r
+ });\r
+ break;\r
+ } \r
+ default: \r
+ {\r
+ // Fallback: software convert to BGRA with a lazily created, cached\r
+ // swscale context (assumes pix_fmt/size are stable across packets).\r
+ video_packet->frame = factory_->create_frame(width, height, this);\r
+ video_packet->frame->set_pixel_format(pixel_format::bgra);\r
+\r
+ AVFrame av_frame; \r
+ avcodec_get_frame_defaults(&av_frame);\r
+ avpicture_fill(reinterpret_cast<AVPicture*>(&av_frame), video_packet->frame->data(), PIX_FMT_BGRA, width, height);\r
+\r
+ if(!sws_context_)\r
+ {\r
+ double param;\r
+ sws_context_.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);\r
+ } \r
\r
- sws_scale(sws_context_.get(), video_packet->decoded_frame->data, video_packet->decoded_frame->linesize, 0, height, av_frame.data, av_frame.linesize);\r
- \r
+ sws_scale(sws_context_.get(), video_packet->decoded_frame->data, video_packet->decoded_frame->linesize, 0, height, av_frame.data, av_frame.linesize); \r
+ }\r
+ }\r
+\r
if(video_packet->codec->id == CODEC_ID_DVVIDEO) // Move up one field\r
video_packet->frame->translate(0.0f, 1.0/static_cast<double>(video_packet->format_desc.height));\r
\r
- return video_packet; \r
+ return video_packet;\r
+ }\r
+\r
+ void initialize(const frame_factory_ptr& factory)\r
+ {\r
+ factory_ = factory;\r
}\r
\r
frame_factory_ptr factory_;\r
\r
video_transformer::video_transformer() : impl_(new implementation()){}\r
video_packet_ptr video_transformer::execute(const video_packet_ptr& video_packet){return impl_->execute(video_packet);}\r
-void video_transformer::initialize(const frame_factory_ptr& factory){impl_->factory_ = factory; }\r
+// Forward through initialize() so setup logic lives in the implementation.\r
+void video_transformer::initialize(const frame_factory_ptr& factory){impl_->initialize(factory); }\r
}}}
\ No newline at end of file