#include "../../../stdafx.h"

#include "video_transformer.h"

#include "../../../video/video_format.h"

#include "../../../../common/utility/memory.h"

#include "../../../processor/frame.h"
#include "../../../processor/frame_processor_device.h"

#include <cstring>
#include <unordered_map>

#include <tbb/parallel_for.h>
#include <tbb/atomic.h>
#include <tbb/mutex.h>
#include <tbb/concurrent_queue.h>
#include <tbb/scalable_allocator.h>

#define __STDC_CONSTANT_MACROS
#define __STDC_LIMIT_MACROS
#include <libswscale/swscale.h>
\r
26 namespace caspar { namespace core { namespace ffmpeg{
\r
28 pixel_format::type get_pixel_format(PixelFormat pix_fmt)
\r
32 case PIX_FMT_BGRA: return pixel_format::bgra;
\r
33 case PIX_FMT_ARGB: return pixel_format::argb;
\r
34 case PIX_FMT_RGBA: return pixel_format::rgba;
\r
35 case PIX_FMT_ABGR: return pixel_format::abgr;
\r
36 case PIX_FMT_YUV444P: return pixel_format::ycbcr;
\r
37 case PIX_FMT_YUV422P: return pixel_format::ycbcr;
\r
38 case PIX_FMT_YUV420P: return pixel_format::ycbcr;
\r
39 case PIX_FMT_YUV411P: return pixel_format::ycbcr;
\r
40 case PIX_FMT_YUV410P: return pixel_format::ycbcr;
\r
41 case PIX_FMT_YUVA420P: return pixel_format::ycbcra;
\r
42 default: return pixel_format::invalid;
\r
46 struct video_transformer::implementation : boost::noncopyable
\r
48 implementation() : sw_warning_(false){}
\r
52 if(frame_processor_)
\r
53 frame_processor_->release_tag(this);
\r
56 video_packet_ptr execute(const video_packet_ptr video_packet)
\r
58 assert(video_packet);
\r
59 int width = video_packet->codec_context->width;
\r
60 int height = video_packet->codec_context->height;
\r
61 auto pix_fmt = video_packet->codec_context->pix_fmt;
\r
62 video_packet->decoded_frame;
\r
71 video_packet->frame = frame_processor_->create_frame(width, height, this);
\r
72 tbb::parallel_for(0, height, 1, [&](int y)
\r
74 common::aligned_memcpy(
\r
75 video_packet->frame->data()+y*width*4,
\r
76 video_packet->decoded_frame->data[0] + y*video_packet->decoded_frame->linesize[0],
\r
79 video_packet->frame->pix_fmt(get_pixel_format(pix_fmt));
\r
83 case PIX_FMT_YUV444P:
\r
84 case PIX_FMT_YUV422P:
\r
85 case PIX_FMT_YUV420P:
\r
86 case PIX_FMT_YUV411P:
\r
87 case PIX_FMT_YUV410P:
\r
88 case PIX_FMT_YUVA420P:
\r
91 AVPicture dummy_pict;
\r
92 avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);
\r
94 // Find chroma height
\r
95 size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];
\r
96 size_t h2 = size2/dummy_pict.linesize[1];
\r
98 pixel_format_desc desc;
\r
99 desc.planes[0] = pixel_format_desc::plane(dummy_pict.linesize[0], height, 1);
\r
100 desc.planes[1] = pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1);
\r
101 desc.planes[2] = pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1);
\r
103 if(pix_fmt == PIX_FMT_YUVA420P)
\r
104 desc.planes[3] = pixel_format_desc::plane(dummy_pict.linesize[3], height, 1);
\r
106 desc.pix_fmt = get_pixel_format(pix_fmt);
\r
107 video_packet->frame = frame_processor_->create_frame(desc, this);
\r
109 tbb::parallel_for(0, static_cast<int>(desc.planes.size()), 1, [&](int n)
\r
111 if(desc.planes[n].size == 0)
\r
114 tbb::parallel_for(0, static_cast<int>(desc.planes[n].height), 1, [&](int y)
\r
117 video_packet->frame->data(n)+y*dummy_pict.linesize[n],
\r
118 video_packet->decoded_frame->data[n] + y*video_packet->decoded_frame->linesize[n],
\r
119 dummy_pict.linesize[n]);
\r
128 CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";
\r
129 sw_warning_ = true;
\r
131 video_packet->frame = frame_processor_->create_frame(width, height, this);
\r
134 avcodec_get_frame_defaults(&av_frame);
\r
135 avpicture_fill(reinterpret_cast<AVPicture*>(&av_frame), video_packet->frame->data(), PIX_FMT_BGRA, width, height);
\r
140 sws_context_.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);
\r
143 sws_scale(sws_context_.get(), video_packet->decoded_frame->data, video_packet->decoded_frame->linesize, 0, height, av_frame.data, av_frame.linesize);
\r
148 if(video_packet->codec->id == CODEC_ID_DVVIDEO) // Move up one field
\r
149 video_packet->frame->translate(0.0f, 1.0/static_cast<double>(frame_processor_->get_video_format_desc().height));
\r
151 return video_packet;
\r
154 void initialize(const frame_processor_device_ptr& frame_processor)
\r
156 frame_processor_ = frame_processor;
\r
159 frame_processor_device_ptr frame_processor_;
\r
160 std::shared_ptr<SwsContext> sws_context_;
\r
164 video_transformer::video_transformer() : impl_(new implementation()){}
\r
165 video_packet_ptr video_transformer::execute(const video_packet_ptr& video_packet){return impl_->execute(video_packet);}
\r
166 void video_transformer::initialize(const frame_processor_device_ptr& frame_processor){impl_->initialize(frame_processor); }
\r