// CasparCG 2.0.0.2 — core/producer/ffmpeg/video/video_transformer.cpp
1 #include "../../../stdafx.h"\r
2 \r
3 #include "video_transformer.h"\r
4 \r
5 #include "../../../video/video_format.h"\r
6 #include "../../../../common/utility/memory.h"\r
7 #include "../../../processor/frame.h"\r
8 #include "../../../processor/frame.h"\r
9 #include "../../../processor/frame_processor_device.h"\r
10 \r
11 #include <tbb/parallel_for.h>\r
12 #include <tbb/atomic.h>\r
13 #include <tbb/mutex.h>\r
14 #include <tbb/concurrent_queue.h>\r
15 #include <tbb/scalable_allocator.h>\r
16 \r
17 #include <unordered_map>\r
18 \r
19 extern "C" \r
20 {\r
21         #define __STDC_CONSTANT_MACROS\r
22         #define __STDC_LIMIT_MACROS\r
23         #include <libswscale/swscale.h>\r
24 }\r
25 \r
26 namespace caspar { namespace core { namespace ffmpeg{\r
27         \r
28 pixel_format::type get_pixel_format(PixelFormat pix_fmt)\r
29 {\r
30         switch(pix_fmt)\r
31         {\r
32                 case PIX_FMT_BGRA:              return pixel_format::bgra;\r
33                 case PIX_FMT_ARGB:              return pixel_format::argb;\r
34                 case PIX_FMT_RGBA:              return pixel_format::rgba;\r
35                 case PIX_FMT_ABGR:              return pixel_format::abgr;\r
36                 case PIX_FMT_YUV444P:   return pixel_format::ycbcr;\r
37                 case PIX_FMT_YUV422P:   return pixel_format::ycbcr;\r
38                 case PIX_FMT_YUV420P:   return pixel_format::ycbcr;\r
39                 case PIX_FMT_YUV411P:   return pixel_format::ycbcr;\r
40                 case PIX_FMT_YUV410P:   return pixel_format::ycbcr;\r
41                 case PIX_FMT_YUVA420P:  return pixel_format::ycbcra;\r
42                 default:                                return pixel_format::invalid;\r
43         }\r
44 }\r
45 \r
46 struct video_transformer::implementation : boost::noncopyable\r
47 {\r
48         implementation() : sw_warning_(false){}\r
49 \r
50         ~implementation()\r
51         {\r
52                 if(frame_processor_)\r
53                         frame_processor_->release_tag(this);\r
54         }\r
55 \r
56         video_packet_ptr execute(const video_packet_ptr video_packet)\r
57         {                               \r
58                 assert(video_packet);\r
59                 int width = video_packet->codec_context->width;\r
60                 int height = video_packet->codec_context->height;\r
61                 auto pix_fmt = video_packet->codec_context->pix_fmt;\r
62                 video_packet->decoded_frame;\r
63 \r
64                 switch(pix_fmt)\r
65                 {\r
66                 case PIX_FMT_BGRA:\r
67                 case PIX_FMT_ARGB:\r
68                 case PIX_FMT_RGBA:\r
69                 case PIX_FMT_ABGR:\r
70                         {\r
71                                 video_packet->frame = frame_processor_->create_frame(width, height, this);\r
72                                 tbb::parallel_for(0, height, 1, [&](int y)\r
73                                 {\r
74                                         common::aligned_memcpy(\r
75                                                 video_packet->frame->data()+y*width*4, \r
76                                                 video_packet->decoded_frame->data[0] + y*video_packet->decoded_frame->linesize[0], \r
77                                                 width*4); \r
78                                 });\r
79                                 video_packet->frame->pix_fmt(get_pixel_format(pix_fmt));\r
80                                                 \r
81                                 break;\r
82                         }\r
83                 case PIX_FMT_YUV444P:\r
84                 case PIX_FMT_YUV422P:\r
85                 case PIX_FMT_YUV420P:\r
86                 case PIX_FMT_YUV411P:\r
87                 case PIX_FMT_YUV410P:\r
88                 case PIX_FMT_YUVA420P:\r
89                         {                       \r
90                                 // Get linesizes\r
91                                 AVPicture dummy_pict;   \r
92                                 avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);\r
93                         \r
94                                 // Find chroma height\r
95                                 size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];\r
96                                 size_t h2 = size2/dummy_pict.linesize[1];\r
97 \r
98                                 pixel_format_desc desc;\r
99                                 desc.planes[0] = pixel_format_desc::plane(dummy_pict.linesize[0], height, 1);\r
100                                 desc.planes[1] = pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1);\r
101                                 desc.planes[2] = pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1);\r
102 \r
103                                 if(pix_fmt == PIX_FMT_YUVA420P)                                         \r
104                                         desc.planes[3] = pixel_format_desc::plane(dummy_pict.linesize[3], height, 1);                           \r
105 \r
106                                 desc.pix_fmt = get_pixel_format(pix_fmt);\r
107                                 video_packet->frame = frame_processor_->create_frame(desc, this);\r
108 \r
109                                 tbb::parallel_for(0, static_cast<int>(desc.planes.size()), 1, [&](int n)\r
110                                 {\r
111                                         if(desc.planes[n].size == 0)\r
112                                                 return;\r
113 \r
114                                         tbb::parallel_for(0, static_cast<int>(desc.planes[n].height), 1, [&](int y)\r
115                                         {\r
116                                                 memcpy(\r
117                                                         video_packet->frame->data(n)+y*dummy_pict.linesize[n], \r
118                                                         video_packet->decoded_frame->data[n] + y*video_packet->decoded_frame->linesize[n], \r
119                                                         dummy_pict.linesize[n]);\r
120                                         });\r
121                                 });\r
122                                 break;\r
123                         }               \r
124                 default:        \r
125                         {\r
126                                 if(!sw_warning_)\r
127                                 {\r
128                                         CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
129                                         sw_warning_ = true;\r
130                                 }\r
131                                 video_packet->frame = frame_processor_->create_frame(width, height, this);\r
132 \r
133                                 AVFrame av_frame;       \r
134                                 avcodec_get_frame_defaults(&av_frame);\r
135                                 avpicture_fill(reinterpret_cast<AVPicture*>(&av_frame), video_packet->frame->data(), PIX_FMT_BGRA, width, height);\r
136 \r
137                                 if(!sws_context_)\r
138                                 {\r
139                                         double param;\r
140                                         sws_context_.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);\r
141                                 }               \r
142                  \r
143                                 sws_scale(sws_context_.get(), video_packet->decoded_frame->data, video_packet->decoded_frame->linesize, 0, height, av_frame.data, av_frame.linesize);           \r
144                         }\r
145                 }\r
146 \r
147                 // TODO:\r
148                 if(video_packet->codec->id == CODEC_ID_DVVIDEO) // Move up one field\r
149                         video_packet->frame->translate(0.0f, 1.0/static_cast<double>(frame_processor_->get_video_format_desc().height));\r
150                 \r
151                 return video_packet;\r
152         }\r
153 \r
154         void initialize(const frame_processor_device_ptr& frame_processor)\r
155         {\r
156                 frame_processor_ = frame_processor;\r
157         }\r
158 \r
159         frame_processor_device_ptr frame_processor_;\r
160         std::shared_ptr<SwsContext> sws_context_;\r
161         bool sw_warning_;\r
162 };\r
163 \r
164 video_transformer::video_transformer() : impl_(new implementation()){}\r
165 video_packet_ptr video_transformer::execute(const video_packet_ptr& video_packet){return impl_->execute(video_packet);}\r
166 void video_transformer::initialize(const frame_processor_device_ptr& frame_processor){impl_->initialize(frame_processor); }\r
167 }}}