/*
 * Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
21 \r
22 #include "../../stdafx.h"\r
23 \r
24 #include "image_mixer.h"\r
25 \r
26 #include "../util/data_frame.h"\r
27 #include "../util/xmm.h"\r
28 \r
29 #include <common/assert.h>\r
30 #include <common/gl/gl_check.h>\r
31 #include <common/concurrency/async.h>\r
32 #include <common/memory/memcpy.h>\r
33 \r
34 #include <core/frame/data_frame.h>\r
35 #include <core/frame/frame_transform.h>\r
36 #include <core/frame/pixel_format.h>\r
37 #include <core/video_format.h>\r
38 \r
39 #include <modules/ffmpeg/producer/util/util.h>\r
40 \r
41 #include <asmlib.h>\r
42 \r
43 #include <gl/glew.h>\r
44 \r
45 #include <tbb/cache_aligned_allocator.h>\r
46 #include <tbb/parallel_for_each.h>\r
47 \r
48 #include <boost/assign.hpp>\r
49 #include <boost/foreach.hpp>\r
50 #include <boost/range.hpp>\r
51 #include <boost/range/algorithm_ext/erase.hpp>\r
52 #include <boost/thread/future.hpp>\r
53 \r
54 #include <algorithm>\r
55 #include <stdint.h>\r
56 #include <vector>\r
57 \r
#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
	#include <libswscale/swscale.h>
	#include <libavcodec/avcodec.h>
	#include <libavformat/avformat.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar { namespace accelerator { namespace cpu {

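// A single layer item to composite: the source's pixel layout, its plane
// pointers and the image transform to apply when mixing.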
struct item
{
	core::pixel_format_desc		pix_desc;
	std::array<const uint8_t*, 4>	data;
	core::image_transform		transform;

	item()
		: pix_desc(core::pixel_format::invalid)
	{
		data.fill(0);
	}
};

bool operator==(const item& lhs, const item& rhs)
{
	return lhs.data == rhs.data && lhs.transform == rhs.transform;
}

bool operator!=(const item& lhs, const item& rhs)
{
	return !(lhs == rhs);
}

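// Premultiplied-alpha "over" blend of one XMM register (four BGRA pixels):
// composites d over s using d's alpha, i.e. returns s + d - s*d[A]/255.
// The division by 255 is approximated without a divide via the usual
// T = x*a + 0x80; ((T >> 8) + T) >> 8 trick, computed separately for the
// low and high bytes of each 16-bit lane and re-interleaved with a byte blend.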
inline xmm::s8_x blend(xmm::s8_x d, xmm::s8_x s)
{
	using namespace xmm;

	// T(S, D) = S * D[A] + 0x80
	auto aaaa   = s8_x::shuffle(d, s8_x(15, 15, 15, 15, 11, 11, 11, 11, 7, 7, 7, 7, 3, 3, 3, 3));
	d           = s8_x(u8_x::min(u8_x(d), u8_x(aaaa))); // overflow guard

	auto xaxa   = s16_x(aaaa) >> 8;

	auto t1     = s16_x::multiply_low(s16_x(s) & 0x00FF, xaxa) + 0x80;
	auto t2     = s16_x::multiply_low(s16_x(s) >> 8    , xaxa) + 0x80;

	// C(S, D) = S + D - (((T >> 8) + T) >> 8);
	auto xyxy   = s8_x(((t1 >> 8) + t1) >> 8);
	auto yxyx   = s8_x((t2 >> 8) + t2);
	auto argb   = s8_x::blend(xyxy, yxyx, s8_x(-1, 0, -1, 0));

	return s8_x(s) + (d - argb);
}

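// Blends count bytes of BGRA source over dest in place, 32 bytes (two XMM
// registers, eight pixels) per iteration; the loop assumes count is a
// multiple of 32. The temporal tag selects between cached and streaming
// stores, the alignment tag between aligned and unaligned accesses.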
template<typename temporal, typename alignment>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
{
	using namespace xmm;

	for(size_t n = 0; n < count; n += 32)
	{
		auto s0 = s8_x::load<temporal_tag, alignment>(dest+n+0);
		auto s1 = s8_x::load<temporal_tag, alignment>(dest+n+16);

		auto d0 = s8_x::load<temporal_tag, alignment>(source+n+0);
		auto d1 = s8_x::load<temporal_tag, alignment>(source+n+16);

		auto argb0 = blend(d0, s0);
		auto argb1 = blend(d1, s1);

		s8_x::store<temporal, alignment>(argb0, dest+n+0 );
		s8_x::store<temporal, alignment>(argb1, dest+n+16);
	}
}

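// Dispatches to the aligned or unaligned kernel depending on whether both
// pointers are 16-byte aligned.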
template<typename temporal>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
{
	using namespace xmm;

	if(reinterpret_cast<uintptr_t>(dest) % 16 != 0 || reinterpret_cast<uintptr_t>(source) % 16 != 0)
		kernel<temporal, unaligned_tag>(dest, source, count);
	else
		kernel<temporal, aligned_tag>(dest, source, count);
}

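// Composites a list of items into a single full-frame BGRA image. Inputs are
// first converted to full-frame BGRA (see convert()), then each item is
// blended onto the output scanline by scanline in parallel with TBB. The last
// rendered image is cached so that an identical item list is not re-rendered.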
class image_renderer
{
	std::pair<std::vector<item>, boost::shared_future<boost::iterator_range<const uint8_t*>>>	last_image_;
	tbb::concurrent_unordered_map<int, tbb::concurrent_bounded_queue<std::shared_ptr<SwsContext>>>	sws_contexts_;
	std::vector<spl::shared_ptr<host_buffer>>							temp_buffers_;
public:
	boost::shared_future<boost::iterator_range<const uint8_t*>> operator()(std::vector<item> items, const core::video_format_desc& format_desc)
	{
		if(last_image_.first == items && last_image_.second.has_value())
			return last_image_.second;

		auto image	= render(items, format_desc);
		last_image_ = std::make_pair(std::move(items), image);
		return image;
	}

private:
	boost::shared_future<boost::iterator_range<const uint8_t*>> render(std::vector<item> items, const core::video_format_desc& format_desc)
	{
		convert(items, format_desc.width, format_desc.height);

		auto result = spl::make_shared<host_buffer>(format_desc.size, 0);
		if(format_desc.field_mode != core::field_mode::progressive)
		{
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::upper);
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::lower);
		}
		else
		{
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::progressive);
		}

		return async(launch_policy::deferred, [=]
		{
			return boost::iterator_range<const uint8_t*>(result->data(), result->data() + format_desc.size);
		});
	}

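	// Blends the items into dest for the given field. Each item's field mode is
	// masked against the field being rendered; items that end up empty are
	// dropped and stills only use the last field. For interlaced formats only
	// every other scanline is written (start/step select the field's lines), so
	// upper and lower fields are rendered in separate passes.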
	void draw(std::vector<item> items, uint8_t* dest, int width, int height, core::field_mode field_mode)
	{
		BOOST_FOREACH(auto& item, items)
			item.transform.field_mode &= field_mode;

		// Remove empty items.
		boost::range::remove_erase_if(items, [&](const item& item)
		{
			return item.transform.field_mode == core::field_mode::empty;
		});

		// Remove first-field stills; only the last field is used for stills.
		boost::range::remove_erase_if(items, [&](const item& item)
		{
			return item.transform.is_still && item.transform.field_mode == field_mode;
		});

		if(items.empty())
			return;

		auto start = field_mode == core::field_mode::lower ? 1 : 0;
		auto step  = field_mode == core::field_mode::progressive ? 1 : 2;

		// TODO: Add support for fill translations.
		// TODO: Add support for mask rect.
		// TODO: Add support for opacity.
		// TODO: Add support for mix transition.
		// TODO: Add support for push transition.
		// TODO: Add support for wipe transition.
		// TODO: Add support for slide transition.
		tbb::parallel_for(tbb::blocked_range<int>(0, height/step), [&](const tbb::blocked_range<int>& r)
		{
			for(auto i = r.begin(); i != r.end(); ++i)
			{
				auto y = i*step+start;

				// Intermediate blends are read back by the next pass, so use temporal stores.
				for(std::size_t n = 0; n < items.size()-1; ++n)
					kernel<xmm::temporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);

				// The final blend is write-only from here on, so stream it past the cache.
				std::size_t n = items.size()-1;
				kernel<xmm::nontemporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
			}

			_mm_mfence();
		});

		temp_buffers_.clear();
	}

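	// Converts every item that is not already full-frame BGRA into a temporary
	// full-frame BGRA buffer using swscale. Each distinct source buffer set is
	// converted once, SwsContexts are pooled per width/height/format key, and
	// all items sharing the same source are repointed to the converted frame.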
	void convert(std::vector<item>& source_items, int width, int height)
	{
		std::set<std::array<const uint8_t*, 4>> buffers;

		BOOST_FOREACH(auto& item, source_items)
			buffers.insert(item.data);

		auto dest_items = source_items;

		tbb::parallel_for_each(buffers.begin(), buffers.end(), [&](const std::array<const uint8_t*, 4>& data)
		{
			auto pix_desc = std::find_if(source_items.begin(), source_items.end(), [&](const item& item){return item.data == data;})->pix_desc;

			if(pix_desc.format == core::pixel_format::bgra &&
				pix_desc.planes.at(0).width == width &&
				pix_desc.planes.at(0).height == height)
				return;

			std::array<uint8_t*, 4> data2 = {};
			for(std::size_t n = 0; n < data.size(); ++n)
				data2.at(n) = const_cast<uint8_t*>(data[n]);

			auto input_av_frame = ffmpeg::make_av_frame(data2, pix_desc);

			int key = ((input_av_frame->width << 22) & 0xFFC00000) | ((input_av_frame->height << 6) & 0x003FC000) | ((input_av_frame->format << 7) & 0x00007F00);

			auto& pool = sws_contexts_[key];

			std::shared_ptr<SwsContext> sws_context;
			if(!pool.try_pop(sws_context))
				sws_context.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<PixelFormat>(input_av_frame->format), width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, nullptr), sws_freeContext);

			if(!sws_context)
				BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") << boost::errinfo_api_function("sws_getContext"));

			auto dest_frame = spl::make_shared<host_buffer>(width*height*4);

			{
				spl::shared_ptr<AVFrame> dest_av_frame(avcodec_alloc_frame(), av_free);
				avcodec_get_frame_defaults(dest_av_frame.get());
				avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), PIX_FMT_BGRA, width, height);

				sws_scale(sws_context.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);
				pool.push(sws_context);
			}

			temp_buffers_.push_back(dest_frame);

			for(std::size_t n = 0; n < source_items.size(); ++n)
			{
				if(source_items[n].data == data)
				{
					dest_items[n].data.fill(0);
					dest_items[n].data[0]		= dest_frame->data();
					dest_items[n].pix_desc		= core::pixel_format_desc(core::pixel_format::bgra);
					dest_items[n].pix_desc.planes	= boost::assign::list_of(core::pixel_format_desc::plane(width, height, 4));
					dest_items[n].transform		= source_items[n].transform;
				}
			}
		});

		source_items = std::move(dest_items);
	}
};

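// Implements the core frame-visitor interface: push()/visit()/pop() combine
// the current transform stack with each visited frame's plane pointers into
// items, and render() hands the accumulated items to the image_renderer.
// begin_layer()/end_layer() are no-ops here; the blend_mode argument is ignored.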
struct image_mixer::impl : boost::noncopyable
{
	image_renderer				renderer_;
	std::vector<core::image_transform>	transform_stack_;
	std::vector<item>			items_; // layer/stream/items
public:
	impl()
		: transform_stack_(1)
	{
		CASPAR_LOG(info) << L"Initialized Streaming SIMD Extensions Accelerated CPU Image Mixer";
	}

	void begin_layer(core::blend_mode blend_mode)
	{
	}

	void push(const core::frame_transform& transform)
	{
		transform_stack_.push_back(transform_stack_.back()*transform.image_transform);
	}

	void visit(const core::data_frame& frame)
	{
		if(frame.pixel_format_desc().format == core::pixel_format::invalid)
			return;

		if(frame.pixel_format_desc().planes.empty())
			return;

		if(transform_stack_.back().field_mode == core::field_mode::empty)
			return;

		item item;
		item.pix_desc	= frame.pixel_format_desc();
		item.transform	= transform_stack_.back();
		for(std::size_t n = 0; n < item.pix_desc.planes.size(); ++n)
			item.data.at(n) = frame.image_data(n).begin();

		items_.push_back(item);
	}

	void pop()
	{
		transform_stack_.pop_back();
	}

	void end_layer()
	{
	}

	boost::shared_future<boost::iterator_range<const uint8_t*>> render(const core::video_format_desc& format_desc)
	{
		return renderer_(std::move(items_), format_desc);
	}

	virtual spl::unique_ptr<core::data_frame> create_frame(const void* tag, const core::pixel_format_desc& desc, double frame_rate, core::field_mode field_mode)
	{
		return spl::make_unique<cpu::data_frame>(tag, desc, frame_rate, field_mode);
	}
};

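// Typical usage (a rough sketch; in practice the call sequence is driven by
// the core mixer rather than by client code):
//
//   image_mixer mixer;
//   mixer.push(frame_transform);      // enter a transform scope
//   mixer.visit(frame);               // collect a frame to composite
//   mixer.pop();                      // leave the transform scope
//   auto image = mixer(format_desc);  // shared_future over format_desc.size BGRA bytes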
image_mixer::image_mixer() : impl_(new impl()){}
void image_mixer::push(const core::frame_transform& transform){impl_->push(transform);}
void image_mixer::visit(const core::data_frame& frame){impl_->visit(frame);}
void image_mixer::pop(){impl_->pop();}
boost::shared_future<boost::iterator_range<const uint8_t*>> image_mixer::operator()(const core::video_format_desc& format_desc){return impl_->render(format_desc);}
void image_mixer::begin_layer(core::blend_mode blend_mode){impl_->begin_layer(blend_mode);}
void image_mixer::end_layer(){impl_->end_layer();}
spl::unique_ptr<core::data_frame> image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, double frame_rate, core::field_mode field_mode){return impl_->create_frame(tag, desc, frame_rate, field_mode);}

}}}