/*
 * Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#include "../../stdafx.h"

#include "image_mixer.h"

#include "../util/data_frame.h"
#include "../util/xmm.h"

#include <common/assert.h>
#include <common/gl/gl_check.h>
#include <common/concurrency/async.h>
#include <common/memory/memcpy.h>

#include <core/frame/data_frame.h>
#include <core/frame/frame_transform.h>
#include <core/frame/pixel_format.h>
#include <core/video_format.h>

#include <modules/ffmpeg/producer/util/util.h>

#include <asmlib.h>

#include <gl/glew.h>

#include <tbb/blocked_range.h>
#include <tbb/cache_aligned_allocator.h>
#include <tbb/concurrent_queue.h>
#include <tbb/concurrent_unordered_map.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_for_each.h>

#include <boost/assign.hpp>
#include <boost/foreach.hpp>
#include <boost/range.hpp>
#include <boost/range/algorithm_ext/erase.hpp>
#include <boost/thread/future.hpp>

#include <algorithm>
#include <array>
#include <set>
#include <stdint.h>
#include <vector>
#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C" 
{
	#include <libswscale/swscale.h>
	#include <libavcodec/avcodec.h>
	#include <libavformat/avformat.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar { namespace accelerator { namespace cpu {

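// A single input to the mixer: the pixel format of a frame, pointers to its
// (up to four) image planes and the image transform to apply when the frame
// is composited.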
struct item
{
	core::pixel_format_desc		pix_desc;
	std::array<const uint8_t*, 4>	data;
	core::image_transform		transform;

	item()
		: pix_desc(core::pixel_format::invalid)
	{
		data.fill(0);
	}
};

bool operator==(const item& lhs, const item& rhs)
{
	return lhs.data == rhs.data && lhs.transform == rhs.transform;
}

bool operator!=(const item& lhs, const item& rhs)
{
	return !(lhs == rhs);
}

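// Blends two vectors of four premultiplied BGRA pixels. The result is d
// composited over s, with d supplying the alpha:
//
//   C(S, D) = S + D - S * D[A] / 255
//
// i.e. result = d + s * (1 - d[A] / 255). The division by 255 is approximated
// per byte with the integer trick
//
//   x / 255 ~= (((x + 0x80) >> 8) + (x + 0x80)) >> 8
//
// which is what the T/C formulas in the comments below compute.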
inline xmm::s8_x blend(xmm::s8_x d, xmm::s8_x s)
{	
	using namespace xmm;
		
	// T(S, D) = S * D[A] + 0x80
	auto aaaa	= s8_x::shuffle(d, s8_x(15, 15, 15, 15, 11, 11, 11, 11, 7, 7, 7, 7, 3, 3, 3, 3));
	d		= s8_x(u8_x::min(u8_x(d), u8_x(aaaa))); // overflow guard

	auto xaxa	= s16_x(aaaa) >> 8;		
		      
	auto t1		= s16_x::multiply_low(s16_x(s) & 0x00FF, xaxa) + 0x80;    
	auto t2		= s16_x::multiply_low(s16_x(s) >> 8	, xaxa) + 0x80;
		
	// C(S, D) = S + D - (((T >> 8) + T) >> 8);
	auto xyxy	= s8_x(((t1 >> 8) + t1) >> 8);      
	auto yxyx	= s8_x((t2 >> 8) + t2);    
	auto argb	= s8_x::blend(xyxy, yxyx, s8_x(-1, 0, -1, 0));

	return s8_x(s) + (d - argb);
}

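// Composites 'count' bytes of BGRA pixels from 'source' over 'dest' in place.
// Each iteration handles 32 bytes (8 pixels) as two 16-byte SSE vectors; count
// is assumed to be a multiple of 32. Loads always use the temporal path; only
// the stores honour the caller's temporal/non-temporal hint.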
template<typename temporal, typename alignment>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
{			
	using namespace xmm;

	for(size_t n = 0; n < count; n += 32)    
	{
		auto s0 = s8_x::load<temporal_tag, alignment>(dest+n+0);
		auto s1 = s8_x::load<temporal_tag, alignment>(dest+n+16);

		auto d0 = s8_x::load<temporal_tag, alignment>(source+n+0);
		auto d1 = s8_x::load<temporal_tag, alignment>(source+n+16);
		
		auto argb0 = blend(d0, s0);
		auto argb1 = blend(d1, s1);

		s8_x::store<temporal, alignment>(argb0, dest+n+0 );
		s8_x::store<temporal, alignment>(argb1, dest+n+16);
	} 
}

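// Dispatches to the aligned kernel when both pointers are 16-byte aligned,
// otherwise falls back to unaligned loads and stores.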
template<typename temporal>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
{			
	using namespace xmm;

	if(reinterpret_cast<uintptr_t>(dest) % 16 != 0 || reinterpret_cast<uintptr_t>(source) % 16 != 0)
		kernel<temporal, unaligned_tag>(dest, source, count);
	else
		kernel<temporal, aligned_tag>(dest, source, count);
}

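// Renders a flattened BGRA image from a list of items. The previous result is
// cached so that an identical item list (same buffers and transforms) is
// returned without re-rendering. SwsContexts are pooled per input
// width/height/format, and temporary conversion buffers are kept alive in
// temp_buffers_ until the next draw() call.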
class image_renderer
{
	std::pair<std::vector<item>, boost::shared_future<boost::iterator_range<const uint8_t*>>>	last_image_;
	tbb::concurrent_unordered_map<int, tbb::concurrent_bounded_queue<std::shared_ptr<SwsContext>>>	sws_contexts_;
	tbb::concurrent_bounded_queue<spl::shared_ptr<host_buffer>>					temp_buffers_;
public:	
	boost::shared_future<boost::iterator_range<const uint8_t*>> operator()(std::vector<item> items, const core::video_format_desc& format_desc)
	{	
		if(last_image_.first == items && last_image_.second.has_value())
			return last_image_.second;

		auto image	= render(items, format_desc);
		last_image_ = std::make_pair(std::move(items), image);
		return image;
	}

private:
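	// Converts all items to full-resolution BGRA and composites them into a
	// single host buffer. Interlaced formats are drawn twice, once per field;
	// the result is exposed as a deferred future over the buffer.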
	boost::shared_future<boost::iterator_range<const uint8_t*>> render(std::vector<item> items, const core::video_format_desc& format_desc)
	{
		convert(items, format_desc.width, format_desc.height);		
		
		auto result = spl::make_shared<host_buffer>(format_desc.size, 0);
		if(format_desc.field_mode != core::field_mode::progressive)
		{			
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::upper);
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::lower);
		}
		else
		{
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::progressive);
		}
		
		return async(launch_policy::deferred, [=]
		{
			return boost::iterator_range<const uint8_t*>(result->data(), result->data() + format_desc.size);
		});		
	}

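	// Composites the given items into 'dest' for one field (or the whole frame
	// when progressive). Rows belonging to the field are processed in parallel;
	// all items except the last are blended with temporal stores, while the
	// last one uses non-temporal stores because its output is not read again.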
	void draw(std::vector<item> items, uint8_t* dest, int width, int height, core::field_mode field_mode)
	{		
		BOOST_FOREACH(auto& item, items)
			item.transform.field_mode &= field_mode;
		
		// Remove empty items.
		boost::range::remove_erase_if(items, [&](const item& item)
		{
			return item.transform.field_mode == core::field_mode::empty;
		});
		
		// Remove first-field stills; only the last field is used for stills.
		boost::range::remove_erase_if(items, [&](const item& item)
		{
			return item.transform.is_still && item.transform.field_mode == field_mode;
		});

		if(items.empty())
			return;
		
		auto start = field_mode == core::field_mode::lower ? 1 : 0;
		auto step  = field_mode == core::field_mode::progressive ? 1 : 2;
		
		// TODO: Add support for fill translations.
		// TODO: Add support for mask rect.
		// TODO: Add support for opacity.
		// TODO: Add support for mix transition.
		// TODO: Add support for push transition.
		// TODO: Add support for wipe transition.
		// TODO: Add support for slide transition.
		tbb::parallel_for(tbb::blocked_range<int>(0, height/step), [&](const tbb::blocked_range<int>& r)
		{
			for(auto i = r.begin(); i != r.end(); ++i)
			{
				auto y = i*step+start;

				for(std::size_t n = 0; n < items.size()-1; ++n)
					kernel<xmm::temporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
				
				std::size_t n = items.size()-1;				
				kernel<xmm::nontemporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
			}

			_mm_mfence();
		});

		temp_buffers_.clear();
	}
		
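	// Converts every distinct input buffer that is not already full-resolution
	// BGRA into a pooled host buffer via swscale, and repoints the matching
	// items at the converted data. Items that already match the target format
	// and size are left untouched.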
	void convert(std::vector<item>& source_items, int width, int height)
	{
		std::set<std::array<const uint8_t*, 4>> buffers;

		BOOST_FOREACH(auto& item, source_items)
			buffers.insert(item.data);
		
		auto dest_items = source_items;

		tbb::parallel_for_each(buffers.begin(), buffers.end(), [&](const std::array<const uint8_t*, 4>& data)
		{			
			auto pix_desc = std::find_if(source_items.begin(), source_items.end(), [&](const item& item){return item.data == data;})->pix_desc;

			if(pix_desc.format == core::pixel_format::bgra && 
				pix_desc.planes.at(0).width == width &&
				pix_desc.planes.at(0).height == height)
				return;

			std::array<uint8_t*, 4> data2 = {};
			for(std::size_t n = 0; n < data.size(); ++n)
				data2.at(n) = const_cast<uint8_t*>(data[n]);

			auto input_av_frame = ffmpeg::make_av_frame(data2, pix_desc);

			// Pool key: pack source width, height and pixel format into non-overlapping bit fields.
			int key = static_cast<int>(((static_cast<uint32_t>(input_av_frame->width)  & 0xFFF) << 20) |
									   ((static_cast<uint32_t>(input_av_frame->height) & 0xFFF) <<  8) |
									   ( static_cast<uint32_t>(input_av_frame->format) & 0xFF));

			auto& pool = sws_contexts_[key];

			std::shared_ptr<SwsContext> sws_context;
			if(!pool.try_pop(sws_context))
				sws_context.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<PixelFormat>(input_av_frame->format), width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, nullptr), sws_freeContext);
			
			if(!sws_context)				
				BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") << boost::errinfo_api_function("sws_getContext"));				
		
			auto dest_frame = spl::make_shared<host_buffer>(width*height*4);
			temp_buffers_.push(dest_frame);

			{
				spl::shared_ptr<AVFrame> dest_av_frame(avcodec_alloc_frame(), av_free);	
				avcodec_get_frame_defaults(dest_av_frame.get());			
				avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), PIX_FMT_BGRA, width, height);
				
				sws_scale(sws_context.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);				
				pool.push(sws_context);
			}
					
			for(std::size_t n = 0; n < source_items.size(); ++n)
			{
				if(source_items[n].data == data)
				{
					dest_items[n].data.fill(0);
					dest_items[n].data[0]		= dest_frame->data();
					dest_items[n].pix_desc		= core::pixel_format_desc(core::pixel_format::bgra);
					dest_items[n].pix_desc.planes	= boost::assign::list_of(core::pixel_format_desc::plane(width, height, 4));
					dest_items[n].transform		= source_items[n].transform;
				}
			}
		});	

		source_items = std::move(dest_items);
	}
};
		
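// Frame-visitor state for one render pass: push()/visit()/pop() maintain the
// transform stack and collect visible frames as items, render() hands the
// collected items to the renderer, and begin_layer()/end_layer() are no-ops
// since this mixer does not implement per-layer blend modes.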
struct image_mixer::impl : boost::noncopyable
{	
	image_renderer				renderer_;
	std::vector<core::image_transform>	transform_stack_;
	std::vector<item>			items_; // layer/stream/items
public:
	impl() 
		: transform_stack_(1)	
	{
		CASPAR_LOG(info) << L"Initialized Streaming SIMD Extensions Accelerated CPU Image Mixer";
	}

	void begin_layer(core::blend_mode blend_mode)
	{
	}
		
	void push(const core::frame_transform& transform)
	{
		transform_stack_.push_back(transform_stack_.back()*transform.image_transform);
	}
		
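	// Collects a frame as an item, skipping frames that cannot be composited:
	// invalid pixel formats, missing planes, a first plane too small for the
	// SSE kernel, or a transform whose field mode leaves nothing to draw.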
	void visit(const core::data_frame& frame)
	{			
		if(frame.pixel_format_desc().format == core::pixel_format::invalid)
			return;

		if(frame.pixel_format_desc().planes.empty())
			return;
		
		if(frame.pixel_format_desc().planes.at(0).size < 16)
			return;

		if(transform_stack_.back().field_mode == core::field_mode::empty)
			return;

		item item;
		item.pix_desc	= frame.pixel_format_desc();
		item.transform	= transform_stack_.back();
		for(std::size_t n = 0; n < item.pix_desc.planes.size(); ++n)
			item.data.at(n) = frame.image_data(n).begin();		

		items_.push_back(item);
	}

	void pop()
	{
		transform_stack_.pop_back();
	}

	void end_layer()
	{		
	}
	
	boost::shared_future<boost::iterator_range<const uint8_t*>> render(const core::video_format_desc& format_desc)
	{
		return renderer_(std::move(items_), format_desc);
	}
	
	virtual spl::unique_ptr<core::data_frame> create_frame(const void* tag, const core::pixel_format_desc& desc, double frame_rate, core::field_mode field_mode)
	{
		return spl::make_unique<cpu::data_frame>(tag, desc, frame_rate, field_mode);
	}
};

image_mixer::image_mixer() : impl_(new impl()){}
void image_mixer::push(const core::frame_transform& transform){impl_->push(transform);}
void image_mixer::visit(const core::data_frame& frame){impl_->visit(frame);}
void image_mixer::pop(){impl_->pop();}
boost::shared_future<boost::iterator_range<const uint8_t*>> image_mixer::operator()(const core::video_format_desc& format_desc){return impl_->render(format_desc);}
void image_mixer::begin_layer(core::blend_mode blend_mode){impl_->begin_layer(blend_mode);}
void image_mixer::end_layer(){impl_->end_layer();}
spl::unique_ptr<core::data_frame> image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, double frame_rate, core::field_mode field_mode) {return impl_->create_frame(tag, desc, frame_rate, field_mode);}

}}}