/*
* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
*
* This file is part of CasparCG (www.casparcg.com).
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Robert Nagy, ronag89@gmail.com
*/

#include "../../stdafx.h"

#include "image_mixer.h"

#include "../util/write_frame.h"
#include "../util/xmm.h"

#include <common/assert.h>
#include <common/gl/gl_check.h>
#include <common/concurrency/async.h>
#include <common/memory/memcpy.h>

#include <core/frame/write_frame.h>
#include <core/frame/frame_transform.h>
#include <core/frame/pixel_format.h>
#include <core/video_format.h>

#include <modules/ffmpeg/producer/util/util.h>

#include <asmlib.h>

#include <gl/glew.h>

#include <tbb/blocked_range.h>
#include <tbb/cache_aligned_allocator.h>
#include <tbb/concurrent_queue.h>
#include <tbb/concurrent_unordered_map.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_for_each.h>

#include <boost/assign.hpp>
#include <boost/foreach.hpp>
#include <boost/range.hpp>
#include <boost/range/algorithm_ext/erase.hpp>
#include <boost/thread/future.hpp>

#include <algorithm>
#include <set>
#include <stdint.h>
#include <vector>

#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
	#include <libswscale/swscale.h>
	#include <libavcodec/avcodec.h>
	#include <libavformat/avformat.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar { namespace accelerator { namespace cpu {

struct item
{
	core::pixel_format_desc                   pix_desc;
	std::vector<spl::shared_ptr<host_buffer>> buffers;
	core::image_transform                     transform;

	item()
		: pix_desc(core::pixel_format::invalid)
	{
	}
};

bool operator==(const item& lhs, const item& rhs)
{
	return lhs.buffers == rhs.buffers && lhs.transform == rhs.transform;
}

bool operator!=(const item& lhs, const item& rhs)
{
	return !(lhs == rhs);
}

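// blend() composites four premultiplied BGRA pixels per XMM register:
// blend(d, s) yields d "over" s, using d's alpha and the usual
// ((x >> 8) + x) >> 8 fixed-point approximation of x / 255.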
inline xmm::s8_x blend(xmm::s8_x d, xmm::s8_x s)
{
	using namespace xmm;

	// T(S, D) = S * D[A] + 0x80
	auto aaaa = s8_x::shuffle(d, s8_x(15, 15, 15, 15, 11, 11, 11, 11, 7, 7, 7, 7, 3, 3, 3, 3));
	d         = s8_x(u8_x::min(u8_x(d), u8_x(aaaa))); // overflow guard

	auto xaxa = s16_x(aaaa) >> 8;

	auto t1   = s16_x::multiply_low(s16_x(s) & 0x00FF, xaxa) + 0x80;
	auto t2   = s16_x::multiply_low(s16_x(s) >> 8    , xaxa) + 0x80;

	// C(S, D) = S + D - (((T >> 8) + T) >> 8);
	auto xyxy = s8_x(((t1 >> 8) + t1) >> 8);
	auto yxyx = s8_x((t2 >> 8) + t2);
	auto argb = s8_x::blend(xyxy, yxyx, s8_x(-1, 0, -1, 0));

	return s8_x(s) + (d - argb);
}

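// The kernel() overloads below blend 32 bytes (eight BGRA pixels) per
// iteration. The temporal parameter selects ordinary or streaming
// (non-temporal) stores; the alignment parameter is chosen at runtime from
// the actual pointer alignment.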
template<typename temporal, typename alignment>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
{
	using namespace xmm;

	// Pixels read from source are composited over the current contents of dest.
	for(size_t n = 0; n < count; n += 32)
	{
		auto s0 = s8_x::load<temporal_tag, alignment>(dest+n+0);
		auto s1 = s8_x::load<temporal_tag, alignment>(dest+n+16);

		auto d0 = s8_x::load<temporal_tag, alignment>(source+n+0);
		auto d1 = s8_x::load<temporal_tag, alignment>(source+n+16);

		auto argb0 = blend(d0, s0);
		auto argb1 = blend(d1, s1);

		s8_x::store<temporal, alignment>(argb0, dest+n+0 );
		s8_x::store<temporal, alignment>(argb1, dest+n+16);
	}
}

template<typename temporal>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
{
	using namespace xmm;

	if(reinterpret_cast<uintptr_t>(dest) % 16 != 0 || reinterpret_cast<uintptr_t>(source) % 16 != 0)
		kernel<temporal, unaligned_tag>(dest, source, count);
	else
		kernel<temporal, aligned_tag>(dest, source, count);
}

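// image_renderer composites a list of items into a single BGRA frame and
// caches the result, so an identical item list (same buffers and transforms)
// can return the previous frame without re-rendering.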
class image_renderer
{
	std::pair<std::vector<item>, boost::shared_future<boost::iterator_range<const uint8_t*>>>      last_image_;
	tbb::concurrent_unordered_map<int, tbb::concurrent_bounded_queue<std::shared_ptr<SwsContext>>> sws_contexts_;
public:
	boost::shared_future<boost::iterator_range<const uint8_t*>> operator()(std::vector<item> items, const core::video_format_desc& format_desc)
	{
		if(last_image_.first == items && last_image_.second.has_value())
			return last_image_.second;

		auto image  = render(items, format_desc);
		last_image_ = std::make_pair(std::move(items), image);
		return image;
	}

private:
	boost::shared_future<boost::iterator_range<const uint8_t*>> render(std::vector<item> items, const core::video_format_desc& format_desc)
	{
		convert(items, format_desc.width, format_desc.height);

		auto result = spl::make_shared<host_buffer>(format_desc.size, 0);
		if(format_desc.field_mode != core::field_mode::progressive)
		{
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::upper);
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::lower);
		}
		else
		{
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::progressive);
		}

		return async(launch_policy::deferred, [=]
		{
			return boost::iterator_range<const uint8_t*>(result->data(), result->data() + format_desc.size);
		});
	}

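	// draw() blends the items onto dest one scan line at a time, in parallel
	// across rows. For interlaced formats it is called once per field and only
	// touches every second line.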
	void draw(std::vector<item> items, uint8_t* dest, int width, int height, core::field_mode field_mode)
	{
		BOOST_FOREACH(auto& item, items)
			item.transform.field_mode &= field_mode;

		// Remove empty items.
		boost::range::remove_erase_if(items, [&](const item& item)
		{
			return item.transform.field_mode == core::field_mode::empty;
		});

		// Remove first-field stills.
		boost::range::remove_erase_if(items, [&](const item& item)
		{
			return item.transform.is_still && item.transform.field_mode == field_mode; // Only use the last field for stills.
		});

		if(items.empty())
			return;

		auto start = field_mode == core::field_mode::lower ? 1 : 0;
		auto step  = field_mode == core::field_mode::progressive ? 1 : 2;

		// TODO: Add support for fill translations.
		// TODO: Add support for mask rect.
		// TODO: Add support for opacity.
		// TODO: Add support for mix transition.
		// TODO: Add support for push transition.
		// TODO: Add support for wipe transition.
		// TODO: Add support for slide transition.
		tbb::parallel_for(tbb::blocked_range<int>(0, height/step), [&](const tbb::blocked_range<int>& r)
		{
			for(auto i = r.begin(); i != r.end(); ++i)
			{
				auto y = i*step+start;

				// Blend all but the last item with ordinary stores...
				for(std::size_t n = 0; n < items.size()-1; ++n)
					kernel<xmm::temporal_tag>(dest + y*width*4, items[n].buffers.at(0)->data() + y*width*4, width*4);

				// ...and the last item with streaming (non-temporal) stores.
				std::size_t n = items.size()-1;
				kernel<xmm::nontemporal_tag>(dest + y*width*4, items[n].buffers.at(0)->data() + y*width*4, width*4);
			}

			_mm_mfence();
		});
	}

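	// convert() rescales and converts any input that is not already full-frame
	// BGRA into BGRA at the target resolution using swscale, pooling SwsContexts
	// per (width, height, format) key so they can be reused between frames.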
	void convert(std::vector<item>& source_items, int width, int height)
	{
		std::set<std::vector<spl::shared_ptr<host_buffer>>> buffers;

		BOOST_FOREACH(auto& item, source_items)
			buffers.insert(item.buffers);

		auto dest_items = source_items;

		tbb::parallel_for_each(buffers.begin(), buffers.end(), [&](const std::vector<spl::shared_ptr<host_buffer>>& buffers)
		{
			auto pix_desc = std::find_if(source_items.begin(), source_items.end(), [&](const item& item){return item.buffers == buffers;})->pix_desc;

			// Already full-frame BGRA, nothing to convert.
			if(pix_desc.format == core::pixel_format::bgra &&
				pix_desc.planes.at(0).width == width &&
				pix_desc.planes.at(0).height == height)
				return;

			auto input_av_frame = ffmpeg::make_av_frame(buffers, pix_desc);

			// Pack the input dimensions and pixel format into a key for the SwsContext pool.
			int key = ((input_av_frame->width << 22) & 0xFFC00000) | ((input_av_frame->height << 6) & 0x003FC000) | ((input_av_frame->format << 7) & 0x00007F00);

			auto& pool = sws_contexts_[key];

			std::shared_ptr<SwsContext> sws_context;
			if(!pool.try_pop(sws_context))
			{
				sws_context.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<PixelFormat>(input_av_frame->format), width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, nullptr), sws_freeContext);
			}

			if(!sws_context)
				BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") << boost::errinfo_api_function("sws_getContext"));

			auto dest_frame = spl::make_shared<host_buffer>(width*height*4);

			{
				spl::shared_ptr<AVFrame> dest_av_frame(avcodec_alloc_frame(), av_free);
				avcodec_get_frame_defaults(dest_av_frame.get());
				avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), PIX_FMT_BGRA, width, height);

				sws_scale(sws_context.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);
				pool.push(sws_context); // Return the context to the pool for reuse.
			}

			// Replace every item that referenced the original buffers with the converted BGRA frame.
			for(std::size_t n = 0; n < source_items.size(); ++n)
			{
				if(source_items[n].buffers == buffers)
				{
					dest_items[n].buffers         = boost::assign::list_of(dest_frame);
					dest_items[n].pix_desc        = core::pixel_format_desc(core::pixel_format::bgra);
					dest_items[n].pix_desc.planes = boost::assign::list_of(core::pixel_format_desc::plane(width, height, 4));
					dest_items[n].transform       = source_items[n].transform;
				}
			}
		});

		source_items = std::move(dest_items);
	}
};

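// The mixer implementation collects items through the frame visitor interface
// (push/visit/pop) and hands the flattened item list to the renderer when a
// frame is requested.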
struct image_mixer::impl : boost::noncopyable
{
	image_renderer                     renderer_;
	std::vector<core::image_transform> transform_stack_;
	std::vector<item>                  items_; // layer/stream/items
public:
	impl()
		: transform_stack_(1)
	{
		CASPAR_LOG(info) << L"Initialized Streaming SIMD Extensions Accelerated CPU Image Mixer";
	}

	void begin_layer(core::blend_mode blend_mode)
	{
	}

	void push(const core::frame_transform& transform)
	{
		transform_stack_.push_back(transform_stack_.back()*transform.image_transform);
	}

	void visit(const core::data_frame& frame2)
	{
		auto frame = dynamic_cast<const write_frame*>(&frame2);
		if(frame == nullptr)
			return;

		if(frame->get_pixel_format_desc().format == core::pixel_format::invalid)
			return;

		if(frame->get_buffers().empty())
			return;

		if(transform_stack_.back().field_mode == core::field_mode::empty)
			return;

		item item;
		item.pix_desc  = frame->get_pixel_format_desc();
		item.buffers   = frame->get_buffers();
		item.transform = transform_stack_.back();

		items_.push_back(item);
	}

	void pop()
	{
		transform_stack_.pop_back();
	}

	void end_layer()
	{
	}

	boost::shared_future<boost::iterator_range<const uint8_t*>> render(const core::video_format_desc& format_desc)
	{
		return renderer_(std::move(items_), format_desc);
	}

	virtual spl::shared_ptr<cpu::write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)
	{
		return spl::make_shared<cpu::write_frame>(tag, desc);
	}
};

image_mixer::image_mixer() : impl_(new impl()){}
void image_mixer::push(const core::frame_transform& transform){impl_->push(transform);}
void image_mixer::visit(const core::data_frame& frame){impl_->visit(frame);}
void image_mixer::pop(){impl_->pop();}
boost::shared_future<boost::iterator_range<const uint8_t*>> image_mixer::operator()(const core::video_format_desc& format_desc){return impl_->render(format_desc);}
void image_mixer::begin_layer(core::blend_mode blend_mode){impl_->begin_layer(blend_mode);}
void image_mixer::end_layer(){impl_->end_layer();}
spl::shared_ptr<core::write_frame> image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc) {return impl_->create_frame(tag, desc);}

}}}