/*
 * Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
21 \r
22 #include "../../stdafx.h"\r
23 \r
24 #include "image_mixer.h"\r
25 \r
26 #include "../util/xmm.h"\r
27 \r
28 #include <common/assert.h>\r
29 #include <common/gl/gl_check.h>\r
30 #include <common/concurrency/async.h>\r
31 #include <common/memory/memcpy.h>\r
32 \r
33 #include <core/frame/frame.h>\r
34 #include <core/frame/frame_transform.h>\r
35 #include <core/frame/pixel_format.h>\r
36 #include <core/video_format.h>\r
37 \r
38 #include <modules/ffmpeg/producer/util/util.h>\r
39 \r
40 #include <asmlib.h>\r
41 \r
42 #include <gl/glew.h>\r
43 \r
44 #include <tbb/cache_aligned_allocator.h>\r
45 #include <tbb/parallel_for_each.h>\r
46 #include <tbb/concurrent_queue.h>\r
47 \r
48 #include <boost/assign.hpp>\r
49 #include <boost/foreach.hpp>\r
50 #include <boost/range.hpp>\r
51 #include <boost/range/algorithm_ext/erase.hpp>\r
52 #include <boost/thread/future.hpp>\r
53 \r
54 #include <algorithm>\r
55 #include <stdint.h>\r
56 #include <vector>\r
57 \r
#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
    #include <libswscale/swscale.h>
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar { namespace accelerator { namespace cpu {

struct item
{
    core::pixel_format_desc       pix_desc;
    std::array<const uint8_t*, 4> data;
    core::image_transform         transform;

    item()
        : pix_desc(core::pixel_format::invalid)
    {
        data.fill(0);
    }
};

bool operator==(const item& lhs, const item& rhs)
{
    return lhs.data == rhs.data && lhs.transform == rhs.transform;
}

bool operator!=(const item& lhs, const item& rhs)
{
    return !(lhs == rhs);
}

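// The (((T >> 8) + T) >> 8) step below is the standard exact substitute for
// T / 255 when T = S * D[A] + 0x80, so the subtracted term matches
// round(S * D[A] / 255) for all 8-bit inputs. Worked check: S = 255, D[A] = 255
// gives T = 65153; (T >> 8) + T = 254 + 65153 = 65407; 65407 >> 8 = 255.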
// 100% accurate blending with correct rounding.
inline xmm::s8_x blend(xmm::s8_x d, xmm::s8_x s)
{
    using namespace xmm;

    // C(S, D) = S + D - (((T >> 8) + T) >> 8);
    // T(S, D) = S * D[A] + 0x80

    auto aaaa = s8_x::shuffle(d, s8_x(15, 15, 15, 15, 11, 11, 11, 11, 7, 7, 7, 7, 3, 3, 3, 3));
    d         = s8_x(u8_x::min(u8_x(d), u8_x(aaaa))); // Overflow guard. Some source files have color values which incorrectly exceed pre-multiplied alpha values, e.g. red(255) > alpha(254).

    auto xaxa = s16_x(aaaa) >> 8;

    auto t1   = s16_x::multiply_low(s16_x(s) & 0x00FF, xaxa) + 0x80;
    auto t2   = s16_x::multiply_low(s16_x(s) >> 8    , xaxa) + 0x80;

    auto xyxy = s8_x(((t1 >> 8) + t1) >> 8);
    auto yxyx = s8_x((t2 >> 8) + t2);
    auto argb = s8_x::blend(xyxy, yxyx, s8_x(-1, 0, -1, 0));

    return s8_x(s) + (d - argb);
}

template<typename temporal, typename alignment>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
{
    using namespace xmm;

    // Loads are always temporal; only the stores honour the temporal policy.
    for (std::size_t n = 0; n < count; n += 32)
    {
        auto s0 = s8_x::load<temporal_tag, alignment>(dest+n+0);
        auto s1 = s8_x::load<temporal_tag, alignment>(dest+n+16);

        auto d0 = s8_x::load<temporal_tag, alignment>(source+n+0);
        auto d1 = s8_x::load<temporal_tag, alignment>(source+n+16);

        auto argb0 = blend(d0, s0);
        auto argb1 = blend(d1, s1);

        s8_x::store<temporal, alignment>(argb0, dest+n+0 );
        s8_x::store<temporal, alignment>(argb1, dest+n+16);
    }
}

template<typename temporal>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
{
    using namespace xmm;

    // Forward the temporal policy and pick the alignment variant at runtime.
    if (reinterpret_cast<uintptr_t>(dest) % 16 != 0 || reinterpret_cast<uintptr_t>(source) % 16 != 0)
        kernel<temporal, unaligned_tag>(dest, source, count);
    else
        kernel<temporal, aligned_tag>(dest, source, count);
}

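// Renders a set of items into a single BGRA frame: convert() first rescales any
// input that is not already full-size BGRA via swscale, then draw() composites the
// items scanline by scanline with the SIMD blend kernel above.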
class image_renderer
{
    tbb::concurrent_unordered_map<int, tbb::concurrent_bounded_queue<std::shared_ptr<SwsContext>>> sws_devices_;
    tbb::concurrent_bounded_queue<spl::shared_ptr<buffer>>                                         temp_buffers_;
public:
    boost::unique_future<core::const_array> operator()(std::vector<item> items, const core::video_format_desc& format_desc)
    {
        convert(items, format_desc.width, format_desc.height);

        auto result = spl::make_shared<buffer>(format_desc.size, 0);
        if (format_desc.field_mode != core::field_mode::progressive)
        {
            draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::upper);
            draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::lower);
        }
        else
        {
            draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::progressive);
        }

        temp_buffers_.clear();

        return async(launch_policy::deferred, [=]
        {
            return core::const_array(result->data(), format_desc.size, result);
        });
    }

private:

    void draw(std::vector<item> items, uint8_t* dest, std::size_t width, std::size_t height, core::field_mode field_mode)
    {
        BOOST_FOREACH(auto& item, items)
            item.transform.field_mode &= field_mode;

        // Remove empty items.
        boost::range::remove_erase_if(items, [&](const item& item)
        {
            return item.transform.field_mode == core::field_mode::empty;
        });

        // Remove first-field stills.
        boost::range::remove_erase_if(items, [&](const item& item)
        {
            return item.transform.is_still && item.transform.field_mode == field_mode; // Only use the last field for stills.
        });

        if (items.empty())
            return;

        auto start = field_mode == core::field_mode::lower ? 1 : 0;
        auto step  = field_mode == core::field_mode::progressive ? 1 : 2;

        // TODO: Add support for fill translations.
        // TODO: Add support for mask rect.
        // TODO: Add support for opacity.
        // TODO: Add support for mix transition.
        // TODO: Add support for push transition.
        // TODO: Add support for wipe transition.
        // TODO: Add support for slide transition.
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, height/step), [&](const tbb::blocked_range<std::size_t>& r)
        {
            for (auto i = r.begin(); i != r.end(); ++i)
            {
                auto y = i*step + start;

                // Blend all but the last item with temporal stores, then write the
                // final composite with non-temporal stores to avoid polluting the cache.
                for (std::size_t n = 0; n < items.size()-1; ++n)
                    kernel<xmm::temporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);

                std::size_t n = items.size()-1;
                kernel<xmm::nontemporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
            }

            _mm_mfence();
        });
    }

    void convert(std::vector<item>& source_items, int width, int height)
    {
        std::set<std::array<const uint8_t*, 4>> buffers;

        BOOST_FOREACH(auto& item, source_items)
            buffers.insert(item.data);

        auto dest_items = source_items;

        tbb::parallel_for_each(buffers.begin(), buffers.end(), [&](const std::array<const uint8_t*, 4>& data)
        {
            auto pix_desc = std::find_if(source_items.begin(), source_items.end(), [&](const item& item){return item.data == data;})->pix_desc;

            // Already full-size BGRA, nothing to convert.
            if (pix_desc.format == core::pixel_format::bgra &&
                pix_desc.planes.at(0).width == width &&
                pix_desc.planes.at(0).height == height)
                return;

            std::array<uint8_t*, 4> data2 = {};
            for (std::size_t n = 0; n < data.size(); ++n)
                data2.at(n) = const_cast<uint8_t*>(data[n]);

            auto input_av_frame = ffmpeg::make_av_frame(data2, pix_desc);

            // Pool scaling contexts, keyed on an approximation of (width, height, format).
            int key = ((input_av_frame->width << 22) & 0xFFC00000) | ((input_av_frame->height << 6) & 0x003FC000) | ((input_av_frame->format << 7) & 0x00007F00);

            auto& pool = sws_devices_[key];

            std::shared_ptr<SwsContext> sws_device;
            if (!pool.try_pop(sws_device))
            {
                double param;
                sws_device.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<PixelFormat>(input_av_frame->format), width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
            }

            if (!sws_device)
                BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling device.") << boost::errinfo_api_function("sws_getContext"));

            auto dest_frame = spl::make_shared<buffer>(width*height*4);
            temp_buffers_.push(dest_frame);

            {
                spl::shared_ptr<AVFrame> dest_av_frame(avcodec_alloc_frame(), av_free);
                avcodec_get_frame_defaults(dest_av_frame.get());
                avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), PIX_FMT_BGRA, width, height);

                sws_scale(sws_device.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);
                pool.push(sws_device);
            }

            // Point every item that referenced this buffer at the converted BGRA frame.
            for (std::size_t n = 0; n < source_items.size(); ++n)
            {
                if (source_items[n].data == data)
                {
                    dest_items[n].data.fill(0);
                    dest_items[n].data[0]         = dest_frame->data();
                    dest_items[n].pix_desc        = core::pixel_format_desc(core::pixel_format::bgra);
                    dest_items[n].pix_desc.planes = boost::assign::list_of(core::pixel_format_desc::plane(width, height, 4));
                    dest_items[n].transform       = source_items[n].transform;
                }
            }
        });

        source_items = std::move(dest_items);
    }
};

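// Frame visitor state: push()/pop() maintain the current image-transform stack,
// visit() records one item per visited frame, and render() hands the collected
// items to the renderer for the given video format.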
struct image_mixer::impl : boost::noncopyable
{
    image_renderer                     renderer_;
    std::vector<core::image_transform> transform_stack_;
    std::vector<item>                  items_; // layer/stream/items
public:
    impl()
        : transform_stack_(1)
    {
        CASPAR_LOG(info) << L"Initialized Streaming SIMD Extensions Accelerated CPU Image Mixer";
    }

    void begin_layer(core::blend_mode blend_mode)
    {
    }

    void push(const core::frame_transform& transform)
    {
        transform_stack_.push_back(transform_stack_.back()*transform.image_transform);
    }

    void visit(const core::mutable_frame& frame)
    {
        if (frame.pixel_format_desc().format == core::pixel_format::invalid)
            return;

        if (frame.pixel_format_desc().planes.empty())
            return;

        if (frame.pixel_format_desc().planes.at(0).size < 16)
            return;

        if (transform_stack_.back().field_mode == core::field_mode::empty)
            return;

        item item;
        item.pix_desc  = frame.pixel_format_desc();
        item.transform = transform_stack_.back();
        for (std::size_t n = 0; n < item.pix_desc.planes.size(); ++n)
            item.data.at(n) = frame.image_data(n).begin();

        items_.push_back(item);
    }

    void pop()
    {
        transform_stack_.pop_back();
    }

    void end_layer()
    {
    }

    boost::unique_future<core::const_array> render(const core::video_format_desc& format_desc)
    {
        return renderer_(std::move(items_), format_desc);
    }

    virtual core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, double frame_rate, core::field_mode field_mode)
    {
        std::vector<core::mutable_array> buffers;
        BOOST_FOREACH(auto& plane, desc.planes)
        {
            auto buf = spl::make_shared<buffer>(plane.size);
            buffers.push_back(core::mutable_array(buf->data(), plane.size, buf));
        }
        return core::mutable_frame(std::move(buffers), core::audio_buffer(), tag, desc, frame_rate, field_mode);
    }
};

image_mixer::image_mixer() : impl_(new impl()){}
image_mixer::~image_mixer(){}
void image_mixer::push(const core::frame_transform& transform){impl_->push(transform);}
void image_mixer::visit(const core::mutable_frame& frame){impl_->visit(frame);}
void image_mixer::pop(){impl_->pop();}
boost::unique_future<core::const_array> image_mixer::operator()(const core::video_format_desc& format_desc){return impl_->render(format_desc);}
void image_mixer::begin_layer(core::blend_mode blend_mode){impl_->begin_layer(blend_mode);}
void image_mixer::end_layer(){impl_->end_layer();}
core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, double frame_rate, core::field_mode field_mode){return impl_->create_frame(tag, desc, frame_rate, field_mode);}

}}}