// accelerator/cpu/image/image_mixer.cpp (CasparCG)
1 /*
2 * Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
3 *
4 * This file is part of CasparCG (www.casparcg.com).
5 *
6 * CasparCG is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
10 *
11 * CasparCG is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * Author: Robert Nagy, ronag89@gmail.com
20 */
21
22 #include "../../stdafx.h"
23
24 #include "image_mixer.h"
25
26 #include "../util/xmm.h"
27
28 #include <common/assert.h>
29 #include <common/gl/gl_check.h>
30 #include <common/future.h>
31 #include <common/array.h>
32
33 #include <core/frame/frame.h>
34 #include <core/frame/frame_transform.h>
35 #include <core/frame/pixel_format.h>
36 #include <core/video_format.h>
37
38 #include <modules/ffmpeg/producer/util/util.h>
39
40 #include <asmlib.h>
41
42 #include <gl/glew.h>
43
#include <tbb/parallel_for.h>
#include <tbb/parallel_for_each.h>
#include <tbb/concurrent_queue.h>
#include <tbb/concurrent_unordered_map.h>
47
48 #include <boost/range/algorithm_ext/erase.hpp>
49 #include <boost/thread/future.hpp>
50
51 #include <algorithm>
52 #include <cstdint>
53 #include <vector>
54 #include <set>
55
56 #if defined(_MSC_VER)
57 #pragma warning (push)
58 #pragma warning (disable : 4244)
59 #endif
60 extern "C" 
61 {
62         #include <libswscale/swscale.h>
63         #include <libavcodec/avcodec.h>
64         #include <libavformat/avformat.h>
65 }
66 #if defined(_MSC_VER)
67 #pragma warning (pop)
68 #endif
69
70 namespace caspar { namespace accelerator { namespace cpu {
71                 
72 struct item
73 {
74         core::pixel_format_desc                 pix_desc        = core::pixel_format::invalid;
75         std::array<const uint8_t*, 4>   data;
76         core::image_transform                   transform;
77
78         item()
79         {
80                 data.fill(0);
81         }
82 };
83
84 bool operator==(const item& lhs, const item& rhs)
85 {
86         return lhs.data == rhs.data && lhs.transform == rhs.transform;
87 }
88
// Negation of operator==, defined in terms of it so the two can never disagree.
bool operator!=(const item& lhs, const item& rhs)
{
	return !(lhs == rhs);
}
93         
// 100% accurate blending with correct rounding.
//
// Composites 4 premultiplied-alpha BGRA pixels (one 16-byte vector) as
// result = s + d - s*d[A]/255, i.e. the first argument over the second.
// The division by 255 is computed exactly via the ((x >> 8) + x) >> 8
// trick with a +0x80 rounding bias — hence "100% accurate".
inline xmm::s8_x blend(xmm::s8_x d, xmm::s8_x s)
{	
	using namespace xmm;
		
	// C(S, D) = S + D - (((T >> 8) + T) >> 8);
	// T(S, D) = S * D[A] + 0x80

	// Broadcast each pixel's alpha byte across its four channels.
	auto aaaa   = s8_x::shuffle(d, s8_x(15, 15, 15, 15, 11, 11, 11, 11, 7, 7, 7, 7, 3, 3, 3, 3));
	d			= s8_x(u8_x::min(u8_x(d), u8_x(aaaa))); // Overflow guard. Some source files have color values which incorrectly exceed pre-multiplied alpha values, e.g. red(255) > alpha(254).

	// Alpha moved into the low byte of each 16-bit lane: [a, 0, a, 0, ...].
	auto xaxa	= s16_x(aaaa) >> 8;		
	              
	// T for the even (low) bytes and the odd (high) bytes of each lane.
	auto t1		= s16_x::multiply_low(s16_x(s) & 0x00FF, xaxa) + 0x80;	
	auto t2		= s16_x::multiply_low(s16_x(s) >> 8    , xaxa) + 0x80;
		
	// ((T >> 8) + T) >> 8 == T/255 exactly; for t2 the final >> 8 is
	// omitted so its quotient lands in the high byte of each lane, ready
	// to be byte-blended with the low-byte results below.
	auto xyxy	= s8_x(((t1 >> 8) + t1) >> 8);	
	auto yxyx	= s8_x((t2 >> 8) + t2);	
	auto argb   = s8_x::blend(xyxy, yxyx, s8_x(-1, 0, -1, 0)); // Interleave low/high byte results.

	return s8_x(s) + (d - argb);
}
116         
117 template<typename temporal, typename alignment>
118 static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
119 {                       
120         using namespace xmm;
121
122         for(auto n = 0; n < count; n += 32)    
123         {
124                 auto s0 = s8_x::load<temporal_tag, alignment>(dest+n+0);
125                 auto s1 = s8_x::load<temporal_tag, alignment>(dest+n+16);
126
127                 auto d0 = s8_x::load<temporal_tag, alignment>(source+n+0);
128                 auto d1 = s8_x::load<temporal_tag, alignment>(source+n+16);
129                 
130                 auto argb0 = blend(d0, s0);
131                 auto argb1 = blend(d1, s1);
132
133                 s8_x::store<temporal, alignment>(argb0, dest+n+0 );
134                 s8_x::store<temporal, alignment>(argb1, dest+n+16);
135         } 
136 }
137
138 template<typename temporal>
139 static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
140 {                       
141         using namespace xmm;
142
143         if(reinterpret_cast<int>(dest) % 16 != 0 || reinterpret_cast<int>(source) % 16 != 0)
144                 kernel<temporal_tag, unaligned_tag>(dest, source, count);
145         else
146                 kernel<temporal_tag, aligned_tag>(dest, source, count);
147 }
148
// Software compositor: converts every input item to a full-resolution
// BGRA buffer via libswscale (contexts pooled per input geometry/format)
// and then blends the items together on the CPU with the SIMD kernels
// above.
class image_renderer
{
	// Pool of SwsContexts keyed by packed (width, height, format) so a
	// context is reused across frames instead of re-created each time.
	tbb::concurrent_unordered_map<int64_t, tbb::concurrent_bounded_queue<std::shared_ptr<SwsContext>>>	sws_devices_;
	// Keeps BGRA conversion buffers alive until the current draw is done.
	tbb::concurrent_bounded_queue<spl::shared_ptr<buffer>>												temp_buffers_;
public:	
	// Composites |items| into one BGRA frame of |format_desc| size and
	// returns it as a ready future. Interlaced formats are rendered in
	// two field passes (upper then lower); progressive in one pass.
	std::future<array<const std::uint8_t>> operator()(std::vector<item> items, const core::video_format_desc& format_desc)
	{	
		convert(items, format_desc.width, format_desc.height);		
				
		// Remove first field stills.
		boost::range::remove_erase_if(items, [&](const item& item)
		{
			return item.transform.is_still && item.transform.field_mode == format_desc.field_mode; // only use last field for stills.
		});
		
		// Stills are progressive
		for (auto& item : items)
		{
			if(item.transform.is_still)
				item.transform.field_mode = core::field_mode::progressive;
		}

		// Zero-initialized destination; blending accumulates into it.
		auto result = spl::make_shared<buffer>(format_desc.size, 0);
		if(format_desc.field_mode != core::field_mode::progressive)
		{			
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::upper);
			draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::lower);
		}
		else
		{
			draw(items, result->data(), format_desc.width, format_desc.height,  core::field_mode::progressive);
		}

		// Conversion scratch buffers are no longer referenced by draw().
		temp_buffers_.clear();
		
		return make_ready_future(array<const std::uint8_t>(result->data(), format_desc.size, true, result));
	}

private:

	// Blends |items| in order into |dest| (full-frame BGRA), touching
	// only the scanlines belonging to |field_mode| (every other row for
	// a field pass, every row for progressive).
	void draw(std::vector<item> items, uint8_t* dest, std::size_t width, std::size_t height, core::field_mode field_mode)
	{		
		// Restrict each item to the field currently being drawn.
		for (auto& item : items)
			item.transform.field_mode &= field_mode;
		
		// Remove empty items.
		boost::range::remove_erase_if(items, [&](const item& item)
		{
			return item.transform.field_mode == core::field_mode::empty;
		});

		if(items.empty())
			return;
		
		// Lower field starts on row 1; field passes step 2 rows at a time.
		auto start = field_mode == core::field_mode::lower ? 1 : 0;
		auto step  = field_mode == core::field_mode::progressive ? 1 : 2;
		
		// TODO: Add support for fill translations.
		// TODO: Add support for mask rect.
		// TODO: Add support for opacity.
		// TODO: Add support for mix transition.
		// TODO: Add support for push transition.
		// TODO: Add support for wipe transition.
		// TODO: Add support for slide transition.
		// Rows are independent, so blend them in parallel.
		tbb::parallel_for(tbb::blocked_range<std::size_t>(0, height/step), [&](const tbb::blocked_range<std::size_t>& r)
		{
			for(auto i = r.begin(); i != r.end(); ++i)
			{
				auto y = i*step+start;

				// All but the last item use temporal (cached) stores,
				// since the row is re-read by the next blend; the final
				// item requests non-temporal (streaming) stores.
				for(std::size_t n = 0; n < items.size()-1; ++n)
					kernel<xmm::temporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
				
				std::size_t n = items.size()-1;				
				kernel<xmm::nontemporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
			}

			// Fence so any streaming stores are globally visible before
			// the parallel task completes.
			_mm_mfence();
		});
	}
		
	// Converts every item that is not already full-resolution BGRA into
	// a width x height BGRA buffer via swscale. Items sharing the same
	// source planes are converted once and all rewired to the result.
	void convert(std::vector<item>& source_items, int width, int height)
	{
		// Distinct plane-pointer sets; each unique source converts once.
		std::set<std::array<const uint8_t*, 4>> buffers;

		for (auto& item : source_items)
			buffers.insert(item.data);
		
		auto dest_items = source_items;

		tbb::parallel_for_each(buffers.begin(), buffers.end(), [&](const std::array<const uint8_t*, 4>& data)
		{			
			auto pix_desc = std::find_if(source_items.begin(), source_items.end(), [&](const item& item){return item.data == data;})->pix_desc;

			// Already full-resolution BGRA — nothing to convert.
			if(pix_desc.format == core::pixel_format::bgra && 
				pix_desc.planes.at(0).width == width &&
				pix_desc.planes.at(0).height == height)
				return;

			// make_av_frame wants non-const plane pointers.
			std::array<uint8_t*, 4> data2 = {};
			for(std::size_t n = 0; n < data.size(); ++n)
				data2.at(n) = const_cast<uint8_t*>(data[n]);

			auto input_av_frame = ffmpeg::make_av_frame(data2, pix_desc);

		
			// Pool key packs width (bits 32-47), height (bits 16-31) and
			// pixel format (bits 8-15); the masked ranges do not overlap.
			int64_t key = ((static_cast<int64_t>(input_av_frame->width)	 << 32) & 0xFFFF00000000) | 
						  ((static_cast<int64_t>(input_av_frame->height) << 16) & 0xFFFF0000) | 
						  ((static_cast<int64_t>(input_av_frame->format) <<  8) & 0xFF00);

			auto& pool = sws_devices_[key];

			std::shared_ptr<SwsContext> sws_device;
			if(!pool.try_pop(sws_device))
			{
				// NOTE(review): |param| is passed uninitialized; the
				// bilinear scaler takes no tuning params, but nullptr
				// would be clearer — confirm against sws_getContext docs.
				double param;
				sws_device.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<PixelFormat>(input_av_frame->format), width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
			}
			
			if(!sws_device)				
				CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling device.") << boost::errinfo_api_function("sws_getContext"));				
		
			// Destination must outlive draw(); park it in temp_buffers_.
			auto dest_frame = spl::make_shared<buffer>(width*height*4);
			temp_buffers_.push(dest_frame);

			{
				spl::shared_ptr<AVFrame> dest_av_frame(avcodec_alloc_frame(), av_free);	
				avcodec_get_frame_defaults(dest_av_frame.get());			
				avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), PIX_FMT_BGRA, width, height);
				
				sws_scale(sws_device.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);				
				// Return the context to the pool for reuse.
				pool.push(sws_device);
			}
					
			// Point every item that used this source at the new buffer.
			for(std::size_t n = 0; n < source_items.size(); ++n)
			{
				if(source_items[n].data == data)
				{
					dest_items[n].data.assign(0);
					dest_items[n].data[0]			= dest_frame->data();
					dest_items[n].pix_desc			= core::pixel_format_desc(core::pixel_format::bgra);
					dest_items[n].pix_desc.planes	= { core::pixel_format_desc::plane(width, height, 4) };
					dest_items[n].transform			= source_items[n].transform;
				}
			}
		});	

		source_items = std::move(dest_items);
	}
};
299                 
300 struct image_mixer::impl : boost::noncopyable
301 {       
302         image_renderer                                          renderer_;
303         std::vector<core::image_transform>      transform_stack_;
304         std::vector<item>                                       items_; // layer/stream/items
305 public:
306         impl() 
307                 : transform_stack_(1)   
308         {
309                 CASPAR_LOG(info) << L"Initialized Streaming SIMD Extensions Accelerated CPU Image Mixer";
310         }
311
312         void begin_layer(core::blend_mode blend_mode)
313         {
314         }
315                 
316         void push(const core::frame_transform& transform)
317         {
318                 transform_stack_.push_back(transform_stack_.back()*transform.image_transform);
319         }
320                 
321         void visit(const core::const_frame& frame)
322         {                       
323                 if(frame.pixel_format_desc().format == core::pixel_format::invalid)
324                         return;
325
326                 if(frame.pixel_format_desc().planes.empty())
327                         return;
328                 
329                 if(frame.pixel_format_desc().planes.at(0).size < 16)
330                         return;
331
332                 if(transform_stack_.back().field_mode == core::field_mode::empty)
333                         return;
334
335                 item item;
336                 item.pix_desc   = frame.pixel_format_desc();
337                 item.transform  = transform_stack_.back();
338                 for(int n = 0; n < item.pix_desc.planes.size(); ++n)
339                         item.data.at(n) = frame.image_data(n).begin();          
340
341                 items_.push_back(item);
342         }
343
344         void pop()
345         {
346                 transform_stack_.pop_back();
347         }
348
349         void end_layer()
350         {               
351         }
352         
353         std::future<array<const std::uint8_t>> render(const core::video_format_desc& format_desc)
354         {
355                 return renderer_(std::move(items_), format_desc);
356         }
357         
358         virtual core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc)
359         {
360                 std::vector<array<std::uint8_t>> buffers;
361                 for (auto& plane : desc.planes)
362                 {
363                         auto buf = spl::make_shared<buffer>(plane.size);
364                         buffers.push_back(array<std::uint8_t>(buf->data(), plane.size, true, buf));
365                 }
366                 return core::mutable_frame(std::move(buffers), core::audio_buffer(), tag, desc);
367         }
368 };
369
// Public image_mixer interface: thin forwarders to the pimpl.
image_mixer::image_mixer() : impl_(new impl()){}
image_mixer::~image_mixer(){}
void image_mixer::push(const core::frame_transform& transform){impl_->push(transform);}
void image_mixer::visit(const core::const_frame& frame){impl_->visit(frame);}
void image_mixer::pop(){impl_->pop();}
std::future<array<const std::uint8_t>> image_mixer::operator()(const core::video_format_desc& format_desc){return impl_->render(format_desc);}
void image_mixer::begin_layer(core::blend_mode blend_mode){impl_->begin_layer(blend_mode);}
void image_mixer::end_layer(){impl_->end_layer();}
core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc) {return impl_->create_frame(tag, desc);}
379
380 }}}