/*
* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
*
* This file is part of CasparCG (www.casparcg.com).
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Robert Nagy, ronag89@gmail.com
*/
21 \r
22 #include "../../stdafx.h"\r
23 \r
24 #include "image_mixer.h"\r
25 \r
26 #include "image_kernel.h"\r
27 \r
28 #include "../util/write_frame.h"\r
29 #include "../util/context.h"\r
30 #include "../util/host_buffer.h"\r
31 #include "../util/device_buffer.h"\r
32 \r
33 #include <common/gl/gl_check.h>\r
34 #include <common/concurrency/async.h>\r
35 #include <common/memory/memcpy.h>\r
36 \r
37 #include <core/frame/write_frame.h>\r
38 #include <core/frame/frame_transform.h>\r
39 #include <core/frame/pixel_format.h>\r
40 #include <core/video_format.h>\r
41 \r
42 #include <asmlib.h>\r
43 \r
44 #include <gl/glew.h>\r
45 \r
46 #include <boost/foreach.hpp>\r
47 #include <boost/range/algorithm_ext/erase.hpp>\r
48 #include <boost/thread/future.hpp>\r
49 \r
50 #include <algorithm>\r
51 #include <vector>\r
52 \r
53 using namespace boost::assign;\r
54 \r
namespace caspar { namespace accelerator { namespace ogl {

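// A device texture whose asynchronous upload to the GPU may still be in flight.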
typedef boost::shared_future<spl::shared_ptr<device_buffer>> future_texture;

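// One visited frame: its pixel layout, CPU-side plane buffers, the textures
// those buffers are being uploaded to, and the accumulated frame transform.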
struct item
{
	core::pixel_format_desc					pix_desc;
	std::vector<spl::shared_ptr<host_buffer>>	buffers;
	std::vector<future_texture>					textures;
	core::frame_transform						transform;

	item()
		: pix_desc(core::pixel_format::invalid)
	{
	}
};

bool operator==(const item& lhs, const item& rhs)
{
	return lhs.buffers == rhs.buffers && lhs.transform == rhs.transform;
}

bool operator!=(const item& lhs, const item& rhs)
{
	return !(lhs == rhs);
}

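// A render layer: the items to composite and the blend mode used when the
// layer is composited onto the frame.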
struct layer
{
	std::vector<item>	items;
	core::blend_mode	blend_mode;

	layer()
		: blend_mode(core::blend_mode::normal)
	{
	}

	layer(std::vector<item> items, core::blend_mode blend_mode)
		: items(std::move(items))
		, blend_mode(blend_mode)
	{
	}
};

bool operator==(const layer& lhs, const layer& rhs)
{
	return lhs.items == rhs.items && lhs.blend_mode == rhs.blend_mode;
}

bool operator!=(const layer& lhs, const layer& rhs)
{
	return !(lhs == rhs);
}

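// Renders a set of layers into a host-readable BGRA image. The result of the
// previous render is cached so that an unchanged frame can be returned without
// touching the GPU again.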
class image_renderer
{
	spl::shared_ptr<context>	ogl_;
	image_kernel				kernel_;
	std::pair<std::vector<layer>, boost::shared_future<boost::iterator_range<const uint8_t*>>> last_image_;
public:
	image_renderer(const spl::shared_ptr<context>& ogl)
		: ogl_(ogl)
		, kernel_(ogl_)
	{
	}

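	// Returns the cached readback if the layers are identical to the previous
	// frame, otherwise renders a new image and caches it.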
	boost::shared_future<boost::iterator_range<const uint8_t*>> operator()(std::vector<layer> layers, const core::video_format_desc& format_desc)
	{
		if(last_image_.first == layers && last_image_.second.has_value())
			return last_image_.second;

		auto image	= render(layers, format_desc);
		last_image_ = std::make_pair(std::move(layers), image);
		return image;
	}

private:
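	// Renders the layers to a BGRA image. An empty frame and a single
	// full-frame BGRA item bypass the GPU entirely; everything else is drawn
	// by the image kernel on the OpenGL thread and read back asynchronously.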
	boost::shared_future<boost::iterator_range<const uint8_t*>> render(std::vector<layer> layers, const core::video_format_desc& format_desc)
	{
		static const auto empty = spl::make_shared<const std::vector<uint8_t, tbb::cache_aligned_allocator<uint8_t>>>(2048*2048*4, 0);
		CASPAR_VERIFY(empty->size() >= format_desc.size);

		if(layers.empty())
		{ // Bypass GPU with empty frame.
			return async(launch_policy::deferred, [=]
			{
				return boost::iterator_range<const uint8_t*>(empty->data(), empty->data() + format_desc.size);
			});
		}
		else if(has_uswc_memcpy() &&
				layers.size() == 1 &&
				layers.at(0).items.size() == 1 &&
				(kernel_.has_blend_modes() && layers.at(0).blend_mode != core::blend_mode::normal) == false &&
				layers.at(0).items.at(0).pix_desc.format == core::pixel_format::bgra &&
				layers.at(0).items.at(0).buffers.at(0)->size() == format_desc.size &&
				layers.at(0).items.at(0).transform == core::frame_transform())
		{ // Bypass GPU using streaming loads to cacheable memory.
			auto uswc_buffer = layers.at(0).items.at(0).buffers.at(0);
			auto buffer      = std::make_shared<std::vector<uint8_t, tbb::cache_aligned_allocator<uint8_t>>>(uswc_buffer->size());

			uswc_memcpy(buffer->data(), uswc_buffer->data(), uswc_buffer->size());

			return async(launch_policy::deferred, [=]
			{
				return boost::iterator_range<const uint8_t*>(buffer->data(), buffer->data() + buffer->size());
			});
		}
		else
		{
			// Start host->device transfers.

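			// Each distinct host buffer is uploaded only once; items that share
			// a buffer reuse the same pending texture.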
			std::map<const host_buffer*, future_texture> buffer_map;

			BOOST_FOREACH(auto& layer, layers)
			{
				BOOST_FOREACH(auto& item, layer.items)
				{
					auto host_buffers = item.buffers;

					for(size_t n = 0; n < host_buffers.size(); ++n)
					{
						auto buffer = host_buffers[n];
						auto it     = buffer_map.find(buffer.get());
						if(it == buffer_map.end())
						{
							auto plane          = item.pix_desc.planes[n];
							auto future_texture = ogl_->copy_async(buffer, plane.width, plane.height, plane.channels);
							it = buffer_map.insert(std::make_pair(buffer.get(), std::move(future_texture))).first;
						}
						item.textures.push_back(it->second);
					}
					item.buffers.clear();
				}
			}

			// Draw. For interlaced formats the layers are drawn in two passes,
			// with item field modes masked to the upper and lower field respectively.
			boost::shared_future<spl::shared_ptr<host_buffer>> buffer = ogl_->begin_invoke([=]() mutable -> spl::shared_ptr<host_buffer>
			{
				auto draw_buffer = create_mixer_buffer(4, format_desc);

				if(format_desc.field_mode != core::field_mode::progressive)
				{
					auto upper = layers;
					auto lower = std::move(layers);

					BOOST_FOREACH(auto& layer, upper)
					{
						BOOST_FOREACH(auto& item, layer.items)
							item.transform.field_mode &= core::field_mode::upper;
					}

					BOOST_FOREACH(auto& layer, lower)
					{
						BOOST_FOREACH(auto& item, layer.items)
							item.transform.field_mode &= core::field_mode::lower;
					}

					draw(std::move(upper), draw_buffer, format_desc);
					draw(std::move(lower), draw_buffer, format_desc);
				}
				else
				{
					draw(std::move(layers), draw_buffer, format_desc);
				}

				auto result = ogl_->create_host_buffer(static_cast<int>(format_desc.size), host_buffer::usage::read_only);
				draw_buffer->copy_to(*result);
				return result;
			});

			// Defer memory mapping.
			return async(launch_policy::deferred, [=]() mutable -> boost::iterator_range<const uint8_t*>
			{
				const auto& buf = buffer.get();
				if(!buf->data())
					ogl_->invoke(std::bind(&host_buffer::map, std::ref(buf)), task_priority::high_priority);

				// .get() and ->data() can block the calling thread, and ->data() can also block the OpenGL thread, so defer them as long as possible.
				auto ptr = reinterpret_cast<const uint8_t*>(buf->data());
				return boost::iterator_range<const uint8_t*>(ptr, ptr + buffer.get()->size());
			});
		}
	}

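	// Composites the layers onto draw_buffer in order. The key produced by one
	// layer's key items is carried over as the layer key of the following layer.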
	void draw(std::vector<layer>&& layers,
			  spl::shared_ptr<device_buffer>& draw_buffer,
			  const core::video_format_desc& format_desc)
	{
		std::shared_ptr<device_buffer> layer_key_buffer;

		BOOST_FOREACH(auto& layer, layers)
			draw_layer(std::move(layer), draw_buffer, layer_key_buffer, format_desc);
	}

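	// Draws a single layer. Layers with a non-normal blend mode are first
	// rendered to an intermediate buffer and then blended onto the target;
	// layers with the normal blend mode are drawn directly (fast path).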
	void draw_layer(layer&& layer,
					spl::shared_ptr<device_buffer>& draw_buffer,
					std::shared_ptr<device_buffer>& layer_key_buffer,
					const core::video_format_desc& format_desc)
	{
		// Remove empty items.
		boost::range::remove_erase_if(layer.items, [](const item& item)
		{
			return item.transform.field_mode == core::field_mode::empty;
		});

		if(layer.items.empty())
			return;

		std::shared_ptr<device_buffer> local_key_buffer;
		std::shared_ptr<device_buffer> local_mix_buffer;

		if(layer.blend_mode != core::blend_mode::normal)
		{
			auto layer_draw_buffer = create_mixer_buffer(4, format_desc);

			BOOST_FOREACH(auto& item, layer.items)
				draw_item(std::move(item), layer_draw_buffer, layer_key_buffer, local_key_buffer, local_mix_buffer, format_desc);

			draw_mixer_buffer(layer_draw_buffer, std::move(local_mix_buffer), core::blend_mode::normal);
			draw_mixer_buffer(draw_buffer, std::move(layer_draw_buffer), layer.blend_mode);
		}
		else // fast path
		{
			BOOST_FOREACH(auto& item, layer.items)
				draw_item(std::move(item), draw_buffer, layer_key_buffer, local_key_buffer, local_mix_buffer, format_desc);

			draw_mixer_buffer(draw_buffer, std::move(local_mix_buffer), core::blend_mode::normal);
		}

		layer_key_buffer = std::move(local_key_buffer);
	}

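	// Draws a single item. Key items are accumulated into local_key_buffer,
	// mix items are blended additively into local_mix_buffer, and ordinary
	// items are drawn onto draw_buffer using any pending local and layer keys.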
	void draw_item(item&& item,
				   spl::shared_ptr<device_buffer>& draw_buffer,
				   std::shared_ptr<device_buffer>& layer_key_buffer,
				   std::shared_ptr<device_buffer>& local_key_buffer,
				   std::shared_ptr<device_buffer>& local_mix_buffer,
				   const core::video_format_desc& format_desc)
	{
		draw_params draw_params;
		draw_params.pix_desc	= std::move(item.pix_desc);
		draw_params.transform	= std::move(item.transform);
		BOOST_FOREACH(auto& future_texture, item.textures)
			draw_params.textures.push_back(future_texture.get());

		if(draw_params.transform.is_key)
		{
			local_key_buffer = local_key_buffer ? local_key_buffer : create_mixer_buffer(1, format_desc);

			draw_params.background	= local_key_buffer;
			draw_params.local_key	= nullptr;
			draw_params.layer_key	= nullptr;

			kernel_.draw(std::move(draw_params));
		}
		else if(draw_params.transform.is_mix)
		{
			local_mix_buffer = local_mix_buffer ? local_mix_buffer : create_mixer_buffer(4, format_desc);

			draw_params.background	= local_mix_buffer;
			draw_params.local_key	= std::move(local_key_buffer);
			draw_params.layer_key	= layer_key_buffer;

			draw_params.keyer		= keyer::additive;

			kernel_.draw(std::move(draw_params));
		}
		else
		{
			draw_mixer_buffer(draw_buffer, std::move(local_mix_buffer), core::blend_mode::normal);

			draw_params.background	= draw_buffer;
			draw_params.local_key	= std::move(local_key_buffer);
			draw_params.layer_key	= layer_key_buffer;

			kernel_.draw(std::move(draw_params));
		}
	}

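	// Blends source_buffer onto draw_buffer as a full-frame BGRA quad using the
	// given blend mode. A null source buffer is a no-op.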
	void draw_mixer_buffer(spl::shared_ptr<device_buffer>& draw_buffer,
						   std::shared_ptr<device_buffer>&& source_buffer,
						   core::blend_mode blend_mode = core::blend_mode::normal)
	{
		if(!source_buffer)
			return;

		draw_params draw_params;
		draw_params.pix_desc.format	= core::pixel_format::bgra;
		draw_params.pix_desc.planes	= list_of(core::pixel_format_desc::plane(source_buffer->width(), source_buffer->height(), 4));
		draw_params.textures		= list_of(source_buffer);
		draw_params.transform		= core::frame_transform();
		draw_params.blend_mode		= blend_mode;
		draw_params.background		= draw_buffer;

		kernel_.draw(std::move(draw_params));
	}

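	// Allocates a device buffer of the video format's dimensions and clears it.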
	spl::shared_ptr<device_buffer> create_mixer_buffer(int stride, const core::video_format_desc& format_desc)
	{
		auto buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, stride);
		buffer->clear();
		return buffer;
	}
};

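// Collects items from the frame visitor interface into layers and hands them
// to the renderer when the frame is complete.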
struct image_mixer::impl : boost::noncopyable
{
	spl::shared_ptr<context>			ogl_;
	image_renderer						renderer_;
	std::vector<core::frame_transform>	transform_stack_;
	std::vector<layer>					layers_; // layer/stream/items
public:
	impl(const spl::shared_ptr<context>& ogl)
		: ogl_(ogl)
		, renderer_(ogl)
		, transform_stack_(1)
	{
		CASPAR_LOG(info) << L"Initialized OpenGL Accelerated GPU Image Mixer";
	}

	void begin_layer(core::blend_mode blend_mode)
	{
		layers_.push_back(layer(std::vector<item>(), blend_mode));
	}

	void push(core::frame_transform& transform)
	{
		transform_stack_.push_back(transform_stack_.back()*transform);
	}

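	// Adds a visited write_frame to the current layer, skipping frames that
	// cannot contribute to the output.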
	void visit(core::data_frame& frame2)
	{
		write_frame* frame = dynamic_cast<write_frame*>(&frame2);
		if(frame == nullptr)
			return;

		if(frame->get_pixel_format_desc().format == core::pixel_format::invalid)
			return;

		if(frame->get_buffers().empty())
			return;

		if(transform_stack_.back().field_mode == core::field_mode::empty)
			return;

		item item;
		item.pix_desc			= frame->get_pixel_format_desc();
		item.buffers			= frame->get_buffers();
		item.transform			= transform_stack_.back();
		item.transform.volume	= core::frame_transform().volume; // Set volume to default since we don't care about it here.

		layers_.back().items.push_back(item);
	}

	void pop()
	{
		transform_stack_.pop_back();
	}

	void end_layer()
	{
	}

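	// Drops empty layers and renders whatever has been collected since the last call.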
	boost::shared_future<boost::iterator_range<const uint8_t*>> render(const core::video_format_desc& format_desc)
	{
		// Remove empty layers.
		boost::range::remove_erase_if(layers_, [](const layer& layer)
		{
			return layer.items.empty();
		});

		return renderer_(std::move(layers_), format_desc);
	}

	virtual spl::shared_ptr<ogl::write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)
	{
		return spl::make_shared<ogl::write_frame>(ogl_, tag, desc);
	}
};

image_mixer::image_mixer(const spl::shared_ptr<context>& ogl) : impl_(new impl(ogl)){}
void image_mixer::push(core::frame_transform& transform){impl_->push(transform);}
void image_mixer::visit(core::data_frame& frame){impl_->visit(frame);}
void image_mixer::pop(){impl_->pop();}
boost::shared_future<boost::iterator_range<const uint8_t*>> image_mixer::operator()(const core::video_format_desc& format_desc){return impl_->render(format_desc);}
void image_mixer::begin_layer(core::blend_mode blend_mode){impl_->begin_layer(blend_mode);}
void image_mixer::end_layer(){impl_->end_layer();}
spl::shared_ptr<core::write_frame> image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc) {return impl_->create_frame(tag, desc);}

}}}