/*
* Copyright (c) 2010 Sveriges Television AB <info@casparcg.com>
*
* This file is part of CasparCG.
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*/
20 #include "../../stdafx.h"
\r
22 #include "image_mixer.h"
\r
23 #include "image_kernel.h"
\r
25 #include "../gpu/ogl_device.h"
\r
26 #include "../gpu/host_buffer.h"
\r
27 #include "../gpu/device_buffer.h"
\r
28 #include "../write_frame.h"
\r
30 #include "../../video_channel_context.h"
\r
32 #include <common/concurrency/executor.h>
\r
33 #include <common/exception/exceptions.h>
\r
34 #include <common/gl/gl_check.h>
\r
36 #include <core/producer/frame/image_transform.h>
\r
37 #include <core/producer/frame/pixel_format.h>
\r
38 #include <core/video_format.h>
\r
40 #include <gl/glew.h>
\r
42 #include <boost/foreach.hpp>
\r
43 #include <boost/range.hpp>
\r
44 #include <boost/range/algorithm/find.hpp>
\r
46 #include <algorithm>
\r
49 #include <unordered_map>
\r
51 namespace caspar { namespace core {
\r
53 struct image_mixer::implementation : boost::noncopyable
\r
55 typedef std::deque<render_item> layer;
\r
57 video_channel_context& channel_;
\r
59 std::vector<image_transform> transform_stack_;
\r
60 std::vector<video_mode::type> mode_stack_;
\r
62 std::deque<std::deque<render_item>> layers_; // layer/stream/items
\r
64 image_kernel kernel_;
\r
66 std::shared_ptr<device_buffer> draw_buffer_;
\r
68 implementation(video_channel_context& video_channel)
\r
69 : channel_(video_channel)
\r
70 , transform_stack_(1)
\r
71 , mode_stack_(1, video_mode::progressive)
\r
77 channel_.ogl().gc();
\r
80 void begin(core::basic_frame& frame)
\r
82 transform_stack_.push_back(transform_stack_.back()*frame.get_image_transform());
\r
83 mode_stack_.push_back(frame.get_mode() == video_mode::progressive ? mode_stack_.back() : frame.get_mode());
\r
86 void visit(core::write_frame& frame)
\r
88 CASPAR_ASSERT(!layers_.empty());
\r
90 // Check if frame has been discarded by interlacing
\r
91 if(boost::range::find(mode_stack_, video_mode::upper) != mode_stack_.end() && boost::range::find(mode_stack_, video_mode::lower) != mode_stack_.end())
\r
94 core::render_item item(frame.get_pixel_format_desc(), frame.get_textures(), transform_stack_.back(), mode_stack_.back(), frame.tag());
\r
96 auto& layer = layers_.back();
\r
98 if(boost::range::find(layer, item) == layer.end())
\r
99 layer.push_back(item);
\r
104 transform_stack_.pop_back();
\r
105 mode_stack_.pop_back();
\r
110 layers_.push_back(layer());
\r
117 boost::unique_future<safe_ptr<host_buffer>> render()
\r
119 auto layers = std::move(layers_);
\r
120 return channel_.ogl().begin_invoke([=]()mutable
\r
122 return render(std::move(layers));
\r
126 safe_ptr<host_buffer> render(std::deque<layer>&& layers)
\r
128 std::shared_ptr<device_buffer> layer_key_buffer;
\r
130 draw_buffer_ = channel_.ogl().create_device_buffer(channel_.get_format_desc().width, channel_.get_format_desc().height, 4);
\r
131 channel_.ogl().clear(*draw_buffer_);
\r
133 BOOST_FOREACH(auto& layer, layers)
\r
134 draw(std::move(layer), layer_key_buffer);
\r
136 auto host_buffer = channel_.ogl().create_host_buffer(channel_.get_format_desc().size, host_buffer::read_only);
\r
137 channel_.ogl().attach(*draw_buffer_);
\r
138 host_buffer->begin_read(draw_buffer_->width(), draw_buffer_->height(), format(draw_buffer_->stride()));
\r
142 return host_buffer;
\r
145 void draw(layer&& layer, std::shared_ptr<device_buffer>& layer_key_buffer)
\r
147 std::shared_ptr<device_buffer> local_key_buffer;
\r
149 std::shared_ptr<device_buffer> atomic_draw_buffer;
\r
150 std::shared_ptr<device_buffer> atomic_local_key_buffer;
\r
152 BOOST_FOREACH(auto& item, layer)
\r
154 //if(item.transform.get_is_atomic()) // layers need to be atomic in-order to support blend-modes properly
\r
156 // if(!atomic_draw_buffer)
\r
158 // atomic_draw_buffer = channel_.ogl().create_device_buffer(channel_.get_format_desc().width, channel_.get_format_desc().height, 4);
\r
159 // channel_.ogl().clear(*atomic_draw_buffer);
\r
162 // draw(std::move(item), atomic_draw_buffer, atomic_local_key_buffer, nullptr);
\r
166 // if(atomic_draw_buffer)
\r
168 // pixel_format_desc desc;
\r
169 // desc.pix_fmt = pixel_format::bgra;
\r
170 // desc.planes.push_back(pixel_format_desc::plane(channel_.get_format_desc().width, channel_.get_format_desc().height, 4));
\r
172 // std::vector<safe_ptr<device_buffer>> textures;
\r
173 // textures.push_back(make_safe(atomic_draw_buffer));
\r
175 // atomic_draw_buffer.reset();
\r
176 // atomic_local_key_buffer.reset();
\r
178 // render_item atomic_item(desc, std::move(textures), image_transform(), video_mode::progressive, nullptr);
\r
179 // draw(std::move(atomic_item), draw_buffer_, local_key_buffer, layer_key_buffer);
\r
182 // draw(std::move(item), draw_buffer_, local_key_buffer, layer_key_buffer);
\r
185 draw(std::move(item), local_key_buffer, layer_key_buffer);
\r
188 std::swap(local_key_buffer, layer_key_buffer);
\r
191 void draw(render_item&& item, std::shared_ptr<device_buffer>& local_key_buffer, std::shared_ptr<device_buffer>& layer_key_buffer)
\r
193 if(item.transform.get_is_key())
\r
195 if(!local_key_buffer)
\r
197 local_key_buffer = channel_.ogl().create_device_buffer(channel_.get_format_desc().width, channel_.get_format_desc().height, 1);
\r
198 channel_.ogl().clear(*local_key_buffer);
\r
201 draw(std::move(item), local_key_buffer, nullptr, nullptr);
\r
205 draw(std::move(item), draw_buffer_, local_key_buffer, layer_key_buffer);
\r
206 local_key_buffer.reset();
\r
210 void draw(render_item&& item, std::shared_ptr<device_buffer>& draw_buffer, const std::shared_ptr<device_buffer>& local_key, const std::shared_ptr<device_buffer>& layer_key)
\r
212 if(!std::all_of(item.textures.begin(), item.textures.end(), std::mem_fn(&device_buffer::ready)))
\r
214 CASPAR_LOG(warning) << L"[image_mixer] Performance warning. Host to device transfer not complete, GPU will be stalled";
\r
215 channel_.ogl().yield(); // Try to give it some more time.
\r
218 kernel_.draw(channel_.ogl(), std::move(item), make_safe(draw_buffer), local_key, layer_key);
\r
221 safe_ptr<write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)
\r
223 return make_safe<write_frame>(channel_.ogl(), tag, desc);
\r
227 image_mixer::image_mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}
\r
228 void image_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}
\r
229 void image_mixer::visit(core::write_frame& frame){impl_->visit(frame);}
\r
230 void image_mixer::end(){impl_->end();}
\r
231 boost::unique_future<safe_ptr<host_buffer>> image_mixer::render(){return impl_->render();}
\r
232 safe_ptr<write_frame> image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc){return impl_->create_frame(tag, desc);}
\r
233 void image_mixer::begin_layer(){impl_->begin_layer();}
\r
234 void image_mixer::end_layer(){impl_->end_layer();}
\r
235 image_mixer& image_mixer::operator=(image_mixer&& other)
\r
237 impl_ = std::move(other.impl_);
\r