#include "../gpu/ogl_device.h"\r
#include "../gpu/host_buffer.h"\r
#include "../gpu/device_buffer.h"\r
-#include "../gpu/gpu_write_frame.h"\r
+#include "../write_frame.h"\r
+\r
+#include "../../video_channel_context.h"\r
\r
#include <common/concurrency/executor.h>\r
#include <common/exception/exceptions.h>\r
#include <core/producer/frame/pixel_format.h>\r
#include <core/video_format.h>\r
\r
-#include <boost/cast.hpp>\r
+#include <gl/glew.h>\r
\r
-#include <Glee.h>\r
-#include <SFML/Window/Context.hpp>\r
+#include <boost/foreach.hpp>\r
+#include <boost/range.hpp>\r
+#include <boost/range/algorithm/find.hpp>\r
\r
+#include <algorithm>\r
#include <array>\r
+#include <deque>\r
#include <unordered_map>\r
\r
-namespace caspar { namespace mixer {\r
+namespace caspar { namespace core {\r
\r
struct image_mixer::implementation : boost::noncopyable\r
-{ \r
- const core::video_format_desc format_desc_;\r
- \r
- std::stack<core::image_transform> transform_stack_;\r
-\r
- GLuint fbo_;\r
- std::array<std::shared_ptr<device_buffer>, 2> render_targets_;\r
+{ \r
+ typedef std::deque<render_item> layer;\r
\r
- std::shared_ptr<host_buffer> reading_;\r
+ video_channel_context& channel_;\r
\r
- image_kernel kernel_;\r
+ std::vector<image_transform> transform_stack_;\r
+ std::vector<video_mode::type> mode_stack_;\r
\r
- safe_ptr<ogl_device> context_;\r
+ std::deque<std::deque<render_item>> layers_; // layer/stream/items\r
+ \r
+ image_kernel kernel_; \r
\r
+ std::shared_ptr<device_buffer> active_buffer_;\r
public:\r
- implementation(const core::video_format_desc& format_desc) \r
- : format_desc_(format_desc)\r
- , context_(ogl_device::create())\r
+ implementation(video_channel_context& video_channel) \r
+ : channel_(video_channel)\r
+ , transform_stack_(1)\r
+ , mode_stack_(1, video_mode::progressive)\r
{\r
- context_->invoke([]\r
- {\r
- if(!GLEE_VERSION_3_0)\r
- BOOST_THROW_EXCEPTION(not_supported() << msg_info("Missing OpenGL 3.0 support."));\r
- });\r
- \r
- context_->begin_invoke([=]\r
- {\r
- transform_stack_.push(core::image_transform());\r
- transform_stack_.top().set_mode(core::video_mode::progressive);\r
-\r
- GL(glEnable(GL_TEXTURE_2D));\r
- GL(glDisable(GL_DEPTH_TEST)); \r
-\r
- render_targets_[0] = context_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
- render_targets_[1] = context_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
- \r
- GL(glGenFramebuffers(1, &fbo_)); \r
- GL(glBindFramebuffer(GL_FRAMEBUFFER_EXT, fbo_));\r
- GL(glReadBuffer(GL_COLOR_ATTACHMENT0_EXT));\r
-\r
- reading_ = context_->create_host_buffer(format_desc_.size, host_buffer::read_only);\r
- });\r
}\r
\r
~implementation()\r
{\r
- glDeleteFramebuffersEXT(1, &fbo_);\r
+ channel_.ogl().gc();\r
}\r
-\r
- void begin(const core::basic_frame& frame)\r
+ \r
+ void begin(core::basic_frame& frame)\r
{\r
- transform_stack_.push(transform_stack_.top()*frame.get_image_transform());\r
+ transform_stack_.push_back(transform_stack_.back()*frame.get_image_transform());\r
+ mode_stack_.push_back(frame.get_mode() == video_mode::progressive ? mode_stack_.back() : frame.get_mode());\r
}\r
\r
void visit(core::write_frame& frame)\r
- {\r
- auto gpu_frame = boost::polymorphic_downcast<gpu_write_frame*>(&frame);\r
- auto& desc = gpu_frame->get_pixel_format_desc();\r
- auto& buffers = gpu_frame->get_plane_buffers();\r
-\r
- auto transform = transform_stack_.top();\r
- context_->begin_invoke([=]\r
- {\r
- GL(glColor4d(1.0, 1.0, 1.0, transform.get_opacity()));\r
- GL(glViewport(0, 0, format_desc_.width, format_desc_.height));\r
- kernel_.apply(desc, transform);\r
+ { \r
+ CASPAR_ASSERT(!layers_.empty());\r
\r
- std::vector<safe_ptr<device_buffer>> device_buffers;\r
- for(size_t n = 0; n < buffers.size(); ++n)\r
- {\r
- auto texture = context_->create_device_buffer(desc.planes[n].width, desc.planes[n].height, desc.planes[n].channels);\r
- texture->read(*buffers[n]);\r
- device_buffers.push_back(texture);\r
- }\r
+ // Check if frame has been discarded by interlacing\r
+ if(boost::range::find(mode_stack_, video_mode::upper) != mode_stack_.end() && boost::range::find(mode_stack_, video_mode::lower) != mode_stack_.end())\r
+ return;\r
+ \r
+ core::render_item item(frame.get_pixel_format_desc(), frame.get_textures(), transform_stack_.back(), mode_stack_.back(), frame.tag()); \r
\r
- for(size_t n = 0; n < buffers.size(); ++n)\r
- {\r
- GL(glActiveTexture(GL_TEXTURE0+n));\r
- device_buffers[n]->bind();\r
- }\r
+ auto& layer = layers_.back();\r
\r
- auto m_p = transform.get_key_translation();\r
- auto m_s = transform.get_key_scale();\r
- double w = static_cast<double>(format_desc_.width);\r
- double h = static_cast<double>(format_desc_.height);\r
- \r
- GL(glEnable(GL_SCISSOR_TEST));\r
- GL(glScissor(static_cast<size_t>(m_p[0]*w), static_cast<size_t>(m_p[1]*h), static_cast<size_t>(m_s[0]*w), static_cast<size_t>(m_s[1]*h)));\r
- \r
- auto f_p = transform.get_fill_translation();\r
- auto f_s = transform.get_fill_scale();\r
- \r
- glBegin(GL_QUADS);\r
- glTexCoord2d(0.0, 0.0); glVertex2d( f_p[0] *2.0-1.0, f_p[1] *2.0-1.0);\r
- glTexCoord2d(1.0, 0.0); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, f_p[1] *2.0-1.0);\r
- glTexCoord2d(1.0, 1.0); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
- glTexCoord2d(0.0, 1.0); glVertex2d( f_p[0] *2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
- glEnd();\r
- GL(glDisable(GL_SCISSOR_TEST));\r
- });\r
+ if(boost::range::find(layer, item) == layer.end())\r
+ layer.push_back(item);\r
}\r
\r
void end()\r
{\r
- transform_stack_.pop();\r
+ transform_stack_.pop_back();\r
+ mode_stack_.pop_back();\r
}\r
\r
- boost::unique_future<safe_ptr<const host_buffer>> begin_pass()\r
+ void begin_layer()\r
{\r
- return context_->begin_invoke([=]() -> safe_ptr<const host_buffer>\r
- {\r
- reading_->map();\r
- render_targets_[0]->attach(0);\r
- GL(glClear(GL_COLOR_BUFFER_BIT));\r
- return safe_ptr<const host_buffer>(reading_);\r
- });\r
+ layers_.push_back(layer());\r
}\r
\r
- void end_pass()\r
+ void end_layer()\r
{\r
- context_->begin_invoke([=]\r
+ }\r
+ \r
+ boost::unique_future<safe_ptr<host_buffer>> render()\r
+ { \r
+ auto layers = std::move(layers_);\r
+ return channel_.ogl().begin_invoke([=]() mutable\r
{\r
- reading_ = context_->create_host_buffer(format_desc_.size, host_buffer::read_only);\r
- render_targets_[0]->write(*reading_);\r
- std::rotate(render_targets_.begin(), render_targets_.begin() + 1, render_targets_.end());\r
+ return render(std::move(layers));\r
});\r
}\r
- \r
- std::vector<safe_ptr<host_buffer>> create_buffers(const core::pixel_format_desc& format)\r
+ \r
+ safe_ptr<host_buffer> render(std::deque<layer>&& layers)\r
{\r
- std::vector<safe_ptr<host_buffer>> buffers;\r
- std::transform(format.planes.begin(), format.planes.end(), std::back_inserter(buffers), [&](const core::pixel_format_desc::plane& plane)\r
+ std::shared_ptr<device_buffer> layer_key_buffer;\r
+\r
+ auto draw_buffer = create_device_buffer(4);\r
+ \r
+ BOOST_FOREACH(auto& layer, layers)\r
+ draw(std::move(layer), draw_buffer, layer_key_buffer);\r
+ \r
+ auto host_buffer = channel_.ogl().create_host_buffer(channel_.get_format_desc().size, host_buffer::read_only);\r
+ channel_.ogl().attach(*draw_buffer);\r
+ host_buffer->begin_read(draw_buffer->width(), draw_buffer->height(), format(draw_buffer->stride()));\r
+ \r
+ active_buffer_ = draw_buffer;\r
+\r
+ channel_.ogl().flush(); // NOTE: This is important, otherwise fences will deadlock.\r
+ \r
+ return host_buffer;\r
+ }\r
+\r
+ // TODO: We might have more overlaps for opacity transitions\r
+ // TODO: What about blending modes, are they ok? Maybe only overlap detection is required for opacity?\r
+ void draw(layer&& layer, const safe_ptr<device_buffer>& draw_buffer, std::shared_ptr<device_buffer>& layer_key_buffer)\r
+ { \r
+ if(layer.empty())\r
+ return;\r
+\r
+ std::pair<int, std::shared_ptr<device_buffer>> local_key_buffer;\r
+ \r
+ //if(has_overlapping_items(layer, layer.front().transform.get_blend_mode()))\r
+ //{\r
+ // auto local_draw_buffer = create_device_buffer(4); \r
+\r
+ // auto local_blend_mode = layer.front().transform.get_blend_mode();\r
+\r
+ // int fields = 0;\r
+ // BOOST_FOREACH(auto& item, layer)\r
+ // {\r
+ // if(fields & item.mode)\r
+ // item.transform.set_blend_mode(image_transform::blend_mode::normal); // Disable blending, it will be used when merging back into render stack.\r
+ // else\r
+ // {\r
+ // item.transform.set_blend_mode(image_transform::blend_mode::replace); // Target field is empty, no blending, just copy\r
+ // fields |= item.mode;\r
+ // }\r
+\r
+ // draw_item(std::move(item), local_draw_buffer, local_key_buffer, layer_key_buffer); \r
+ // }\r
+\r
+ // kernel_.draw(channel_.ogl(), create_render_item(local_draw_buffer, local_blend_mode), draw_buffer, nullptr, nullptr);\r
+ //}\r
+ //else // fast path\r
+ //{\r
+ BOOST_FOREACH(auto& item, layer) \r
+ draw_item(std::move(item), draw_buffer, local_key_buffer, layer_key_buffer); \r
+ //} \r
+\r
+ CASPAR_ASSERT(local_key_buffer.first == 0 || local_key_buffer.first == core::video_mode::progressive);\r
+\r
+ std::swap(local_key_buffer.second, layer_key_buffer);\r
+ }\r
+\r
+ void draw_item(render_item&& item, \r
+ const safe_ptr<device_buffer>& draw_buffer, \r
+ std::pair<int, std::shared_ptr<device_buffer>>& local_key_buffer, \r
+ std::shared_ptr<device_buffer>& layer_key_buffer)\r
+ { \r
+ if(item.transform.get_is_key())\r
{\r
- return context_->create_host_buffer(plane.size, host_buffer::write_only);\r
- });\r
- return buffers;\r
+ if(!local_key_buffer.second)\r
+ {\r
+ local_key_buffer.first = 0;\r
+ local_key_buffer.second = create_device_buffer(1);\r
+ }\r
+\r
+ // No transparency for key\r
+ item.transform.set_opacity(1.0);\r
+ item.transform.set_blend_mode(image_transform::blend_mode::normal);\r
+\r
+ local_key_buffer.first |= item.mode;\r
+ kernel_.draw(channel_.ogl(), std::move(item), make_safe(local_key_buffer.second), nullptr, nullptr);\r
+ }\r
+ else\r
+ {\r
+ kernel_.draw(channel_.ogl(), std::move(item), draw_buffer, local_key_buffer.second, layer_key_buffer);\r
+ local_key_buffer.first ^= item.mode;\r
+ \r
+ if(local_key_buffer.first == 0)\r
+ {\r
+ local_key_buffer.first = 0;\r
+ local_key_buffer.second.reset();\r
+ }\r
+ }\r
+ }\r
+\r
+ //// TODO: Optimize\r
+ //bool has_overlapping_items(const layer& layer, image_transform::blend_mode::type blend_mode)\r
+ //{\r
+ // if(layer.size() < 2)\r
+ // return false; \r
+ // \r
+ // implementation::layer fill;\r
+\r
+ // std::copy_if(layer.begin(), layer.end(), std::back_inserter(fill), [&](const render_item& item)\r
+ // {\r
+ // return !item.transform.get_is_key();\r
+ // });\r
+ // \r
+ // if(blend_mode == image_transform::blend_mode::normal) // Only overlap if opacity\r
+ // {\r
+ // return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
+ // {\r
+ // return item.transform.get_opacity() < 1.0 - 0.001;\r
+ // });\r
+ // }\r
+\r
+ // Simple solution, just check if we have different video streams / tags.\r
+ // return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
+ // {\r
+ // return item.tag != fill.front().tag;\r
+ // });\r
+ //} \r
+ // \r
+ //render_item create_render_item(const safe_ptr<device_buffer>& buffer, image_transform::blend_mode::type blend_mode)\r
+ //{\r
+ // CASPAR_ASSERT(buffer->stride() == 4 && "Only used for bgra textures");\r
+\r
+ // pixel_format_desc desc;\r
+ // desc.pix_fmt = pixel_format::bgra;\r
+ // desc.planes.push_back(pixel_format_desc::plane(channel_.get_format_desc().width, channel_.get_format_desc().height, 4));\r
+\r
+ // std::vector<safe_ptr<device_buffer>> textures;\r
+ // textures.push_back(buffer);\r
+ // \r
+ // image_transform transform;\r
+ // transform.set_blend_mode(blend_mode);\r
+\r
+ // return render_item(desc, std::move(textures), transform, video_mode::progressive, nullptr); \r
+ //}\r
+\r
+ safe_ptr<device_buffer> create_device_buffer(size_t stride)\r
+ {\r
+ auto buffer = channel_.ogl().create_device_buffer(channel_.get_format_desc().width, channel_.get_format_desc().height, stride);\r
+ channel_.ogl().clear(*buffer);\r
+ return buffer;\r
+ }\r
+\r
+ safe_ptr<write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)\r
+ {\r
+ return make_safe<write_frame>(channel_.ogl(), tag, desc);\r
}\r
};\r
\r
-image_mixer::image_mixer(const core::video_format_desc& format_desc) : impl_(new implementation(format_desc)){}\r
-void image_mixer::begin(const core::basic_frame& frame){impl_->begin(frame);}\r
+image_mixer::image_mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
+void image_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}\r
void image_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
void image_mixer::end(){impl_->end();}\r
-boost::unique_future<safe_ptr<const host_buffer>> image_mixer::begin_pass(){ return impl_->begin_pass();}\r
-void image_mixer::end_pass(){impl_->end_pass();}\r
-std::vector<safe_ptr<host_buffer>> image_mixer::create_buffers(const core::pixel_format_desc& format){return impl_->create_buffers(format);}\r
+boost::unique_future<safe_ptr<host_buffer>> image_mixer::render(){return impl_->render();}\r
+safe_ptr<write_frame> image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc){return impl_->create_frame(tag, desc);}\r
+void image_mixer::begin_layer(){impl_->begin_layer();}\r
+void image_mixer::end_layer(){impl_->end_layer();}\r
+image_mixer& image_mixer::operator=(image_mixer&& other)\r
+{\r
+ impl_ = std::move(other.impl_);\r
+ return *this;\r
+}\r
\r
}}
\ No newline at end of file