diff --git a/core/mixer/image/image_mixer.cpp b/core/mixer/image/image_mixer.cpp
index 5aa6a2bee3f44d4d16c7da6905d882ae6d97bb4a..cc4da64ba883b44a99f50b34093a822d9b442b50 100644
--- a/core/mixer/image/image_mixer.cpp
+++ b/core/mixer/image/image_mixer.cpp
-#include "../../StdAfx.h"\r
+/*\r
+* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
+*\r
+*  This file is part of CasparCG.\r
+*\r
+*    CasparCG is free software: you can redistribute it and/or modify\r
+*    it under the terms of the GNU General Public License as published by\r
+*    the Free Software Foundation, either version 3 of the License, or\r
+*    (at your option) any later version.\r
+*\r
+*    CasparCG is distributed in the hope that it will be useful,\r
+*    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r
+*    GNU General Public License for more details.\r
+*\r
+*    You should have received a copy of the GNU General Public License\r
+*    along with CasparCG.  If not, see <http://www.gnu.org/licenses/>.\r
+*\r
+*/\r
+#include "../../stdafx.h"\r
 \r
 #include "image_mixer.h"\r
 #include "image_kernel.h"\r
-#include "image_transform.h"\r
 \r
 #include "../gpu/ogl_device.h"\r
 #include "../gpu/host_buffer.h"\r
 #include "../gpu/device_buffer.h"\r
+#include "../write_frame.h"\r
+\r
+#include "../../video_channel_context.h"\r
 \r
+#include <common/concurrency/executor.h>\r
 #include <common/exception/exceptions.h>\r
 #include <common/gl/gl_check.h>\r
-#include <common/concurrency/executor.h>\r
 \r
-#include <Glee.h>\r
-#include <SFML/Window/Context.hpp>\r
+#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/pixel_format.h>\r
+#include <core/video_format.h>\r
+\r
+#include <GL/glew.h>\r
+\r
+#include <boost/foreach.hpp>\r
+#include <boost/range.hpp>\r
+#include <boost/range/algorithm/find.hpp>\r
+\r
+#include <algorithm>\r
+#include <array>\r
+#include <deque>\r
 #include <unordered_map>\r
 \r
 namespace caspar { namespace core {\r
                \r
 struct image_mixer::implementation : boost::noncopyable\r
-{                      \r
-       const video_format_desc format_desc_;\r
-       \r
-       std::stack<image_transform> transform_stack_;\r
-\r
-       GLuint fbo_;\r
-       std::array<std::shared_ptr<device_buffer>, 2> render_targets_;\r
+{              \r
+       typedef std::deque<render_item>                 layer;\r
 \r
-       std::shared_ptr<host_buffer> reading_;\r
+       video_channel_context&                                  channel_;\r
 \r
-       image_kernel kernel_;\r
+       std::vector<image_transform>                    transform_stack_;\r
+       std::vector<video_mode::type>                   video_mode_stack_;\r
+       std::stack<blend_mode::type>                    blend_mode_stack_;\r
 \r
-       safe_ptr<ogl_device> context_;\r
+       std::deque<std::deque<render_item>>             layers_; // layer/stream/items\r
+       \r
+       image_kernel                                                    kernel_;                \r
 \r
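+       // Holds the last render target so it stays alive after render() returns,\r
+       // presumably while the asynchronous read-back into the host buffer completes.\r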
+       std::shared_ptr<device_buffer>                  active_buffer_;\r
 public:\r
-       implementation(const video_format_desc& format_desc) \r
-               : format_desc_(format_desc)\r
-               , context_(ogl_device::create())\r
+       implementation(video_channel_context& video_channel) \r
+               : channel_(video_channel)\r
+               , transform_stack_(1)\r
+               , video_mode_stack_(1, video_mode::progressive)\r
        {\r
-               context_->begin_invoke([=]\r
-               {\r
-                       transform_stack_.push(image_transform());\r
-                       transform_stack_.top().set_mode(video_mode::progressive);\r
+       }\r
 \r
-                       GL(glEnable(GL_TEXTURE_2D));\r
-                       GL(glEnable(GL_STENCIL_TEST));\r
-                       GL(glEnable(GL_SCISSOR_TEST));\r
-                       GL(glDisable(GL_DEPTH_TEST));           \r
+       ~implementation()\r
+       {\r
+               channel_.ogl().gc();\r
+       }\r
+       \r
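+       // Visitor entry points: begin() and end() bracket each nested frame, accumulating\r
+       // the composed image transform and the effective video mode down the frame tree.\r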
+       void begin(core::basic_frame& frame)\r
+       {\r
+               transform_stack_.push_back(transform_stack_.back()*frame.get_image_transform());\r
+               video_mode_stack_.push_back(frame.get_mode() == video_mode::progressive ? video_mode_stack_.back() : frame.get_mode());\r
+       }\r
+               \r
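+       // Queues a render item for the visited frame. Frames whose field has been discarded\r
+       // by interlacing are skipped, and identical items are only queued once per layer.\r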
+       void visit(core::write_frame& frame)\r
+       {       \r
+               CASPAR_ASSERT(!layers_.empty());\r
 \r
-                       render_targets_[0] = context_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
-                       render_targets_[1] = context_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
-                       \r
-                       GL(glGenFramebuffers(1, &fbo_));                \r
-                       GL(glBindFramebuffer(GL_FRAMEBUFFER_EXT, fbo_));\r
-                       GL(glReadBuffer(GL_COLOR_ATTACHMENT0_EXT));\r
+               // Skip the frame if interlacing has already discarded it, i.e. the mode stack contains both the upper and the lower field.\r
+               if(boost::range::find(video_mode_stack_, video_mode::upper) != video_mode_stack_.end() && boost::range::find(video_mode_stack_, video_mode::lower) != video_mode_stack_.end())\r
+                       return;\r
+               \r
+               core::render_item item;\r
+               item.pix_desc           = frame.get_pixel_format_desc();\r
+               item.textures           = frame.get_textures();\r
+               item.transform          = transform_stack_.back();\r
+               item.mode                       = video_mode_stack_.back();\r
+               item.tag                        = frame.tag();\r
+               item.blend_mode         = blend_mode_stack_.top();      \r
 \r
-                       reading_ = context_->create_host_buffer(format_desc_.size, host_buffer::read_only);\r
-               });\r
+               auto& layer = layers_.back();\r
+               if(boost::range::find(layer, item) == layer.end())\r
+                       layer.push_back(item);\r
        }\r
 \r
-       ~implementation()\r
+       void end()\r
        {\r
-               glDeleteFramebuffersEXT(1, &fbo_);\r
+               transform_stack_.pop_back();\r
+               video_mode_stack_.pop_back();\r
        }\r
 \r
-       void begin(const image_transform& transform)\r
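+       // A layer groups the items queued between begin_layer() and end_layer(); the blend\r
+       // mode passed here is applied to every item queued for the layer.\r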
+       void begin_layer(blend_mode::type blend_mode)\r
        {\r
-               transform_stack_.push(transform_stack_.top()*transform);\r
+               blend_mode_stack_.push(blend_mode);\r
+               layers_.push_back(layer());\r
        }\r
-               \r
-       void render(const pixel_format_desc& desc, std::vector<safe_ptr<host_buffer>>& buffers)\r
+\r
+       void end_layer()\r
        {\r
-               auto transform = transform_stack_.top();\r
-               context_->begin_invoke([=]\r
+               blend_mode_stack_.pop();\r
+       }\r
+       \r
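+       // Moves the queued layers onto the OpenGL thread and returns a future for the\r
+       // read-back host buffer of the finished frame.\r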
+       boost::unique_future<safe_ptr<host_buffer>> render()\r
+       {               \r
+               auto layers = std::move(layers_);\r
+               return channel_.ogl().begin_invoke([=]() mutable\r
                {\r
-                       GL(glColor4d(1.0, 1.0, 1.0, transform.get_opacity()));\r
-                       GL(glViewport(0, 0, format_desc_.width, format_desc_.height));\r
-                       kernel_.apply(desc.pix_fmt, transform);\r
+                       return render(std::move(layers));\r
+               });\r
+       }\r
+       \r
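+       // Runs on the OpenGL thread: composites every layer into a 4-channel render target\r
+       // and starts an asynchronous read-back into the host buffer that is returned.\r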
+       safe_ptr<host_buffer> render(std::deque<layer>&& layers)\r
+       {\r
+               std::shared_ptr<device_buffer> layer_key_buffer;\r
 \r
-                       std::vector<safe_ptr<device_buffer>> device_buffers;\r
-                       for(size_t n = 0; n < buffers.size(); ++n)\r
-                       {\r
-                               auto texture = context_->create_device_buffer(desc.planes[n].width, desc.planes[n].height, desc.planes[n].channels);\r
-                               texture->read(*buffers[n]);\r
-                               device_buffers.push_back(texture);\r
-                       }\r
+               auto draw_buffer = create_device_buffer(4);\r
+                               \r
+               BOOST_FOREACH(auto& layer, layers)\r
+                       draw(std::move(layer), draw_buffer, layer_key_buffer);\r
+               \r
+               auto host_buffer = channel_.ogl().create_host_buffer(channel_.get_format_desc().size, host_buffer::read_only);\r
+               channel_.ogl().attach(*draw_buffer);\r
+               host_buffer->begin_read(draw_buffer->width(), draw_buffer->height(), format(draw_buffer->stride()));\r
+               \r
+               active_buffer_ = draw_buffer;\r
+\r
+               channel_.ogl().flush(); // NOTE: This is important, otherwise fences will deadlock.\r
+                       \r
+               return host_buffer;\r
+       }\r
+\r
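+       // Composites one layer onto the draw buffer. If items within the layer overlap, the\r
+       // layer is first flattened into a local buffer and then blended onto the target with\r
+       // the layer's blend mode; otherwise items are drawn directly (fast path). Any key\r
+       // produced by this layer is handed on to the next layer via layer_key_buffer.\r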
+       // TODO: We might have more overlaps for opacity transitions.\r
+       // TODO: Are blend modes handled correctly here, or is overlap detection only required for opacity?\r
+       void draw(layer&& layer, const safe_ptr<device_buffer>& draw_buffer, std::shared_ptr<device_buffer>& layer_key_buffer)\r
+       {                               \r
+               if(layer.empty())\r
+                       return;\r
 \r
-                       for(size_t n = 0; n < buffers.size(); ++n)\r
+               std::pair<int, std::shared_ptr<device_buffer>> local_key_buffer;\r
+                               \r
+               if(has_overlapping_items(layer, layer.front().blend_mode))\r
+               {\r
+                       auto local_draw_buffer = create_device_buffer(4);       \r
+                       auto local_blend_mode = layer.front().blend_mode;\r
+\r
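+                       // 'fields' is a bit mask of video_mode flags tracking which fields of the\r
+                       // local buffer have already received an item.\r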
+                       int fields = 0;\r
+                       BOOST_FOREACH(auto& item, layer)\r
                        {\r
-                               glActiveTexture(GL_TEXTURE0+n);\r
-                               device_buffers[n]->bind();\r
+                               if(fields & item.mode)\r
+                                       item.blend_mode = blend_mode::normal; // Disable blending here; the layer's blend mode is applied when the local buffer is merged back into the render stack.\r
+                               else\r
+                               {\r
+                                       item.blend_mode = blend_mode::replace; // Target field is empty: no blending needed, just copy.\r
+                                       fields |= item.mode;\r
+                               }\r
+\r
+                               draw_item(std::move(item), local_draw_buffer, local_key_buffer, layer_key_buffer);              \r
                        }\r
+                       \r
+                       render_item item;\r
+                       item.pix_desc.pix_fmt = pixel_format::bgra;\r
+                       item.pix_desc.planes.push_back(pixel_format_desc::plane(channel_.get_format_desc().width, channel_.get_format_desc().height, 4));\r
+                       item.textures.push_back(local_draw_buffer);\r
+                       item.blend_mode = local_blend_mode;\r
 \r
-                       auto m_p = transform.get_mask_translation();\r
-                       auto m_s = transform.get_mask_scale();\r
-                       double w = static_cast<double>(format_desc_.width);\r
-                       double h = static_cast<double>(format_desc_.height);\r
+                       kernel_.draw(channel_.ogl(), std::move(item), draw_buffer, nullptr, nullptr);\r
+               }\r
+               else // fast path\r
+               {\r
+                       BOOST_FOREACH(auto& item, layer)                \r
+                               draw_item(std::move(item), draw_buffer, local_key_buffer, layer_key_buffer);            \r
+               }                                       \r
 \r
-                       GL(glScissor(static_cast<size_t>(m_p[0]*w), static_cast<size_t>(m_p[1]*h), static_cast<size_t>(m_s[0]*w), static_cast<size_t>(m_s[1]*h)));\r
-                       \r
-                       auto f_p = transform.get_image_translation();\r
-                       auto f_s = transform.get_image_scale();\r
-                       \r
-                       glBegin(GL_QUADS);\r
-                               glTexCoord2d(0.0, 0.0); glVertex2d( f_p[0]        *2.0-1.0,      f_p[1]        *2.0-1.0);\r
-                               glTexCoord2d(1.0, 0.0); glVertex2d((f_p[0]+f_s[0])*2.0-1.0,  f_p[1]        *2.0-1.0);\r
-                               glTexCoord2d(1.0, 1.0); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
-                               glTexCoord2d(0.0, 1.0); glVertex2d( f_p[0]        *2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
-                       glEnd();\r
-\r
-                       GL(glScissor(0, 0, format_desc_.width, format_desc_.height));\r
-               });\r
-       }\r
+               CASPAR_ASSERT(local_key_buffer.first == 0 || local_key_buffer.first == core::video_mode::progressive);\r
 \r
-       void end()\r
-       {\r
-               transform_stack_.pop();\r
+               std::swap(local_key_buffer.second, layer_key_buffer);\r
        }\r
 \r
-       boost::unique_future<safe_ptr<const host_buffer>> begin_pass()\r
-       {\r
-               return context_->begin_invoke([=]() -> safe_ptr<const host_buffer>\r
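+       // Draws a single item. Key frames are rendered into a per-layer local key buffer,\r
+       // with a field mask recording which fields the key covers; fill frames are drawn\r
+       // through that local key and the previous layer's key, consuming the matching field\r
+       // from the mask and releasing the key buffer once every keyed field has been used.\r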
+       void draw_item(render_item&&                                                                    item, \r
+                                  const safe_ptr<device_buffer>&                                       draw_buffer, \r
+                                  std::pair<int, std::shared_ptr<device_buffer>>&      local_key_buffer, \r
+                                  std::shared_ptr<device_buffer>&                                      layer_key_buffer)\r
+       {                                                                                       \r
+               if(item.transform.get_is_key())\r
                {\r
-                       reading_->map();\r
-                       render_targets_[0]->attach(0);\r
-                       GL(glClear(GL_COLOR_BUFFER_BIT));\r
-                       return safe_ptr<const host_buffer>(reading_);\r
-               });\r
+                       if(!local_key_buffer.second)\r
+                       {\r
+                               local_key_buffer.first = 0;\r
+                               local_key_buffer.second = create_device_buffer(1);\r
+                       }\r
+                       \r
+                       local_key_buffer.first |= item.mode; // Add field to flag.\r
+                       kernel_.draw(channel_.ogl(), std::move(item), make_safe(local_key_buffer.second), nullptr, nullptr);\r
+               }\r
+               else\r
+               {\r
+                       kernel_.draw(channel_.ogl(), std::move(item), draw_buffer, local_key_buffer.second, layer_key_buffer);\r
+                       local_key_buffer.first ^= item.mode; // Remove field from flag.\r
+                       \r
+                       if(local_key_buffer.first == 0) // If all fields of the key have been used, reset it.\r
+                       {\r
+                               local_key_buffer.first = 0;\r
+                               local_key_buffer.second.reset();\r
+                       }\r
+               }\r
        }\r
 \r
-       void end_pass()\r
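+       // A layer only needs the slow, flattened path when it mixes items from different\r
+       // producers (tags) under a non-normal blend mode; layers with a single item or the\r
+       // normal blend mode always take the fast path.\r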
+       // TODO: Optimize.\r
+       bool has_overlapping_items(const layer& layer, blend_mode::type blend_mode)\r
        {\r
-               context_->begin_invoke([=]\r
+               if(layer.size() < 2)\r
+                       return false;   \r
+               \r
+               if(blend_mode == blend_mode::normal)\r
+                       return false;\r
+                               \r
+               return std::any_of(layer.begin(), layer.end(), [&](const render_item& item)\r
                {\r
-                       reading_ = context_->create_host_buffer(format_desc_.size, host_buffer::read_only);\r
-                       render_targets_[0]->write(*reading_);\r
-                       std::rotate(render_targets_.begin(), render_targets_.begin() + 1, render_targets_.end());\r
+                       return item.tag != layer.front().tag;\r
                });\r
-       }\r
+\r
+               //std::copy_if(layer.begin(), layer.end(), std::back_inserter(fill), [&](const render_item& item)\r
+               //{\r
+               //      return !item.transform.get_is_key();\r
+               //});\r
+               //      \r
+               //if(blend_mode == blend_mode::normal) // only overlap if opacity\r
+               //{\r
+               //      return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
+               //      {\r
+               //              return item.transform.get_opacity() < 1.0 - 0.001;\r
+               //      });\r
+               //}\r
+\r
+               //// simple solution, just check if we have different video streams / tags.\r
+               //return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
+               //{\r
+               //      return item.tag != fill.front().tag;\r
+               //});\r
+       }                       \r
                \r
-       std::vector<safe_ptr<host_buffer>> create_buffers(const pixel_format_desc& format)\r
+       safe_ptr<device_buffer> create_device_buffer(size_t stride)\r
        {\r
-               std::vector<safe_ptr<host_buffer>> buffers;\r
-               std::transform(format.planes.begin(), format.planes.end(), std::back_inserter(buffers), [&](const pixel_format_desc::plane& plane) -> safe_ptr<host_buffer>\r
-               {\r
-                       return context_->create_host_buffer(plane.size, host_buffer::write_only);\r
-               });\r
-               return buffers;\r
+               auto buffer = channel_.ogl().create_device_buffer(channel_.get_format_desc().width, channel_.get_format_desc().height, stride);\r
+               channel_.ogl().clear(*buffer);\r
+               return buffer;\r
+       }\r
+\r
+       safe_ptr<write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)\r
+       {\r
+               return make_safe<write_frame>(channel_.ogl(), tag, desc);\r
        }\r
 };\r
 \r
-image_mixer::image_mixer(const video_format_desc& format_desc) : impl_(new implementation(format_desc)){}\r
-void image_mixer::begin(const image_transform& transform) {    impl_->begin(transform);}\r
-void image_mixer::process(const pixel_format_desc& desc, std::vector<safe_ptr<host_buffer>>& buffers){ impl_->render(desc, buffers);}\r
+image_mixer::image_mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
+void image_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}\r
+void image_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
 void image_mixer::end(){impl_->end();}\r
-boost::unique_future<safe_ptr<const host_buffer>> image_mixer::begin_pass(){   return impl_->begin_pass();}\r
-void image_mixer::end_pass(){impl_->end_pass();}\r
-std::vector<safe_ptr<host_buffer>> image_mixer::create_buffers(const pixel_format_desc& format){return impl_->create_buffers(format);}\r
+boost::unique_future<safe_ptr<host_buffer>> image_mixer::render(){return impl_->render();}\r
+safe_ptr<write_frame> image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc){return impl_->create_frame(tag, desc);}\r
+void image_mixer::begin_layer(blend_mode::type blend_mode){impl_->begin_layer(blend_mode);}\r
+void image_mixer::end_layer(){impl_->end_layer();}\r
+image_mixer& image_mixer::operator=(image_mixer&& other)\r
+{\r
+       impl_ = std::move(other.impl_);\r
+       return *this;\r
+}\r
 \r
 }}
\ No newline at end of file