diff --git a/core/mixer/mixer.cpp b/core/mixer/mixer.cpp
index 3445a5b798eab56d60854747ddda9fe76f7d8ddc..2b9768c858dc4561e3c45c2f625882b587ea1e83 100644
--- a/core/mixer/mixer.cpp
+++ b/core/mixer/mixer.cpp
 #include "audio/audio_mixer.h"\r
 #include "image/image_mixer.h"\r
 \r
-#include "../video_channel_context.h"\r
-\r
 #include <common/exception/exceptions.h>\r
 #include <common/concurrency/executor.h>\r
 #include <common/utility/tweener.h>\r
+#include <common/env.h>\r
+#include <common/gl/gl_check.h>\r
 \r
 #include <core/mixer/read_frame.h>\r
 #include <core/mixer/write_frame.h>\r
 #include <core/producer/frame/basic_frame.h>\r
 #include <core/producer/frame/frame_factory.h>\r
 #include <core/producer/frame/pixel_format.h>\r
-#include <core/producer/frame/audio_transform.h>\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
 \r
 #include <core/video_format.h>\r
 \r
 #include <boost/fusion/container/map.hpp>\r
 #include <boost/fusion/include/at_key.hpp>\r
-\r
-#include <tbb/parallel_invoke.h>\r
+#include <boost/foreach.hpp>\r
 \r
 #include <unordered_map>\r
 \r
+using namespace Concurrency;\r
+\r
 namespace caspar { namespace core {\r
                \r
 template<typename T>\r
@@ -86,158 +86,121 @@ public:
 \r
 struct mixer::implementation : boost::noncopyable\r
 {              \r
-       video_channel_context& channel_;\r
+       const video_format_desc format_desc_;\r
+       ogl_device&                             ogl_;\r
        \r
        audio_mixer     audio_mixer_;\r
        image_mixer image_mixer_;\r
        \r
-       typedef std::unordered_map<int, tweened_transform<core::image_transform>> image_transforms;\r
-       typedef std::unordered_map<int, tweened_transform<core::audio_transform>> audio_transforms;\r
-\r
-       boost::fusion::map<boost::fusion::pair<core::image_transform, image_transforms>,\r
-                                       boost::fusion::pair<core::audio_transform, audio_transforms>> transforms_;\r
-       \r
+       std::unordered_map<int, tweened_transform<core::frame_transform>> transforms_;  \r
+       std::unordered_map<int, blend_mode::type> blend_modes_;\r
+               \r
+       critical_section                        mutex_;\r
+       Concurrency::transformer<safe_ptr<message<std::map<int, safe_ptr<basic_frame>>>>, \r
+                                                        safe_ptr<message<safe_ptr<core::read_frame>>>> mixer_;\r
 public:\r
-       implementation(video_channel_context& video_channel) \r
-               : channel_(video_channel)\r
-               , audio_mixer_(channel_.get_format_desc())\r
-               , image_mixer_(channel_)\r
+       implementation(mixer::source_t& source, mixer::target_t& target, const video_format_desc& format_desc, ogl_device& ogl) \r
+               : format_desc_(format_desc)\r
+               , ogl_(ogl)\r
+               , audio_mixer_(format_desc)\r
+               , image_mixer_(ogl, format_desc)\r
+               , mixer_(std::bind(&implementation::mix, this, std::placeholders::_1), &target)\r
        {       \r
                CASPAR_LOG(info) << print() << L" Successfully initialized.";   \r
+               source.link_target(&mixer_);\r
        }\r
-                       \r
-       safe_ptr<read_frame> execute(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
-       {                       \r
-               try\r
+               \r
+       safe_ptr<message<safe_ptr<core::read_frame>>> mix(const safe_ptr<message<std::map<int, safe_ptr<basic_frame>>>>& msg)\r
+       {               \r
+               auto frames = msg->value();\r
+\r
                {\r
-                       decltype(mix_image(frames)) image;\r
-                       decltype(mix_audio(frames)) audio;\r
+                       critical_section::scoped_lock lock(mutex_);\r
 \r
-                       tbb::parallel_invoke(\r
-                                       [&]{image = mix_image(frames);}, \r
-                                       [&]{audio = mix_audio(frames);});\r
-                       \r
-                       return make_safe<read_frame>(channel_.ogl(), std::move(image.get()), std::move(audio));\r
+                       BOOST_FOREACH(auto& frame, frames)\r
+                       {\r
+                               auto blend_it = blend_modes_.find(frame.first);\r
+                               image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+                               \r
+                               auto frame1 = make_safe<core::basic_frame>(frame.second);\r
+                               frame1->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+\r
+                               if(format_desc_.field_mode != core::field_mode::progressive)\r
+                               {                               \r
+                                       auto frame2 = make_safe<core::basic_frame>(frame.second);\r
+                                       frame2->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+                                       frame1 = core::basic_frame::interlace(frame1, frame2, format_desc_.field_mode);\r
+                               }\r
+                                                                       \r
+                               frame1->accept(audio_mixer_);                                   \r
+                               frame1->accept(image_mixer_);\r
+\r
+                               image_mixer_.end_layer();\r
+                       }\r
                }\r
-               catch(...)\r
-               {\r
-                       channel_.ogl().gc().wait();\r
-                       image_mixer_ = image_mixer(channel_);\r
-                       audio_mixer_ = audio_mixer(channel_.get_format_desc());\r
-                       channel_.ogl().gc().wait();\r
 \r
-                       CASPAR_LOG_CURRENT_EXCEPTION();\r
-                       return make_safe<read_frame>();\r
+               auto image = image_mixer_.render();\r
+               auto audio = audio_mixer_.mix();\r
+                       \r
+               {\r
+                       scoped_oversubcription_token oversubscribe;\r
+                       image.wait();\r
                }\r
+\r
+               auto frame = make_safe<read_frame>(ogl_, format_desc_.size, std::move(image.get()), std::move(audio));\r
+\r
+               return msg->transfer<safe_ptr<core::read_frame>>(std::move(frame));     \r
        }\r
-                       \r
-       safe_ptr<core::write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)\r
+                                               \r
+       boost::unique_future<safe_ptr<core::write_frame>> async_create_frame(const void* tag, const core::pixel_format_desc& desc)\r
        {               \r
-               return image_mixer_.create_frame(tag, desc);\r
-       }\r
-\r
-       void reset_transforms()\r
-       {\r
-               channel_.execution().invoke([&]\r
-               {\r
-                       boost::fusion::at_key<image_transform>(transforms_).clear();\r
-                       boost::fusion::at_key<audio_transform>(transforms_).clear();\r
-               });\r
+               return image_mixer_.async_create_frame(tag, desc);\r
        }\r
                \r
-       template<typename T>\r
-       void set_transform(int index, const T& transform, unsigned int mix_duration, const std::wstring& tween)\r
+       void set_transform(int index, const frame_transform& transform, unsigned int mix_duration, const std::wstring& tween)\r
        {\r
-               channel_.execution().invoke([&]\r
-               {\r
-                       auto& transforms = boost::fusion::at_key<T>(transforms_);\r
+               critical_section::scoped_lock lock(mutex_);\r
 \r
-                       auto src = transforms[index].fetch();\r
-                       auto dst = transform;\r
-                       transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);\r
-               });\r
+               auto src = transforms_[index].fetch();\r
+               auto dst = transform;\r
+               transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
        }\r
                                \r
-       template<typename T>\r
-       void apply_transform(int index, const std::function<T(T)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
+       void apply_transform(int index, const std::function<frame_transform(frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
        {\r
-               channel_.execution().invoke([&]\r
-               {\r
-                       auto& transforms = boost::fusion::at_key<T>(transforms_);\r
+               critical_section::scoped_lock lock(mutex_);\r
 \r
-                       auto src = transforms[index].fetch();\r
-                       auto dst = transform(src);\r
-                       transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);\r
-               });\r
+               auto src = transforms_[index].fetch();\r
+               auto dst = transform(src);\r
+               transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
        }\r
-               \r
-       std::wstring print() const\r
+\r
+       void clear_transforms()\r
        {\r
-               return L"mixer";\r
-       }\r
+               critical_section::scoped_lock lock(mutex_);\r
 \r
-private:\r
-               \r
-       boost::unique_future<safe_ptr<host_buffer>> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)\r
-       {               \r
-               auto& image_transforms = boost::fusion::at_key<core::image_transform>(transforms_);\r
+               transforms_.clear();\r
+               blend_modes_.clear();\r
+       }\r
                \r
-               BOOST_FOREACH(auto& frame, frames)\r
-               {\r
-                       image_mixer_.begin_layer();\r
-\r
-                       auto frame1 = make_safe<core::basic_frame>(frame.second);\r
-                       frame1->get_image_transform() = image_transforms[frame.first].fetch_and_tick(1);\r
-                                               \r
-                       if(channel_.get_format_desc().mode != core::video_mode::progressive)\r
-                       {\r
-                               auto frame2 = make_safe<core::basic_frame>(frame.second);\r
-                               frame2->get_image_transform() = image_transforms[frame.first].fetch_and_tick(1);\r
-                               if(frame1->get_image_transform() != frame2->get_image_transform())\r
-                                       frame1 = core::basic_frame::interlace(frame1, frame2, channel_.get_format_desc().mode);\r
-                       }\r
-\r
-                       frame1->accept(image_mixer_);\r
-\r
-                       image_mixer_.end_layer();\r
-               }\r
+       void set_blend_mode(int index, blend_mode::type value)\r
+       {\r
+               critical_section::scoped_lock lock(mutex_);\r
 \r
-               return image_mixer_.render();\r
+               blend_modes_[index] = value;\r
        }\r
 \r
-       std::vector<int16_t> mix_audio(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
+       std::wstring print() const\r
        {\r
-               auto& audio_transforms = boost::fusion::at_key<core::audio_transform>(transforms_);\r
-\r
-               BOOST_FOREACH(auto& frame, frames)\r
-               {\r
-                       const unsigned int num = channel_.get_format_desc().mode == core::video_mode::progressive ? 1 : 2;\r
-\r
-                       auto frame1 = make_safe<core::basic_frame>(frame.second);\r
-                       frame1->get_audio_transform() = audio_transforms[frame.first].fetch_and_tick(num);\r
-                       frame1->accept(audio_mixer_);\r
-               }\r
-\r
-               return audio_mixer_.mix();\r
+               return L"mixer";\r
        }\r
 };\r
        \r
-mixer::mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
-safe_ptr<core::read_frame> mixer::execute(const std::map<int, safe_ptr<core::basic_frame>>& frames){ return impl_->execute(frames);}\r
-core::video_format_desc mixer::get_video_format_desc() const { return impl_->channel_.get_format_desc(); }\r
-safe_ptr<core::write_frame> mixer::create_frame(const void* tag, const core::pixel_format_desc& desc){ return impl_->create_frame(tag, desc); }                \r
-safe_ptr<core::write_frame> mixer::create_frame(const void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt)\r
-{\r
-       // Create bgra frame\r
-       core::pixel_format_desc desc;\r
-       desc.pix_fmt = pix_fmt;\r
-       desc.planes.push_back( core::pixel_format_desc::plane(width, height, 4));\r
-       return create_frame(tag, desc);\r
-}\r
-void mixer::reset_transforms(){impl_->reset_transforms();}\r
-void mixer::set_image_transform(int index, const core::image_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(index, transform, mix_duration, tween);}\r
-void mixer::set_audio_transform(int index, const core::audio_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::audio_transform>(index, transform, mix_duration, tween);}\r
-void mixer::apply_image_transform(int index, const std::function<core::image_transform(core::image_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform<core::image_transform>(index, transform, mix_duration, tween);}\r
-void mixer::apply_audio_transform(int index, const std::function<core::audio_transform(core::audio_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform<core::audio_transform>(index, transform, mix_duration, tween);}\r
-\r
+mixer::mixer(mixer::source_t& source, mixer::target_t& target, const video_format_desc& format_desc, ogl_device& ogl) : impl_(new implementation(source, target, format_desc, ogl)){}\r
+core::video_format_desc mixer::get_video_format_desc() const { return impl_->format_desc_; }\r
+boost::unique_future<safe_ptr<write_frame>> mixer::async_create_frame(const void* video_stream_tag, const pixel_format_desc& desc){ return impl_->async_create_frame(video_stream_tag, desc); }                        \r
+void mixer::set_frame_transform(int index, const core::frame_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform(index, transform, mix_duration, tween);}\r
+void mixer::apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform(index, transform, mix_duration, tween);}\r
+void mixer::clear_transforms(){impl_->clear_transforms();}\r
+void mixer::set_blend_mode(int index, blend_mode::type value){impl_->set_blend_mode(index, value);}\r
 }}
\ No newline at end of file
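
Note: the rewritten mixer drops the tbb::parallel_invoke based execute() in favour of a Concurrency::transformer stage (Microsoft PPL Asynchronous Agents Library) that the constructor links between the channel's source and target. The standalone sketch below only illustrates that transformer/link_target dataflow pattern; it uses plain int and std::wstring messages instead of CasparCG's frame types and is not part of the commit.

    #include <agents.h>
    #include <string>
    #include <iostream>

    int main()
    {
        using namespace Concurrency;

        unbounded_buffer<int>          source; // stands in for mixer::source_t
        unbounded_buffer<std::wstring> target; // stands in for mixer::target_t

        // The transformation step, analogous to implementation::mix above:
        // consume one input message, produce one output message for the target.
        transformer<int, std::wstring> mix([](int n)
        {
            return L"frame " + std::to_wstring(n);
        }, &target);

        // Mirrors source.link_target(&mixer_) in the new constructor.
        source.link_target(&mix);

        send(source, 42);
        std::wcout << receive(target) << std::endl; // prints "frame 42"
    }

As in the diff, the pipeline stays push-driven: upstream sends frames into the source buffer, the transformer runs the mixing function on a scheduler worker thread, and the result propagates to whatever target is linked downstream.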