2.0. audio: Audio pipeline is now in 32 bit.
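
The commit subject refers to the audio pipeline moving to 32-bit samples; in the diff below this shows up as the mixer's audio buffer changing from std::vector<int16_t> to std::vector<int32_t>. As a rough illustration only (this helper is hypothetical and not part of the CasparCG source), widening 16-bit PCM into such a 32-bit pipeline typically looks like this:

    #include <cstdint>
    #include <vector>

    // Hypothetical helper: place each signed 16-bit sample in the upper 16 bits of
    // a 32-bit sample so relative levels are preserved and headroom is gained.
    std::vector<int32_t> widen_to_32bit(const std::vector<int16_t>& in)
    {
        std::vector<int32_t> out;
        out.reserve(in.size());
        for (size_t n = 0; n < in.size(); ++n)
            out.push_back(static_cast<int32_t>(in[n]) * 65536); // 0x7FFF -> 0x7FFF0000
        return out;
    }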
diff --git a/core/mixer/mixer.cpp b/core/mixer/mixer.cpp
index fafcef02054e3a08d26de70114dd500fafe2ac40..95721d04400ab65464d8b8c6d86bd3e61ca9e552 100644
--- a/core/mixer/mixer.cpp
+++ b/core/mixer/mixer.cpp
 #include <common/concurrency/executor.h>\r
 #include <common/utility/tweener.h>\r
 #include <common/env.h>\r
+#include <common/gl/gl_check.h>\r
 \r
 #include <core/mixer/read_frame.h>\r
 #include <core/mixer/write_frame.h>\r
 #include <core/producer/frame/basic_frame.h>\r
 #include <core/producer/frame/frame_factory.h>\r
 #include <core/producer/frame/pixel_format.h>\r
-#include <core/producer/frame/audio_transform.h>\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
 \r
 #include <core/video_format.h>\r
 \r
@@ -93,13 +93,10 @@ struct mixer::implementation : boost::noncopyable
        audio_mixer     audio_mixer_;\r
        image_mixer image_mixer_;\r
        \r
-       typedef std::unordered_map<int, tweened_transform<core::image_transform>> image_transforms;\r
-       typedef std::unordered_map<int, tweened_transform<core::audio_transform>> audio_transforms;\r
+       std::unordered_map<int, tweened_transform<core::frame_transform>> transforms_;  \r
+       std::unordered_map<int, blend_mode::type> blend_modes_;\r
 \r
-       boost::fusion::map<boost::fusion::pair<core::image_transform, image_transforms>,\r
-                                       boost::fusion::pair<core::audio_transform, audio_transforms>> transforms_;\r
-       \r
-       std::queue<std::pair<boost::unique_future<safe_ptr<host_buffer>>, std::vector<int16_t>>> buffer_;\r
+       std::queue<std::pair<boost::unique_future<safe_ptr<host_buffer>>, std::vector<int32_t>>> buffer_;\r
        \r
        const size_t buffer_size_;\r
 \r
@@ -107,7 +104,7 @@ public:
        implementation(video_channel_context& video_channel) \r
                : channel_(video_channel)\r
                , audio_mixer_(channel_.get_format_desc())\r
-               , image_mixer_(channel_)\r
+               , image_mixer_(channel_.ogl(), channel_.get_format_desc())\r
                , buffer_size_(env::properties().get("configuration.producers.buffer-depth", 1))\r
        {       \r
                CASPAR_LOG(info) << print() << L" Successfully initialized . Buffer-depth: " << buffer_size_;   \r
@@ -117,124 +114,92 @@ public:
        {                       \r
                try\r
                {\r
-                       decltype(mix_image(frames)) image;\r
-                       decltype(mix_audio(frames)) audio;\r
+                       BOOST_FOREACH(auto& frame, frames)\r
+                       {\r
+                               auto blend_it = blend_modes_.find(frame.first);\r
+                               image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+                               \r
+                               auto frame1 = make_safe<core::basic_frame>(frame.second);\r
+                               frame1->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+\r
+                               if(channel_.get_format_desc().field_mode != core::field_mode::progressive)\r
+                               {                               \r
+                                       auto frame2 = make_safe<core::basic_frame>(frame.second);\r
+                                       frame2->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+                                       frame1 = core::basic_frame::interlace(frame1, frame2, channel_.get_format_desc().field_mode);\r
+                               }\r
+                                                                       \r
+                               frame1->accept(audio_mixer_);                                   \r
+                               frame1->accept(image_mixer_);\r
+\r
+                               image_mixer_.end_layer();\r
+                       }\r
 \r
-                       tbb::parallel_invoke(\r
-                                       [&]{image = mix_image(frames);}, \r
-                                       [&]{audio = mix_audio(frames);});\r
+                       auto image = image_mixer_.render();\r
+                       auto audio = audio_mixer_.mix();\r
                        \r
                        buffer_.push(std::make_pair(std::move(image), audio));\r
-               }\r
-               catch(...)\r
-               {\r
-                       channel_.ogl().gc().wait();\r
-                       image_mixer_ = image_mixer(channel_);\r
-                       audio_mixer_ = audio_mixer(channel_.get_format_desc());\r
-                       channel_.ogl().gc().wait();\r
 \r
-                       CASPAR_LOG_CURRENT_EXCEPTION();\r
-               }\r
-\r
-               if(buffer_.size() > buffer_size_)\r
-               {\r
+                       if(buffer_.size()-1 < buffer_size_)                     \r
+                               return make_safe<read_frame>();\r
+               \r
                        auto res = std::move(buffer_.front());\r
                        buffer_.pop();\r
-                       \r
-                       return make_safe<read_frame>(channel_.ogl(), channel_.get_format_desc().size, std::move(res.first.get()), std::move(res.second));\r
+\r
+                       return make_safe<read_frame>(channel_.ogl(), channel_.get_format_desc().size, std::move(res.first.get()), std::move(res.second));       \r
                }\r
-               \r
-               return make_safe<read_frame>();\r
+               catch(...)\r
+               {\r
+                       CASPAR_LOG(error) << L"[mixer] Error detected.";\r
+                       throw;\r
+               }                               \r
        }\r
-                       \r
+                                       \r
        safe_ptr<core::write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)\r
        {               \r
                return image_mixer_.create_frame(tag, desc);\r
        }\r
-\r
-       void reset_transforms()\r
-       {\r
-               channel_.execution().invoke([&]\r
-               {\r
-                       boost::fusion::at_key<image_transform>(transforms_).clear();\r
-                       boost::fusion::at_key<audio_transform>(transforms_).clear();\r
-               });\r
-       }\r
                \r
-       template<typename T>\r
-       void set_transform(int index, const T& transform, unsigned int mix_duration, const std::wstring& tween)\r
+       void set_transform(int index, const frame_transform& transform, unsigned int mix_duration, const std::wstring& tween)\r
        {\r
                channel_.execution().invoke([&]\r
                {\r
-                       auto& transforms = boost::fusion::at_key<T>(transforms_);\r
-\r
-                       auto src = transforms[index].fetch();\r
+                       auto src = transforms_[index].fetch();\r
                        auto dst = transform;\r
-                       transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);\r
-               });\r
+                       transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
+               }, high_priority);\r
        }\r
                                \r
-       template<typename T>\r
-       void apply_transform(int index, const std::function<T(T)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
+       void apply_transform(int index, const std::function<frame_transform(frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
        {\r
                channel_.execution().invoke([&]\r
                {\r
-                       auto& transforms = boost::fusion::at_key<T>(transforms_);\r
-\r
-                       auto src = transforms[index].fetch();\r
+                       auto src = transforms_[index].fetch();\r
                        auto dst = transform(src);\r
-                       transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);\r
-               });\r
+                       transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
+               }, high_priority);\r
        }\r
-               \r
-       std::wstring print() const\r
+\r
+       void clear_transforms()\r
        {\r
-               return L"mixer";\r
+               channel_.execution().invoke([&]\r
+               {\r
+                       transforms_.clear();\r
+                       blend_modes_.clear();\r
+               }, high_priority);\r
        }\r
-\r
-private:\r
                \r
-       boost::unique_future<safe_ptr<host_buffer>> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)\r
-       {               \r
-               auto& image_transforms = boost::fusion::at_key<core::image_transform>(transforms_);\r
-               \r
-               BOOST_FOREACH(auto& frame, frames)\r
+       void set_blend_mode(int index, blend_mode::type value)\r
+       {\r
+               channel_.execution().invoke([&]\r
                {\r
-                       image_mixer_.begin_layer();\r
-\r
-                       auto frame1 = make_safe<core::basic_frame>(frame.second);\r
-                       frame1->get_image_transform() = image_transforms[frame.first].fetch_and_tick(1);\r
-                                               \r
-                       if(channel_.get_format_desc().mode != core::video_mode::progressive)\r
-                       {\r
-                               auto frame2 = make_safe<core::basic_frame>(frame.second);\r
-                               frame2->get_image_transform() = image_transforms[frame.first].fetch_and_tick(1);\r
-                               if(frame1->get_image_transform() != frame2->get_image_transform())\r
-                                       frame1 = core::basic_frame::interlace(frame1, frame2, channel_.get_format_desc().mode);\r
-                       }\r
-\r
-                       frame1->accept(image_mixer_);\r
-\r
-                       image_mixer_.end_layer();\r
-               }\r
-\r
-               return image_mixer_.render();\r
+                       blend_modes_[index] = value;\r
+               }, high_priority);\r
        }\r
 \r
-       std::vector<int16_t> mix_audio(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
+       std::wstring print() const\r
        {\r
-               auto& audio_transforms = boost::fusion::at_key<core::audio_transform>(transforms_);\r
-\r
-               BOOST_FOREACH(auto& frame, frames)\r
-               {\r
-                       const unsigned int num = channel_.get_format_desc().mode == core::video_mode::progressive ? 1 : 2;\r
-\r
-                       auto frame1 = make_safe<core::basic_frame>(frame.second);\r
-                       frame1->get_audio_transform() = audio_transforms[frame.first].fetch_and_tick(num);\r
-                       frame1->accept(audio_mixer_);\r
-               }\r
-\r
-               return audio_mixer_.mix();\r
+               return L"mixer";\r
        }\r
 };\r
        \r
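
The render() loop above keys each layer off a single tweened_transform<core::frame_transform>, calling fetch_and_tick(1) once per field, so interlaced formats advance the tween twice per frame. As a rough, hypothetical sketch of the idea only (not the actual CasparCG class, which also takes the tween name passed down from set_transform/apply_transform):

    #include <algorithm>

    // Hypothetical simplification of tweened_transform<T>: hold a source and a
    // destination value and interpolate between them over a fixed number of ticks.
    template <typename T>
    class tweened_transform_sketch
    {
        T            src_;
        T            dst_;
        unsigned int duration_;
        unsigned int time_;
    public:
        tweened_transform_sketch() : src_(), dst_(), duration_(0), time_(0) {}
        tweened_transform_sketch(const T& src, const T& dst, unsigned int duration)
            : src_(src), dst_(dst), duration_(duration), time_(0) {}

        T fetch() const // current interpolated value, without advancing
        {
            double t = duration_ == 0 ? 1.0 : static_cast<double>(time_) / duration_;
            return src_ + (dst_ - src_) * t; // linear; the real class applies the named tweener curve
        }

        T fetch_and_tick(unsigned int num) // advance by num fields, then sample
        {
            time_ = std::min(time_ + num, duration_);
            return fetch();
        }
    };
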
@@ -250,10 +215,8 @@ safe_ptr<core::write_frame> mixer::create_frame(const void* tag, size_t width, s
        desc.planes.push_back( core::pixel_format_desc::plane(width, height, 4));\r
        return create_frame(tag, desc);\r
 }\r
-void mixer::reset_transforms(){impl_->reset_transforms();}\r
-void mixer::set_image_transform(int index, const core::image_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(index, transform, mix_duration, tween);}\r
-void mixer::set_audio_transform(int index, const core::audio_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::audio_transform>(index, transform, mix_duration, tween);}\r
-void mixer::apply_image_transform(int index, const std::function<core::image_transform(core::image_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform<core::image_transform>(index, transform, mix_duration, tween);}\r
-void mixer::apply_audio_transform(int index, const std::function<core::audio_transform(core::audio_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform<core::audio_transform>(index, transform, mix_duration, tween);}\r
-\r
+void mixer::set_frame_transform(int index, const core::frame_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform(index, transform, mix_duration, tween);}\r
+void mixer::apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform(index, transform, mix_duration, tween);}\r
+void mixer::clear_transforms(){impl_->clear_transforms();}\r
+void mixer::set_blend_mode(int index, blend_mode::type value){impl_->set_blend_mode(index, value);}\r
 }}
\ No newline at end of file
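
The new public surface above (set_frame_transform, apply_frame_transform, clear_transforms, set_blend_mode) replaces the separate image/audio entry points removed by this diff. A hypothetical caller-side fragment, assuming the CasparCG 2.0 headers and an already constructed mixer; the frame_transform members opacity and volume are assumed names, and blend_mode::screen is an assumed enumerator (only blend_mode::normal appears in this diff):

    #include <core/mixer/mixer.h>
    #include <core/producer/frame/frame_transform.h>

    void fade_in_layer(caspar::core::mixer& mixer, int layer)
    {
        using namespace caspar::core;

        // Per-layer blend mode, picked up by the image mixer's begin_layer() during render().
        mixer.set_blend_mode(layer, blend_mode::screen);

        // Tween both image and audio properties of the unified transform over 25 frames.
        mixer.apply_frame_transform(layer, [](frame_transform transform) -> frame_transform
        {
            transform.opacity = 1.0; // assumed member
            transform.volume  = 1.0; // assumed member
            return transform;
        }, 25, L"linear");
    }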