git.sesse.net Git - casparcg/blobdiff - core/mixer/mixer.cpp
2.1.0: Seal of appropriate classes.
index f539fba1d25657ba5b925ecad74f209ac7bba133..6751375e648a5a79f941f95a50d9444fe4055593 100644 (file)
@@ -1,22 +1,24 @@
 /*\r
-* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>\r
 *\r
-*  This file is part of CasparCG.\r
+* This file is part of CasparCG (www.casparcg.com).\r
 *\r
-*    CasparCG is free software: you can redistribute it and/or modify\r
-*    it under the terms of the GNU General Public License as published by\r
-*    the Free Software Foundation, either version 3 of the License, or\r
-*    (at your option) any later version.\r
+* CasparCG is free software: you can redistribute it and/or modify\r
+* it under the terms of the GNU General Public License as published by\r
+* the Free Software Foundation, either version 3 of the License, or\r
+* (at your option) any later version.\r
 *\r
-*    CasparCG is distributed in the hope that it will be useful,\r
-*    but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r
-*    GNU General Public License for more details.\r
-\r
-*    You should have received a copy of the GNU General Public License\r
-*    along with CasparCG.  If not, see <http://www.gnu.org/licenses/>.\r
+* CasparCG is distributed in the hope that it will be useful,\r
+* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r
+* GNU General Public License for more details.\r
+*\r
+* You should have received a copy of the GNU General Public License\r
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
 *\r
+* Author: Robert Nagy, ronag89@gmail.com\r
 */\r
+\r
 #include "../StdAfx.h"\r
 \r
 #include "mixer.h"\r
 #include "audio/audio_mixer.h"\r
 #include "image/image_mixer.h"\r
 \r
-#include "../video_channel_context.h"\r
-\r
-#include <common/exception/exceptions.h>\r
-#include <common/concurrency/executor.h>\r
-#include <common/utility/tweener.h>\r
 #include <common/env.h>\r
+#include <common/concurrency/executor.h>\r
+#include <common/exception/exceptions.h>\r
 #include <common/gl/gl_check.h>\r
+#include <common/utility/tweener.h>\r
 \r
 #include <core/mixer/read_frame.h>\r
 #include <core/mixer/write_frame.h>\r
 #include <core/producer/frame/basic_frame.h>\r
 #include <core/producer/frame/frame_factory.h>\r
-#include <core/producer/frame/pixel_format.h>\r
 #include <core/producer/frame/frame_transform.h>\r
+#include <core/producer/frame/pixel_format.h>\r
 \r
 #include <core/video_format.h>\r
 \r
-#include <boost/fusion/container/map.hpp>\r
-#include <boost/fusion/include/at_key.hpp>\r
 #include <boost/foreach.hpp>\r
+#include <boost/timer.hpp>\r
+#include <boost/property_tree/ptree.hpp>\r
 \r
-#include <tbb/parallel_invoke.h>\r
+#include <tbb/concurrent_queue.h>\r
+#include <tbb/spin_mutex.h>\r
 \r
 #include <unordered_map>\r
 \r
 namespace caspar { namespace core {\r
                \r
-template<typename T>\r
-class tweened_transform\r
-{\r
-       T source_;\r
-       T dest_;\r
-       int duration_;\r
-       int time_;\r
-       tweener_t tweener_;\r
-public:        \r
-       tweened_transform()\r
-               : duration_(0)\r
-               , time_(0)\r
-               , tweener_(get_tweener(L"linear")){}\r
-       tweened_transform(const T& source, const T& dest, int duration, const std::wstring& tween = L"linear")\r
-               : source_(source)\r
-               , dest_(dest)\r
-               , duration_(duration)\r
-               , time_(0)\r
-               , tweener_(get_tweener(tween)){}\r
-       \r
-       T fetch()\r
-       {\r
-               return time_ == duration_ ? dest_ : tween(static_cast<double>(time_), source_, dest_, static_cast<double>(duration_), tweener_);\r
-       }\r
-\r
-       T fetch_and_tick(int num)\r
-       {                                               \r
-               time_ = std::min(time_+num, duration_);\r
-               return fetch();\r
-       }\r
-};\r
-\r
 struct mixer::implementation : boost::noncopyable\r
 {              \r
-       video_channel_context& channel_;\r
+       safe_ptr<diagnostics::graph>    graph_;\r
+       boost::timer                                    mix_timer_;\r
+\r
+       safe_ptr<mixer::target_t>               target_;\r
+       mutable tbb::spin_mutex                 format_desc_mutex_;\r
+       video_format_desc                               format_desc_;\r
+       safe_ptr<ogl_device>                    ogl_;\r
        \r
        audio_mixer     audio_mixer_;\r
        image_mixer image_mixer_;\r
        \r
-       std::unordered_map<int, tweened_transform<core::frame_transform>> transforms_;  \r
        std::unordered_map<int, blend_mode::type> blend_modes_;\r
-\r
-       std::queue<std::pair<boost::unique_future<safe_ptr<host_buffer>>, std::vector<int16_t>>> buffer_;\r
-       \r
-       const size_t buffer_size_;\r
+                       \r
+       executor executor_;\r
 \r
 public:\r
-       implementation(video_channel_context& video_channel) \r
-               : channel_(video_channel)\r
-               , audio_mixer_(channel_.get_format_desc())\r
-               , image_mixer_(channel_)\r
-               , buffer_size_(env::properties().get("configuration.producers.buffer-depth", 1))\r
-       {       \r
-               CASPAR_LOG(info) << print() << L" Successfully initialized . Buffer-depth: " << buffer_size_;   \r
+       implementation(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<mixer::target_t>& target, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl) \r
+               : graph_(graph)\r
+               , target_(target)\r
+               , format_desc_(format_desc)\r
+               , ogl_(ogl)\r
+               , image_mixer_(ogl)\r
+               , audio_mixer_(graph_)\r
+               , executor_(L"mixer")\r
+       {                       \r
+               graph_->set_color("mix-time", diagnostics::color(1.0f, 0.0f, 0.9f));\r
        }\r
-                       \r
-       safe_ptr<read_frame> execute(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
+       \r
+       void send(const std::pair<std::map<int, safe_ptr<core::basic_frame>>, std::shared_ptr<void>>& packet)\r
        {                       \r
-               try\r
-               {\r
-                       BOOST_FOREACH(auto& frame, frames)\r
+               executor_.begin_invoke([=]\r
+               {               \r
+                       try\r
                        {\r
-                               auto blend_it = blend_modes_.find(frame.first);\r
-                               image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+                               mix_timer_.restart();\r
+\r
+                               auto frames = packet.first;\r
                                \r
-                               auto frame1 = make_safe<core::basic_frame>(frame.second);\r
-                               frame1->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
-\r
-                               if(channel_.get_format_desc().field_mode != core::field_mode::progressive)\r
-                               {                               \r
-                                       auto frame2 = make_safe<core::basic_frame>(frame.second);\r
-                                       frame2->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
-                                               \r
-                                       if(frame1->get_frame_transform() != frame2->get_frame_transform())\r
-                                               frame1 = core::basic_frame::interlace(frame1, frame2, channel_.get_format_desc().field_mode);\r
+                               BOOST_FOREACH(auto& frame, frames)\r
+                               {\r
+                                       auto blend_it = blend_modes_.find(frame.first);\r
+                                       image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+                                                                                                       \r
+                                       frame.second->accept(audio_mixer_);                                     \r
+                                       frame.second->accept(image_mixer_);\r
+\r
+                                       image_mixer_.end_layer();\r
                                }\r
-                                                                       \r
-                               frame1->accept(audio_mixer_);                                   \r
-                               frame1->accept(image_mixer_);\r
 \r
-                               image_mixer_.end_layer();\r
-                       }\r
+                               auto image = image_mixer_(format_desc_);\r
+                               auto audio = audio_mixer_(format_desc_);\r
+                               image.wait();\r
 \r
-                       auto image = image_mixer_.render();\r
-                       auto audio = audio_mixer_.mix();\r
-                       \r
-                       buffer_.push(std::make_pair(std::move(image), audio));\r
+                               graph_->update_value("mix-time", mix_timer_.elapsed()*format_desc_.fps*0.5);\r
 \r
-                       if(buffer_.size()-1 < buffer_size_)                     \r
-                               return make_safe<read_frame>();\r
-               \r
-                       auto res = std::move(buffer_.front());\r
-                       buffer_.pop();\r
-\r
-                       return make_safe<read_frame>(channel_.ogl(), channel_.get_format_desc().size, std::move(res.first.get()), std::move(res.second));       \r
-               }\r
-               catch(...)\r
-               {\r
-                       CASPAR_LOG(error) << L"[mixer] Error detected.";\r
-                       throw;\r
-               }                               \r
+                               target_->send(std::make_pair(make_safe<read_frame>(ogl_, format_desc_.width, format_desc_.height, std::move(image.get()), std::move(audio)), packet.second));                                   \r
+                       }\r
+                       catch(...)\r
+                       {\r
+                               CASPAR_LOG_CURRENT_EXCEPTION();\r
+                       }       \r
+               });             \r
        }\r
                                        \r
        safe_ptr<core::write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)\r
        {               \r
-               return image_mixer_.create_frame(tag, desc);\r
-       }\r
-               \r
-       void set_transform(int index, const frame_transform& transform, unsigned int mix_duration, const std::wstring& tween)\r
-       {\r
-               channel_.execution().invoke([&]\r
-               {\r
-                       auto src = transforms_[index].fetch();\r
-                       auto dst = transform;\r
-                       transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
-               }, high_priority);\r
+               return make_safe<write_frame>(ogl_, tag, desc);\r
        }\r
                                \r
-       void apply_transform(int index, const std::function<frame_transform(frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
+       void set_blend_mode(int index, blend_mode::type value)\r
        {\r
-               channel_.execution().invoke([&]\r
+               executor_.begin_invoke([=]\r
                {\r
-                       auto src = transforms_[index].fetch();\r
-                       auto dst = transform(src);\r
-                       transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
+                       blend_modes_[index] = value;\r
                }, high_priority);\r
        }\r
-\r
-       void clear_transforms()\r
+       \r
+       void set_video_format_desc(const video_format_desc& format_desc)\r
        {\r
-               channel_.execution().invoke([&]\r
+               executor_.begin_invoke([=]\r
                {\r
-                       transforms_.clear();\r
-                       blend_modes_.clear();\r
-               }, high_priority);\r
+                       tbb::spin_mutex::scoped_lock lock(format_desc_mutex_);\r
+                       format_desc_ = format_desc;\r
+               });\r
        }\r
-               \r
-       void set_blend_mode(int index, blend_mode::type value)\r
+\r
+       core::video_format_desc get_video_format_desc() const // nothrow\r
        {\r
-               channel_.execution().invoke([&]\r
-               {\r
-                       blend_modes_[index] = value;\r
-               }, high_priority);\r
+               tbb::spin_mutex::scoped_lock lock(format_desc_mutex_);\r
+               return format_desc_;\r
        }\r
 \r
-       std::wstring print() const\r
+       boost::unique_future<boost::property_tree::wptree> info() const\r
        {\r
-               return L"mixer";\r
+               boost::promise<boost::property_tree::wptree> info;\r
+               info.set_value(boost::property_tree::wptree());\r
+               return info.get_future();\r
        }\r
 };\r
        \r
-mixer::mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
-safe_ptr<core::read_frame> mixer::execute(const std::map<int, safe_ptr<core::basic_frame>>& frames){ return impl_->execute(frames);}\r
-core::video_format_desc mixer::get_video_format_desc() const { return impl_->channel_.get_format_desc(); }\r
+mixer::mixer(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<target_t>& target, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl) \r
+       : impl_(new implementation(graph, target, format_desc, ogl)){}\r
+void mixer::send(const std::pair<std::map<int, safe_ptr<core::basic_frame>>, std::shared_ptr<void>>& frames){ impl_->send(frames);}\r
+core::video_format_desc mixer::get_video_format_desc() const { return impl_->get_video_format_desc(); }\r
 safe_ptr<core::write_frame> mixer::create_frame(const void* tag, const core::pixel_format_desc& desc){ return impl_->create_frame(tag, desc); }                \r
-safe_ptr<core::write_frame> mixer::create_frame(const void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt)\r
-{\r
-       // Create bgra frame\r
-       core::pixel_format_desc desc;\r
-       desc.pix_fmt = pix_fmt;\r
-       desc.planes.push_back( core::pixel_format_desc::plane(width, height, 4));\r
-       return create_frame(tag, desc);\r
-}\r
-void mixer::set_frame_transform(int index, const core::frame_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform(index, transform, mix_duration, tween);}\r
-void mixer::apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform(index, transform, mix_duration, tween);}\r
-void mixer::clear_transforms(){impl_->clear_transforms();}\r
 void mixer::set_blend_mode(int index, blend_mode::type value){impl_->set_blend_mode(index, value);}\r
+void mixer::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
+boost::unique_future<boost::property_tree::wptree> mixer::info() const{return impl_->info();}\r
 }}
\ No newline at end of file
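
The refactor replaces the synchronous execute() path with an asynchronous send(): work is queued on a dedicated executor (executor_(L"mixer")) and the finished read_frame is pushed to target_ once mixing completes. Below is a minimal, self-contained sketch of that fire-and-forget hand-off using only the standard library; the executor class and the target callback are illustrative stand-ins for CasparCG's common/concurrency/executor and mixer::target_t, not the real implementations.

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Single-threaded work queue, loosely modelled on the executor used in the diff.
class executor
{
	std::queue<std::function<void()>> queue_;
	std::mutex                        mutex_;
	std::condition_variable           cond_;
	bool                              running_ = true;
	std::thread                       thread_;
public:
	executor() : thread_([this]{ run(); }) {}
	~executor()
	{
		{ std::lock_guard<std::mutex> lock(mutex_); running_ = false; }
		cond_.notify_one();
		thread_.join(); // remaining tasks are drained before the thread exits
	}
	// Fire-and-forget, like executor::begin_invoke in the diff.
	void begin_invoke(std::function<void()> task)
	{
		{ std::lock_guard<std::mutex> lock(mutex_); queue_.push(std::move(task)); }
		cond_.notify_one();
	}
private:
	void run()
	{
		for (;;)
		{
			std::function<void()> task;
			{
				std::unique_lock<std::mutex> lock(mutex_);
				cond_.wait(lock, [this]{ return !queue_.empty() || !running_; });
				if (queue_.empty())
					return;
				task = std::move(queue_.front());
				queue_.pop();
			}
			task(); // the real send() wraps this in try/catch and logs exceptions
		}
	}
};

int main()
{
	executor mixer_executor;                 // stands in for executor_(L"mixer")
	auto target = [](int frame)              // stands in for target_->send(...)
	{
		std::cout << "consumed frame " << frame << "\n";
	};

	for (int frame = 0; frame < 3; ++frame)
		mixer_executor.begin_invoke([=]{ target(frame); }); // capture by value, like [=] in send()
}

Capturing the packet by value in the queued lambda (the [=] in send()) is what lets the caller return immediately without having to keep the frame map alive.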
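
format_desc_ is mutated on the executor thread by set_video_format_desc() but read from arbitrary threads by get_video_format_desc(), which is why the new code guards it with a tbb::spin_mutex and returns a copy. The same pattern with std::mutex, as a sketch only; the format_desc struct below is a simplified stand-in for core::video_format_desc.

#include <mutex>
#include <string>

// Simplified stand-in for core::video_format_desc.
struct format_desc
{
	std::wstring name   = L"1080i5000";
	int          width  = 1920;
	int          height = 1080;
	double       fps    = 25.0;
};

class format_holder
{
	mutable std::mutex mutex_; // tbb::spin_mutex in the diff; std::mutex keeps the sketch portable
	format_desc        desc_;
public:
	// Invoked on the executor thread in the real mixer.
	void set(const format_desc& desc)
	{
		std::lock_guard<std::mutex> lock(mutex_);
		desc_ = desc;
	}
	// Safe to call from any thread; mirrors get_video_format_desc().
	format_desc get() const
	{
		std::lock_guard<std::mutex> lock(mutex_);
		return desc_; // copy out, never a reference into shared state
	}
};

Returning by value keeps the lock held only for the duration of the copy and means callers never observe a half-updated descriptor.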
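
mixer::info() returns an already-fulfilled future: it creates a boost::promise, sets an (empty, for now) wptree as the value, and hands back the future, so callers can treat synchronous and asynchronous info sources uniformly. A minimal sketch of that idiom with std::promise and a plain string standing in for the property tree:

#include <future>
#include <iostream>
#include <string>

// Returns a future that is already satisfied, mirroring mixer::info() in the diff.
std::future<std::string> info()
{
	std::promise<std::string> promise;
	promise.set_value("mixer info placeholder"); // the diff sets an empty wptree here
	return promise.get_future();
}

int main()
{
	auto f = info();
	std::cout << f.get() << "\n"; // get() returns immediately; the value is already set
}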