/*\r
-* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>\r
*\r
-* This file is part of CasparCG.\r
+* This file is part of CasparCG (www.casparcg.com).\r
*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
+* CasparCG is free software: you can redistribute it and/or modify\r
+* it under the terms of the GNU General Public License as published by\r
+* the Free Software Foundation, either version 3 of the License, or\r
+* (at your option) any later version.\r
*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
+* CasparCG is distributed in the hope that it will be useful,\r
+* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+* GNU General Public License for more details.\r
+*\r
+* You should have received a copy of the GNU General Public License\r
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
*\r
+* Author: Robert Nagy, ronag89@gmail.com\r
*/\r
+\r
#include "../StdAfx.h"\r
\r
#include "mixer.h"\r
#include "audio/audio_mixer.h"\r
#include "image/image_mixer.h"\r
\r
-#include <common/exception/exceptions.h>\r
-#include <common/concurrency/executor.h>\r
-#include <common/utility/tweener.h>\r
#include <common/env.h>\r
+#include <common/concurrency/executor.h>\r
+#include <common/exception/exceptions.h>\r
#include <common/gl/gl_check.h>\r
+#include <common/utility/tweener.h>\r
\r
#include <core/mixer/read_frame.h>\r
#include <core/mixer/write_frame.h>\r
#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/frame/frame_factory.h>\r
-#include <core/producer/frame/pixel_format.h>\r
#include <core/producer/frame/frame_transform.h>\r
+#include <core/producer/frame/pixel_format.h>\r
\r
#include <core/video_format.h>\r
\r
-#include <boost/fusion/container/map.hpp>\r
-#include <boost/fusion/include/at_key.hpp>\r
#include <boost/foreach.hpp>\r
+#include <boost/timer.hpp>\r
+#include <boost/property_tree/ptree.hpp>\r
\r
-#include <unordered_map>\r
+#include <tbb/concurrent_queue.h>\r
+#include <tbb/spin_mutex.h>\r
\r
-using namespace Concurrency;\r
+#include <unordered_map>\r
\r
namespace caspar { namespace core {\r
\r
-template<typename T>\r
-class tweened_transform\r
-{\r
- T source_;\r
- T dest_;\r
- int duration_;\r
- int time_;\r
- tweener_t tweener_;\r
-public: \r
- tweened_transform()\r
- : duration_(0)\r
- , time_(0)\r
- , tweener_(get_tweener(L"linear")){}\r
- tweened_transform(const T& source, const T& dest, int duration, const std::wstring& tween = L"linear")\r
- : source_(source)\r
- , dest_(dest)\r
- , duration_(duration)\r
- , time_(0)\r
- , tweener_(get_tweener(tween)){}\r
- \r
- T fetch()\r
- {\r
- return time_ == duration_ ? dest_ : tween(static_cast<double>(time_), source_, dest_, static_cast<double>(duration_), tweener_);\r
- }\r
-\r
- T fetch_and_tick(int num)\r
- { \r
- time_ = std::min(time_+num, duration_);\r
- return fetch();\r
- }\r
-};\r
-\r
struct mixer::implementation : boost::noncopyable\r
{ \r
- const video_format_desc format_desc_;\r
- ogl_device& ogl_;\r
+ safe_ptr<diagnostics::graph> graph_;\r
+ boost::timer mix_timer_;\r
+\r
+ safe_ptr<mixer::target_t> target_;\r
+ mutable tbb::spin_mutex format_desc_mutex_;\r
+ video_format_desc format_desc_;\r
+ safe_ptr<ogl_device> ogl_;\r
\r
audio_mixer audio_mixer_;\r
image_mixer image_mixer_;\r
\r
- std::unordered_map<int, tweened_transform<core::frame_transform>> transforms_; \r
std::unordered_map<int, blend_mode::type> blend_modes_;\r
- \r
- critical_section mutex_;\r
- Concurrency::transformer<safe_ptr<message<std::map<int, safe_ptr<basic_frame>>>>, \r
- safe_ptr<message<safe_ptr<core::read_frame>>>> mixer_;\r
+ \r
+ executor executor_;\r
+\r
public:\r
- implementation(mixer::source_t& source, mixer::target_t& target, const video_format_desc& format_desc, ogl_device& ogl) \r
- : format_desc_(format_desc)\r
+ implementation(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<mixer::target_t>& target, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl) \r
+ : graph_(graph)\r
+ , target_(target)\r
+ , format_desc_(format_desc)\r
, ogl_(ogl)\r
- , audio_mixer_(format_desc)\r
- , image_mixer_(ogl, format_desc)\r
- , mixer_(std::bind(&implementation::mix, this, std::placeholders::_1), &target)\r
- { \r
- CASPAR_LOG(info) << print() << L" Successfully initialized."; \r
- source.link_target(&mixer_);\r
+	, audio_mixer_(graph_)
+	, image_mixer_(ogl)
+ , executor_(L"mixer")\r
+ { \r
+ graph_->set_color("mix-time", diagnostics::color(1.0f, 0.0f, 0.9f));\r
}\r
- \r
- safe_ptr<message<safe_ptr<core::read_frame>>> mix(const safe_ptr<message<std::map<int, safe_ptr<basic_frame>>>>& msg)\r
- { \r
- auto frames = msg->value();\r
-\r
- {\r
- critical_section::scoped_lock lock(mutex_);\r
-\r
- BOOST_FOREACH(auto& frame, frames)\r
+ \r
+ void send(const std::pair<std::map<int, safe_ptr<core::basic_frame>>, std::shared_ptr<void>>& packet)\r
+ { \r
+ executor_.begin_invoke([=]\r
+ { \r
+ try\r
{\r
- auto blend_it = blend_modes_.find(frame.first);\r
- image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+ mix_timer_.restart();\r
+\r
+ auto frames = packet.first;\r
\r
- auto frame1 = make_safe<core::basic_frame>(frame.second);\r
- frame1->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
-\r
- if(format_desc_.field_mode != core::field_mode::progressive)\r
- { \r
- auto frame2 = make_safe<core::basic_frame>(frame.second);\r
- frame2->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
- frame1 = core::basic_frame::interlace(frame1, frame2, format_desc_.field_mode);\r
+ BOOST_FOREACH(auto& frame, frames)\r
+ {\r
+ auto blend_it = blend_modes_.find(frame.first);\r
+ image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+ \r
+ frame.second->accept(audio_mixer_); \r
+ frame.second->accept(image_mixer_);\r
+\r
+ image_mixer_.end_layer();\r
}\r
- \r
- frame1->accept(audio_mixer_); \r
- frame1->accept(image_mixer_);\r
\r
- image_mixer_.end_layer();\r
- }\r
- }\r
-\r
- auto image = image_mixer_.render();\r
- auto audio = audio_mixer_.mix();\r
- \r
- {\r
- scoped_oversubcription_token oversubscribe;\r
- image.wait();\r
- }\r
+ auto image = image_mixer_(format_desc_);\r
+ auto audio = audio_mixer_(format_desc_);\r
+ image.wait();\r
\r
- auto frame = make_safe<read_frame>(ogl_, format_desc_.size, std::move(image.get()), std::move(audio));\r
+ graph_->update_value("mix-time", mix_timer_.elapsed()*format_desc_.fps*0.5);\r
\r
- return msg->transfer<safe_ptr<core::read_frame>>(std::move(frame)); \r
+ target_->send(std::make_pair(make_safe<read_frame>(ogl_, format_desc_.width, format_desc_.height, std::move(image.get()), std::move(audio)), packet.second)); \r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ } \r
+ }); \r
}\r
- \r
+ \r
safe_ptr<core::write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)\r
{ \r
- return image_mixer_.create_frame(tag, desc);\r
- }\r
-\r
- boost::unique_future<safe_ptr<core::write_frame>> create_frame2(const void* tag, const core::pixel_format_desc& desc)\r
- { \r
- return image_mixer_.create_frame2(tag, desc);\r
- }\r
- \r
- void set_transform(int index, const frame_transform& transform, unsigned int mix_duration, const std::wstring& tween)\r
- {\r
- critical_section::scoped_lock lock(mutex_);\r
-\r
- auto src = transforms_[index].fetch();\r
- auto dst = transform;\r
- transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
+ return make_safe<write_frame>(ogl_, tag, desc);\r
}\r
\r
- void apply_transform(int index, const std::function<frame_transform(frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
+ void set_blend_mode(int index, blend_mode::type value)\r
{\r
- critical_section::scoped_lock lock(mutex_);\r
-\r
- auto src = transforms_[index].fetch();\r
- auto dst = transform(src);\r
- transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
+ executor_.begin_invoke([=]\r
+ {\r
+ blend_modes_[index] = value;\r
+ }, high_priority);\r
}\r
-\r
- void clear_transforms()\r
+ \r
+ void set_video_format_desc(const video_format_desc& format_desc)\r
{\r
- critical_section::scoped_lock lock(mutex_);\r
-\r
- transforms_.clear();\r
- blend_modes_.clear();\r
+ executor_.begin_invoke([=]\r
+ {\r
+ tbb::spin_mutex::scoped_lock lock(format_desc_mutex_);\r
+ format_desc_ = format_desc;\r
+ });\r
}\r
- \r
- void set_blend_mode(int index, blend_mode::type value)\r
- {\r
- critical_section::scoped_lock lock(mutex_);\r
\r
- blend_modes_[index] = value;\r
+ core::video_format_desc get_video_format_desc() const // nothrow\r
+ {\r
+ tbb::spin_mutex::scoped_lock lock(format_desc_mutex_);\r
+ return format_desc_;\r
}\r
\r
- std::wstring print() const\r
+ boost::unique_future<boost::property_tree::wptree> info() const\r
{\r
- return L"mixer";\r
+ boost::promise<boost::property_tree::wptree> info;\r
+ info.set_value(boost::property_tree::wptree());\r
+ return info.get_future();\r
}\r
};\r
\r
-mixer::mixer(mixer::source_t& source, mixer::target_t& target, const video_format_desc& format_desc, ogl_device& ogl) : impl_(new implementation(source, target, format_desc, ogl)){}\r
-core::video_format_desc mixer::get_video_format_desc() const { return impl_->format_desc_; }\r
+mixer::mixer(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<target_t>& target, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl) \r
+ : impl_(new implementation(graph, target, format_desc, ogl)){}\r
+void mixer::send(const std::pair<std::map<int, safe_ptr<core::basic_frame>>, std::shared_ptr<void>>& frames){ impl_->send(frames);}\r
+core::video_format_desc mixer::get_video_format_desc() const { return impl_->get_video_format_desc(); }\r
safe_ptr<core::write_frame> mixer::create_frame(const void* tag, const core::pixel_format_desc& desc){ return impl_->create_frame(tag, desc); } \r
-boost::unique_future<safe_ptr<write_frame>> mixer::create_frame2(const void* video_stream_tag, const pixel_format_desc& desc){ return impl_->create_frame2(video_stream_tag, desc); } \r
-void mixer::set_frame_transform(int index, const core::frame_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform(index, transform, mix_duration, tween);}\r
-void mixer::apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform(index, transform, mix_duration, tween);}\r
-void mixer::clear_transforms(){impl_->clear_transforms();}\r
void mixer::set_blend_mode(int index, blend_mode::type value){impl_->set_blend_mode(index, value);}\r
+void mixer::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
+boost::unique_future<boost::property_tree::wptree> mixer::info() const{return impl_->info();}\r
}}
\ No newline at end of file