Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "image", "modules\image\image.vcxproj", "{3E11FF65-A9DA-4F80-87F2-A7C6379ED5E2}"\r
EndProject\r
Global\r
+ GlobalSection(SubversionScc) = preSolution\r
+ Svn-Managed = True\r
+ Manager = AnkhSVN - Subversion Support for Visual Studio\r
+ EndGlobalSection\r
GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
Debug|Win32 = Debug|Win32\r
Develop|Win32 = Develop|Win32\r
{88F974F0-D09F-4788-8CF8-F563209E60C1} = {C54DA43E-4878-45DB-B76D-35970553672C}\r
{3E11FF65-A9DA-4F80-87F2-A7C6379ED5E2} = {C54DA43E-4878-45DB-B76D-35970553672C}\r
EndGlobalSection\r
- GlobalSection(SubversionScc) = preSolution\r
- Svn-Managed = True\r
- Manager = AnkhSVN - Subversion Support for Visual Studio\r
- EndGlobalSection\r
EndGlobal\r
<ClInclude Include="compiler\vs\disable_silly_warnings.h" />\r
<ClInclude Include="concurrency\com_context.h" />\r
<ClInclude Include="concurrency\executor.h" />\r
+ <ClInclude Include="concurrency\governor.h" />\r
+ <ClInclude Include="concurrency\target.h" />\r
<ClInclude Include="diagnostics\graph.h" />\r
<ClInclude Include="exception\exceptions.h" />\r
<ClInclude Include="exception\win32_exception.h" />\r
<ClInclude Include="utility\move_on_copy.h">\r
<Filter>source\utility</Filter>\r
</ClInclude>\r
+ <ClInclude Include="concurrency\target.h">\r
+ <Filter>source\concurrency</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="concurrency\governor.h">\r
+ <Filter>source\concurrency</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
\r
execution_queue_[priority].push([=]\r
{\r
- try{task_adaptor.value();}\r
- catch(boost::task_already_started&){}\r
- catch(...){CASPAR_LOG_CURRENT_EXCEPTION();}\r
+ try\r
+ {\r
+ task_adaptor.value();\r
+ }\r
+ catch(boost::task_already_started&)\r
+ {\r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ }\r
});\r
\r
if(priority != normal_priority)\r
--- /dev/null
+#pragma once
+
+#include <limits>
+#include <memory>
+
+#include <boost/thread/condition_variable.hpp>
+#include <boost/thread/mutex.hpp>
+
+#include <tbb/atomic.h>
+\r
+namespace caspar {\r
+\r
+typedef std::shared_ptr<void> ticket;\r
+\r
+namespace detail\r
+{ \r
+	// Counting-semaphore implementation shared by all copies of a governor.
+	// "count" is the maximum number of tickets outstanding at any one time.
+	class governor_impl : public std::enable_shared_from_this<governor_impl>
+	{
+		boost::mutex				mutex_;
+		boost::condition_variable	cond_;
+		tbb::atomic<int>			count_;
+	public:
+		governor_impl(size_t count) 
+		{
+			count_ = count;
+		}
+
+		// Blocks until a slot is free, then returns a ticket whose last copy
+		// returns the slot (and wakes one waiter) on destruction.
+		ticket acquire()
+		{
+			{
+				boost::unique_lock<boost::mutex> lock(mutex_);
+				while(count_ < 1) // "< 0" would hand out one ticket too many, e.g. governor(0) would never block.
+					cond_.wait(lock);
+				--count_;
+			}
+
+			// The ticket keeps this impl alive via the captured shared_ptr.
+			auto self = shared_from_this();
+			return ticket(nullptr, [self](void*)
+			{
+				// Mutate the predicate under the lock, otherwise a waiter
+				// sitting between its predicate check and cond_.wait() could
+				// miss this notification (lost wake-up).
+				boost::unique_lock<boost::mutex> lock(self->mutex_);
+				++self->count_;
+				self->cond_.notify_one();
+			});
+		}
+
+		// Unblocks all current and future acquire() calls.
+		void cancel()
+		{
+			boost::unique_lock<boost::mutex> lock(mutex_);
+			count_ = std::numeric_limits<int>::max();	
+			cond_.notify_all();
+		}
+	};
+}\r
+\r
+// RAII counting semaphore. acquire() blocks until one of "count" slots is
+// free and returns a ticket; the slot is returned when the last copy of the
+// ticket is destroyed. cancel() unblocks all current and future waiters.
+// Copies of a governor share the same underlying slot pool.
+class governor
+{
+	std::shared_ptr<detail::governor_impl> impl_;
+public:
+
+	// Creates a governor with "count" available tickets.
+	governor(size_t count) 
+		: impl_(new detail::governor_impl(count))
+	{
+	}
+
+	// Blocks until a ticket is available.
+	ticket acquire()
+	{
+		return impl_->acquire();
+	}
+
+	// Releases all pending and future acquire() calls.
+	void cancel()
+	{
+		impl_->cancel();
+	}
+
+};
+\r
+}
\ No newline at end of file
--- /dev/null
+#pragma once\r
+\r
+namespace caspar {\r
+\r
+// One-way sink interface: implementations receive values of type T.
+template<typename T>
+struct target
+{
+	// Deliver one value to the target. Blocking/error semantics are
+	// defined by the implementation.
+	virtual void send(const T&) = 0;
+
+	// Polymorphic interface: virtual destructor so deleting through a
+	// target<T>* (e.g. core::output, which derives from this) is defined.
+	virtual ~target(){}
+};
+\r
+}
\ No newline at end of file
#include <common/exception/exceptions.h>\r
\r
namespace caspar { namespace core {\r
-\r
-size_t consumer_buffer_depth()\r
-{\r
- return env::properties().get("configuration.consumers.buffer-depth", 5);\r
-}\r
- \r
+ \r
std::vector<const consumer_factory_t> g_factories;\r
\r
void register_consumer_factory(const consumer_factory_t& factory)\r
class read_frame;\r
struct video_format_desc;\r
\r
-size_t consumer_buffer_depth();\r
-\r
struct frame_consumer : boost::noncopyable\r
{\r
virtual ~frame_consumer() {}\r
virtual std::wstring print() const = 0;\r
virtual bool has_synchronization_clock() const {return true;}\r
virtual const core::video_format_desc& get_video_format_desc() const = 0; // nothrow\r
+ virtual size_t buffer_depth() const = 0;\r
\r
static const safe_ptr<frame_consumer>& empty()\r
{\r
virtual std::wstring print() const {return L"empty";}\r
virtual bool has_synchronization_clock() const {return false;}\r
virtual const core::video_format_desc& get_video_format_desc() const{return format_desc;}; // nothrow\r
+ virtual size_t buffer_depth() const {return 0;};\r
};\r
static safe_ptr<frame_consumer> consumer = make_safe<empty_frame_consumer>();\r
return consumer;\r
\r
#include "output.h"\r
\r
-#include "../video_channel_context.h"\r
-\r
#include "../video_format.h"\r
#include "../mixer/gpu/ogl_device.h"\r
#include "../mixer/read_frame.h"\r
#include <common/utility/assert.h>\r
#include <common/utility/timer.h>\r
#include <common/memory/memshfl.h>\r
+#include <common/env.h>\r
\r
-#include <tbb/mutex.h>\r
+#include <boost/circular_buffer.hpp>\r
+#include <boost/timer.hpp>\r
\r
namespace caspar { namespace core {\r
\r
{ \r
typedef std::pair<safe_ptr<read_frame>, safe_ptr<read_frame>> fill_and_key;\r
\r
- video_channel_context& channel_;\r
- const std::function<void()> restart_channel_;\r
+ safe_ptr<diagnostics::graph> graph_;\r
+ boost::timer consume_timer_;\r
+\r
+ video_format_desc format_desc_;\r
\r
std::map<int, safe_ptr<frame_consumer>> consumers_;\r
typedef std::map<int, safe_ptr<frame_consumer>>::value_type layer_t;\r
\r
high_prec_timer timer_;\r
+\r
+ boost::circular_buffer<safe_ptr<read_frame>> frames_;\r
+\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<read_frame>> input_;\r
+\r
+ executor executor_;\r
\r
public:\r
- implementation(video_channel_context& video_channel, const std::function<void()>& restart_channel) \r
- : channel_(video_channel)\r
- , restart_channel_(restart_channel)\r
+ implementation(const safe_ptr<diagnostics::graph>& graph, const video_format_desc& format_desc) \r
+ : graph_(graph)\r
+ , format_desc_(format_desc)\r
+ , executor_(L"output")\r
{\r
+ graph_->set_color("consume-time", diagnostics::color(1.0f, 0.4f, 0.0f));\r
} \r
\r
void add(int index, safe_ptr<frame_consumer>&& consumer)\r
{ \r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
consumers_.erase(index);\r
});\r
\r
- consumer->initialize(channel_.get_format_desc());\r
+ consumer->initialize(format_desc_);\r
\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
consumers_.insert(std::make_pair(index, consumer));\r
\r
\r
void remove(int index)\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
auto it = consumers_.find(index);\r
if(it != consumers_.end())\r
}\r
});\r
}\r
- \r
- void execute(const safe_ptr<read_frame>& frame)\r
- { \r
- if(!has_synchronization_clock())\r
- timer_.tick(1.0/channel_.get_format_desc().fps);\r
-\r
- if(frame->image_size() != channel_.get_format_desc().size)\r
- {\r
- timer_.tick(1.0/channel_.get_format_desc().fps);\r
- return;\r
- }\r
- \r
- auto it = consumers_.begin();\r
- while(it != consumers_.end())\r
+ \r
+ void set_video_format_desc(const video_format_desc& format_desc)\r
+ {\r
+ executor_.invoke([&]\r
{\r
- auto consumer = it->second;\r
-\r
- if(consumer->get_video_format_desc() != channel_.get_format_desc())\r
- consumer->initialize(channel_.get_format_desc());\r
+ format_desc_ = format_desc;\r
\r
- try\r
- {\r
- if(consumer->send(frame))\r
+ auto it = consumers_.begin();\r
+ while(it != consumers_.end())\r
+ { \r
+ try\r
+ {\r
+ it->second->initialize(format_desc_);\r
++it;\r
- else\r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
consumers_.erase(it++);\r
+ }\r
}\r
- catch(...)\r
+\r
+ frames_.clear();\r
+ });\r
+ }\r
+ \r
+	// Returns the smallest and largest buffer_depth() over all consumers,
+	// or (0, 0) when there are none. Single O(n) pass instead of building
+	// and sorting an intermediate vector just to read its ends.
+	std::pair<size_t, size_t> minmax_buffer_depth() const
+	{	
+		if(consumers_.empty())
+			return std::make_pair(0, 0);
+
+		auto it = consumers_.begin();
+		size_t min_depth = it->second->buffer_depth();
+		size_t max_depth = min_depth;
+		for(++it; it != consumers_.end(); ++it)
+		{
+			const size_t depth = it->second->buffer_depth();
+			min_depth = std::min(min_depth, depth);
+			max_depth = std::max(max_depth, depth);
+		}
+		return std::make_pair(min_depth, max_depth);
+	}
+\r
+ void send(const std::pair<safe_ptr<read_frame>, ticket>& packet)\r
+ {\r
+ executor_.begin_invoke([=]\r
+ {\r
+ try\r
{\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- CASPAR_LOG(warning) << "Trying to restart consumer: " << consumer->print() << L".";\r
- try\r
+ consume_timer_.restart();\r
+\r
+ auto input_frame = packet.first;\r
+\r
+ if(!has_synchronization_clock())\r
+ timer_.tick(1.0/format_desc_.fps);\r
+\r
+ if(input_frame->image_size() != format_desc_.size)\r
{\r
- consumer->initialize(channel_.get_format_desc());\r
- consumer->send(frame);\r
+ timer_.tick(1.0/format_desc_.fps);\r
+ return;\r
}\r
- catch(...)\r
- { \r
- CASPAR_LOG_CURRENT_EXCEPTION(); \r
- CASPAR_LOG(warning) << "Consumer restart failed, trying to restart channel: " << consumer->print() << L"."; \r
+ \r
+ const auto minmax = minmax_buffer_depth();\r
+\r
+ frames_.set_capacity(minmax.second - minmax.first + 1);\r
+ frames_.push_back(input_frame);\r
\r
+ if(!frames_.full())\r
+ return;\r
+\r
+ auto it = consumers_.begin();\r
+ while(it != consumers_.end())\r
+ {\r
+ auto consumer = it->second;\r
+ auto frame = frames_.at(consumer->buffer_depth()-minmax.first);\r
+ \r
try\r
{\r
- restart_channel_();\r
- consumer->initialize(channel_.get_format_desc());\r
- consumer->send(frame);\r
+ if(consumer->send(frame))\r
+ ++it;\r
+ else\r
+ consumers_.erase(it++);\r
}\r
catch(...)\r
{\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
- CASPAR_LOG(error) << "Failed to recover consumer: " << consumer->print() << L". Removing it.";\r
- consumers_.erase(it++);\r
+ try\r
+ {\r
+ consumer->initialize(format_desc_);\r
+ if(consumer->send(frame))\r
+ ++it;\r
+ else\r
+ consumers_.erase(it++);\r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ CASPAR_LOG(error) << "Failed to recover consumer: " << consumer->print() << L". Removing it.";\r
+ consumers_.erase(it++);\r
+ }\r
}\r
}\r
+ \r
+ graph_->update_value("consume-time", consume_timer_.elapsed()*format_desc_.fps*0.5);\r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
}\r
- }\r
+ });\r
}\r
\r
private:\r
}\r
};\r
\r
-output::output(video_channel_context& video_channel, const std::function<void()>& restart_channel) : impl_(new implementation(video_channel, restart_channel)){}\r
+output::output(const safe_ptr<diagnostics::graph>& graph, const video_format_desc& format_desc) : impl_(new implementation(graph, format_desc)){}\r
void output::add(int index, safe_ptr<frame_consumer>&& consumer){impl_->add(index, std::move(consumer));}\r
void output::remove(int index){impl_->remove(index);}\r
-void output::execute(const safe_ptr<read_frame>& frame) {impl_->execute(frame); }\r
+void output::send(const std::pair<safe_ptr<read_frame>, ticket>& frame) {impl_->send(frame); }\r
+void output::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
}}
\ No newline at end of file
#include "../consumer/frame_consumer.h"\r
\r
#include <common/memory/safe_ptr.h>\r
+#include <common/concurrency/target.h>\r
+#include <common/concurrency/governor.h>\r
+#include <common/diagnostics/graph.h>\r
\r
#include <boost/noncopyable.hpp>\r
\r
\r
class video_channel_context;\r
\r
-class output : boost::noncopyable\r
+class output : public target<std::pair<safe_ptr<read_frame>, ticket>>, boost::noncopyable\r
{\r
public:\r
- explicit output(video_channel_context& video_channel, const std::function<void()>& restart_channel);\r
+ explicit output(const safe_ptr<diagnostics::graph>& graph, const video_format_desc& format_desc);\r
\r
void add(int index, safe_ptr<frame_consumer>&& consumer);\r
void remove(int index);\r
+ \r
+ void set_video_format_desc(const video_format_desc& format_desc);\r
\r
- void execute(const safe_ptr<read_frame>& frame); // nothrow\r
+ virtual void send(const std::pair<safe_ptr<read_frame>, ticket>& frame); // nothrow\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
<ClInclude Include="mixer\image\blend_modes.h" />\r
<ClInclude Include="mixer\image\image_shader.h" />\r
<ClInclude Include="video_channel.h" />\r
- <ClInclude Include="video_channel_context.h" />\r
<ClInclude Include="consumer\output.h" />\r
<ClInclude Include="consumer\frame_consumer.h" />\r
<ClInclude Include="mixer\audio\audio_mixer.h" />\r
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">Create</PrecompiledHeader>\r
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">Create</PrecompiledHeader>\r
</ClCompile>\r
- <ClCompile Include="video_channel_context.cpp" />\r
<ClCompile Include="video_format.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">StdAfx.h</PrecompiledHeaderFile>\r
<ClInclude Include="video_channel.h">\r
<Filter>source</Filter>\r
</ClInclude>\r
- <ClInclude Include="video_channel_context.h">\r
- <Filter>source</Filter>\r
- </ClInclude>\r
<ClInclude Include="mixer\image\blending_glsl.h">\r
<Filter>source\mixer\image</Filter>\r
</ClInclude>\r
<ClCompile Include="video_channel.cpp">\r
<Filter>source</Filter>\r
</ClCompile>\r
- <ClCompile Include="video_channel_context.cpp">\r
- <Filter>source</Filter>\r
- </ClCompile>\r
<ClCompile Include="video_format.cpp">\r
<Filter>source</Filter>\r
</ClCompile>\r
{\r
std::stack<core::frame_transform> transform_stack_;\r
std::map<const void*, core::frame_transform> prev_frame_transforms_;\r
- const core::video_format_desc format_desc_;\r
std::vector<audio_item> items_;\r
\r
public:\r
- implementation(const core::video_format_desc& format_desc)\r
- : format_desc_(format_desc)\r
+ implementation()\r
{\r
transform_stack_.push(core::frame_transform());\r
}\r
\r
void visit(core::write_frame& frame)\r
{\r
- // We only care about the last field.\r
- if(format_desc_.field_mode == field_mode::upper && transform_stack_.top().field_mode == field_mode::upper)\r
- return;\r
-\r
- if(format_desc_.field_mode == field_mode::lower && transform_stack_.top().field_mode == field_mode::lower)\r
- return;\r
-\r
- // Skip empty audio.\r
if(transform_stack_.top().volume < 0.002 || frame.audio_data().empty())\r
return;\r
\r
audio_item item;\r
item.tag = frame.tag();\r
item.transform = transform_stack_.top();\r
- item.audio_data = std::move(frame.audio_data());\r
+ item.audio_data = std::move(frame.audio_data()); // Note: We don't need to care about upper/lower since audio_data is removed/moved from the last field.\r
\r
items_.push_back(item); \r
}\r
transform_stack_.pop();\r
}\r
\r
- audio_buffer mix()\r
+ audio_buffer mix(const video_format_desc& format_desc)\r
{ \r
+ CASPAR_ASSERT(format_desc.audio_channels == 2);\r
+ CASPAR_ASSERT(format_desc.audio_samples_per_frame % 4 == 0);\r
+\r
// NOTE: auto data should be larger than format_desc_.audio_samples_per_frame to allow sse to read/write beyond size.\r
\r
- auto intermediate = std::vector<float, tbb::cache_aligned_allocator<float>>(format_desc_.audio_samples_per_frame+128, 0.0f);\r
- auto result = audio_buffer(format_desc_.audio_samples_per_frame+128); \r
+ auto intermediate = std::vector<float, tbb::cache_aligned_allocator<float>>(format_desc.audio_samples_per_frame+128, 0.0f);\r
+ auto result = audio_buffer(format_desc.audio_samples_per_frame+128); \r
auto result_128 = reinterpret_cast<__m128i*>(result.data());\r
\r
std::map<const void*, core::frame_transform> next_frame_transforms;\r
- \r
+ \r
BOOST_FOREACH(auto& item, items_)\r
- { \r
- const auto next = item.transform;\r
+ { \r
+ auto next = item.transform;\r
auto prev = next;\r
\r
const auto it = prev_frame_transforms_.find(item.tag);\r
prev = it->second;\r
\r
next_frame_transforms[item.tag] = next; // Store all active tags, inactive tags will be removed at the end.\r
-\r
- if(next.volume < 0.001 && prev.volume < 0.001)\r
- continue;\r
- \r
- if(static_cast<size_t>(item.audio_data.size()) != format_desc_.audio_samples_per_frame)\r
+ \r
+ if(prev.volume < 0.001 && next.volume < 0.001)\r
continue;\r
\r
- CASPAR_ASSERT(format_desc_.audio_channels == 2);\r
- CASPAR_ASSERT(format_desc_.audio_samples_per_frame % 4 == 0);\r
- \r
const float prev_volume = static_cast<float>(prev.volume);\r
const float next_volume = static_cast<float>(next.volume);\r
\r
- auto alpha = (next_volume-prev_volume)/static_cast<float>(format_desc_.audio_samples_per_frame/format_desc_.audio_channels);\r
+ auto alpha = (next_volume-prev_volume)/static_cast<float>(format_desc.audio_samples_per_frame/format_desc.audio_channels);\r
auto alpha_ps = _mm_set_ps1(alpha*2.0f);\r
auto volume_ps = _mm_setr_ps(prev_volume, prev_volume, prev_volume+alpha, prev_volume+alpha);\r
\r
if(&item != &items_.back())\r
{\r
- for(size_t n = 0; n < format_desc_.audio_samples_per_frame/4; ++n)\r
+ for(size_t n = 0; n < format_desc.audio_samples_per_frame/4; ++n)\r
{ \r
auto sample_ps = _mm_cvtepi32_ps(_mm_load_si128(reinterpret_cast<__m128i*>(&item.audio_data[n*4])));\r
auto res_sample_ps = _mm_load_ps(&intermediate[n*4]); \r
}\r
else\r
{\r
- for(size_t n = 0; n < format_desc_.audio_samples_per_frame/4; ++n)\r
+ for(size_t n = 0; n < format_desc.audio_samples_per_frame/4; ++n)\r
{ \r
auto sample_ps = _mm_cvtepi32_ps(_mm_load_si128(reinterpret_cast<__m128i*>(&item.audio_data[n*4])));\r
auto res_sample_ps = _mm_load_ps(&intermediate[n*4]); \r
items_.clear();\r
prev_frame_transforms_ = std::move(next_frame_transforms); \r
\r
- result.resize(format_desc_.audio_samples_per_frame);\r
+ result.resize(format_desc.audio_samples_per_frame);\r
return std::move(result);\r
}\r
};\r
\r
-audio_mixer::audio_mixer(const core::video_format_desc& format_desc) : impl_(new implementation(format_desc)){}\r
+audio_mixer::audio_mixer() : impl_(new implementation()){}\r
void audio_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}\r
void audio_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
void audio_mixer::end(){impl_->end();}\r
-audio_buffer audio_mixer::mix(){return impl_->mix();}\r
+audio_buffer audio_mixer::mix(const video_format_desc& format_desc){return impl_->mix(format_desc);}\r
audio_mixer& audio_mixer::operator=(audio_mixer&& other)\r
{\r
impl_ = std::move(other.impl_);\r
class audio_mixer : public core::frame_visitor, boost::noncopyable\r
{\r
public:\r
- audio_mixer(const core::video_format_desc& format_desc);\r
+ audio_mixer();\r
\r
virtual void begin(core::basic_frame& frame);\r
virtual void visit(core::write_frame& frame);\r
virtual void end();\r
\r
- audio_buffer mix();\r
+ audio_buffer mix(const video_format_desc& format_desc);\r
\r
audio_mixer& operator=(audio_mixer&& other);\r
private:\r
static tbb::atomic<size_t> count;\r
static tbb::atomic<bool> warned;\r
\r
- if(delay > 2 && ++count > 32)\r
+ if(delay > 2 && ++count > 50)\r
{\r
if(!warned.fetch_and_store(true))\r
{\r
if (glewInit() != GLEW_OK)\r
BOOST_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize GLEW."));\r
\r
+ CASPAR_LOG(info) << L"OpenGL " << version();\r
+\r
if(!GLEW_VERSION_3_0)\r
CASPAR_LOG(warning) << "Missing OpenGL 3.0 support.";\r
\r
}, high_priority);\r
}\r
\r
-std::wstring ogl_device::get_version()\r
+std::wstring ogl_device::version()\r
{ \r
static std::wstring ver = L"Not found";\r
try\r
{\r
- auto tmp = ogl_device::create();\r
- ver = widen(tmp->invoke([]{return std::string(reinterpret_cast<const char*>(glGetString(GL_VERSION)));})\r
- + " " + tmp->invoke([]{return std::string(reinterpret_cast<const char*>(glGetString(GL_VENDOR)));})); \r
+ ver = widen(invoke([]{return std::string(reinterpret_cast<const char*>(glGetString(GL_VERSION)));})\r
+ + " " + invoke([]{return std::string(reinterpret_cast<const char*>(glGetString(GL_VENDOR)));})); \r
}\r
catch(...){}\r
\r
void yield();\r
boost::unique_future<void> gc();\r
\r
- static std::wstring get_version();\r
+ std::wstring version();\r
\r
private:\r
safe_ptr<device_buffer> allocate_device_buffer(size_t width, size_t height, size_t stride);\r
#include <core/producer/frame/pixel_format.h>\r
#include <core/producer/frame/frame_transform.h>\r
\r
-#include <GL/glew.h>\r
-\r
#include <boost/noncopyable.hpp>\r
\r
-#include <unordered_map>\r
-\r
namespace caspar { namespace core {\r
\r
GLubyte upper_pattern[] = {\r
\r
struct image_kernel::implementation : boost::noncopyable\r
{ \r
- std::shared_ptr<shader> shader_;\r
+ safe_ptr<ogl_device> ogl_;\r
+ safe_ptr<shader> shader_;\r
bool blend_modes_;\r
\r
- void draw(ogl_device& ogl, draw_params&& params)\r
+ implementation(const safe_ptr<ogl_device>& ogl)\r
+ : ogl_(ogl)\r
+ , shader_(ogl_->invoke([&]{return get_image_shader(*ogl, blend_modes_);}))\r
+ {\r
+ }\r
+\r
+ void draw(draw_params&& params)\r
{\r
static const double epsilon = 0.001;\r
\r
\r
if(!std::all_of(params.textures.begin(), params.textures.end(), std::mem_fn(&device_buffer::ready)))\r
{\r
- CASPAR_LOG(warning) << L"[image_mixer] Performance warning. Host to device transfer not complete, GPU will be stalled";\r
- ogl.yield(); // Try to give it some more time.\r
+ CASPAR_LOG(trace) << L"[image_mixer] Performance warning. Host to device transfer not complete, GPU will be stalled";\r
+ ogl_->yield(); // Try to give it some more time.\r
} \r
\r
// Bind textures\r
params.layer_key->bind(texture_id::layer_key);\r
\r
// Setup shader\r
-\r
- if(!shader_)\r
- shader_ = get_image_shader(ogl, blend_modes_);\r
- \r
- ogl.use(*shader_);\r
+ \r
+ ogl_->use(*shader_);\r
\r
shader_->set("plane[0]", texture_id::plane0);\r
shader_->set("plane[1]", texture_id::plane1);\r
switch(params.keyer)\r
{\r
case keyer::additive:\r
- ogl.blend_func(GL_ONE, GL_ONE); \r
+ ogl_->blend_func(GL_ONE, GL_ONE); \r
break;\r
case keyer::linear:\r
default: \r
- ogl.blend_func(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); \r
+ ogl_->blend_func(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); \r
} \r
}\r
\r
// Setup interlacing\r
\r
if(params.transform.field_mode == core::field_mode::progressive) \r
- ogl.disable(GL_POLYGON_STIPPLE); \r
+ ogl_->disable(GL_POLYGON_STIPPLE); \r
else \r
{\r
- ogl.enable(GL_POLYGON_STIPPLE);\r
+ ogl_->enable(GL_POLYGON_STIPPLE);\r
\r
if(params.transform.field_mode == core::field_mode::upper)\r
- ogl.stipple_pattern(upper_pattern);\r
+ ogl_->stipple_pattern(upper_pattern);\r
else if(params.transform.field_mode == core::field_mode::lower)\r
- ogl.stipple_pattern(lower_pattern);\r
+ ogl_->stipple_pattern(lower_pattern);\r
}\r
\r
// Setup drawing area\r
\r
- ogl.viewport(0, 0, params.background->width(), params.background->height());\r
+ ogl_->viewport(0, 0, params.background->width(), params.background->height());\r
\r
auto m_p = params.transform.clip_translation;\r
auto m_s = params.transform.clip_scale;\r
double w = static_cast<double>(params.background->width());\r
double h = static_cast<double>(params.background->height());\r
\r
- ogl.enable(GL_SCISSOR_TEST);\r
- ogl.scissor(static_cast<size_t>(m_p[0]*w), static_cast<size_t>(m_p[1]*h), static_cast<size_t>(m_s[0]*w), static_cast<size_t>(m_s[1]*h));\r
+ ogl_->enable(GL_SCISSOR_TEST);\r
+ ogl_->scissor(static_cast<size_t>(m_p[0]*w), static_cast<size_t>(m_p[1]*h), static_cast<size_t>(m_s[0]*w), static_cast<size_t>(m_s[1]*h));\r
}\r
\r
auto f_p = params.transform.fill_translation;\r
\r
// Set render target\r
\r
- ogl.attach(*params.background);\r
+ ogl_->attach(*params.background);\r
\r
// Draw\r
-\r
+ \r
glBegin(GL_QUADS);\r
glMultiTexCoord2d(GL_TEXTURE0, 0.0, 0.0); glMultiTexCoord2d(GL_TEXTURE1, f_p[0] , f_p[1] ); glVertex2d( f_p[0] *2.0-1.0, f_p[1] *2.0-1.0);\r
glMultiTexCoord2d(GL_TEXTURE0, 1.0, 0.0); glMultiTexCoord2d(GL_TEXTURE1, (f_p[0]+f_s[0]), f_p[1] ); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, f_p[1] *2.0-1.0);\r
glMultiTexCoord2d(GL_TEXTURE0, 1.0, 1.0); glMultiTexCoord2d(GL_TEXTURE1, (f_p[0]+f_s[0]), (f_p[1]+f_s[1])); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
glMultiTexCoord2d(GL_TEXTURE0, 0.0, 1.0); glMultiTexCoord2d(GL_TEXTURE1, f_p[0] , (f_p[1]+f_s[1])); glVertex2d( f_p[0] *2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
glEnd();\r
-\r
+ \r
// Cleanup\r
\r
- ogl.disable(GL_SCISSOR_TEST); \r
- \r
+ ogl_->disable(GL_SCISSOR_TEST); \r
+ \r
params.textures.clear();\r
- ogl.yield(); // Return resources to pool as early as possible.\r
+ ogl_->yield(); // Return resources to pool as early as possible.\r
\r
if(blend_modes_)\r
{\r
}\r
};\r
\r
-image_kernel::image_kernel() : impl_(new implementation()){}\r
-void image_kernel::draw(ogl_device& ogl, draw_params&& params)\r
+image_kernel::image_kernel(const safe_ptr<ogl_device>& ogl) : impl_(new implementation(ogl)){}\r
+void image_kernel::draw(draw_params&& params)\r
{\r
- impl_->draw(ogl, std::move(params));\r
+ impl_->draw(std::move(params));\r
}\r
\r
}}
\ No newline at end of file
class image_kernel : boost::noncopyable\r
{\r
public:\r
- image_kernel();\r
- void draw(ogl_device& ogl, draw_params&& params);\r
+ image_kernel(const safe_ptr<ogl_device>& ogl);\r
+ void draw(draw_params&& params);\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
#include "../gpu/ogl_device.h"\r
#include "../gpu/host_buffer.h"\r
#include "../gpu/device_buffer.h"\r
-#include "../../video_channel_context.h"\r
\r
#include <common/exception/exceptions.h>\r
#include <common/gl/gl_check.h>\r
\r
class image_renderer\r
{\r
- safe_ptr<ogl_device> ogl_;\r
- const video_format_desc format_desc_;\r
- image_kernel kernel_; \r
- std::shared_ptr<device_buffer> transferring_buffer_;\r
+ safe_ptr<ogl_device> ogl_;\r
+ image_kernel kernel_; \r
+ std::shared_ptr<device_buffer> transferring_buffer_;\r
public:\r
- image_renderer(const safe_ptr<ogl_device>& ogl, const video_format_desc& format_desc)\r
+ image_renderer(const safe_ptr<ogl_device>& ogl)\r
: ogl_(ogl)\r
- , format_desc_(format_desc)\r
+ , kernel_(ogl_)\r
{\r
}\r
\r
- boost::unique_future<safe_ptr<host_buffer>> operator()(std::vector<layer>&& layers)\r
+ boost::unique_future<safe_ptr<host_buffer>> operator()(std::vector<layer>&& layers, const video_format_desc& format_desc)\r
{ \r
auto layers2 = make_move_on_copy(std::move(layers));\r
return ogl_->begin_invoke([=]\r
{\r
- return do_render(std::move(layers2.value));\r
+ return do_render(std::move(layers2.value), format_desc);\r
});\r
}\r
- \r
+\r
private:\r
- safe_ptr<host_buffer> do_render(std::vector<layer>&& layers)\r
+ safe_ptr<host_buffer> do_render(std::vector<layer>&& layers, const video_format_desc& format_desc)\r
{\r
- auto draw_buffer = create_mixer_buffer(4);\r
+ auto draw_buffer = create_mixer_buffer(4, format_desc);\r
\r
- if(format_desc_.field_mode != field_mode::progressive)\r
+ if(format_desc.field_mode != field_mode::progressive)\r
{\r
auto upper = layers;\r
auto lower = std::move(layers);\r
item.transform.field_mode = static_cast<field_mode::type>(item.transform.field_mode & field_mode::lower);\r
}\r
\r
- draw(std::move(upper), draw_buffer);\r
- draw(std::move(lower), draw_buffer);\r
+ draw(std::move(upper), draw_buffer, format_desc);\r
+ draw(std::move(lower), draw_buffer, format_desc);\r
}\r
else\r
{\r
- draw(std::move(layers), draw_buffer);\r
+ draw(std::move(layers), draw_buffer, format_desc);\r
}\r
\r
- auto host_buffer = ogl_->create_host_buffer(format_desc_.size, host_buffer::read_only);\r
+ auto host_buffer = ogl_->create_host_buffer(format_desc.size, host_buffer::read_only);\r
ogl_->attach(*draw_buffer);\r
host_buffer->begin_read(draw_buffer->width(), draw_buffer->height(), format(draw_buffer->stride()));\r
\r
}\r
\r
void draw(std::vector<layer>&& layers, \r
- safe_ptr<device_buffer>& draw_buffer)\r
+ safe_ptr<device_buffer>& draw_buffer, \r
+ const video_format_desc& format_desc)\r
{\r
std::shared_ptr<device_buffer> layer_key_buffer;\r
\r
BOOST_FOREACH(auto& layer, layers)\r
- draw_layer(std::move(layer), draw_buffer, layer_key_buffer);\r
+ draw_layer(std::move(layer), draw_buffer, layer_key_buffer, format_desc);\r
}\r
\r
void draw_layer(layer&& layer, \r
safe_ptr<device_buffer>& draw_buffer,\r
- std::shared_ptr<device_buffer>& layer_key_buffer)\r
+ std::shared_ptr<device_buffer>& layer_key_buffer,\r
+ const video_format_desc& format_desc)\r
{ \r
boost::remove_erase_if(layer.second, [](const item& item){return item.transform.field_mode == field_mode::empty;});\r
\r
\r
if(layer.first != blend_mode::normal)\r
{\r
- auto layer_draw_buffer = create_mixer_buffer(4);\r
+ auto layer_draw_buffer = create_mixer_buffer(4, format_desc);\r
\r
BOOST_FOREACH(auto& item, layer.second)\r
- draw_item(std::move(item), layer_draw_buffer, layer_key_buffer, local_key_buffer, local_mix_buffer); \r
+ draw_item(std::move(item), layer_draw_buffer, layer_key_buffer, local_key_buffer, local_mix_buffer, format_desc); \r
\r
draw_mixer_buffer(layer_draw_buffer, std::move(local_mix_buffer), blend_mode::normal); \r
draw_mixer_buffer(draw_buffer, std::move(layer_draw_buffer), layer.first);\r
else // fast path\r
{\r
BOOST_FOREACH(auto& item, layer.second) \r
- draw_item(std::move(item), draw_buffer, layer_key_buffer, local_key_buffer, local_mix_buffer); \r
+ draw_item(std::move(item), draw_buffer, layer_key_buffer, local_key_buffer, local_mix_buffer, format_desc); \r
\r
draw_mixer_buffer(draw_buffer, std::move(local_mix_buffer), blend_mode::normal);\r
} \r
safe_ptr<device_buffer>& draw_buffer, \r
std::shared_ptr<device_buffer>& layer_key_buffer, \r
std::shared_ptr<device_buffer>& local_key_buffer, \r
- std::shared_ptr<device_buffer>& local_mix_buffer)\r
+ std::shared_ptr<device_buffer>& local_mix_buffer,\r
+ const video_format_desc& format_desc)\r
{ \r
draw_params draw_params;\r
draw_params.pix_desc = std::move(item.pix_desc);\r
\r
if(item.transform.is_key)\r
{\r
- local_key_buffer = local_key_buffer ? local_key_buffer : create_mixer_buffer(1);\r
+ local_key_buffer = local_key_buffer ? local_key_buffer : create_mixer_buffer(1, format_desc);\r
\r
draw_params.background = local_key_buffer;\r
draw_params.local_key = nullptr;\r
draw_params.layer_key = nullptr;\r
\r
- kernel_.draw(*ogl_, std::move(draw_params));\r
+ kernel_.draw(std::move(draw_params));\r
}\r
else if(item.transform.is_mix)\r
{\r
- local_mix_buffer = local_mix_buffer ? local_mix_buffer : create_mixer_buffer(4);\r
+ local_mix_buffer = local_mix_buffer ? local_mix_buffer : create_mixer_buffer(4, format_desc);\r
\r
draw_params.background = local_mix_buffer;\r
draw_params.local_key = std::move(local_key_buffer);\r
\r
draw_params.keyer = keyer::additive;\r
\r
- kernel_.draw(*ogl_, std::move(draw_params));\r
+ kernel_.draw(std::move(draw_params));\r
}\r
else\r
{\r
draw_params.local_key = std::move(local_key_buffer);\r
draw_params.layer_key = layer_key_buffer;\r
\r
- kernel_.draw(*ogl_, std::move(draw_params));\r
+ kernel_.draw(std::move(draw_params));\r
} \r
}\r
\r
draw_params.blend_mode = blend_mode;\r
draw_params.background = draw_buffer;\r
\r
- kernel_.draw(*ogl_, std::move(draw_params));\r
+ kernel_.draw(std::move(draw_params));\r
}\r
\r
- safe_ptr<device_buffer> create_mixer_buffer(size_t stride)\r
+ safe_ptr<device_buffer> create_mixer_buffer(size_t stride, const video_format_desc& format_desc)\r
{\r
- auto buffer = ogl_->create_device_buffer(format_desc_.width, format_desc_.height, stride);\r
+ auto buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, stride);\r
ogl_->clear(*buffer);\r
return buffer;\r
}\r
std::vector<frame_transform> transform_stack_;\r
std::vector<layer> layers_; // layer/stream/items\r
public:\r
- implementation(const safe_ptr<ogl_device>& ogl, const video_format_desc& format_desc) \r
+ implementation(const safe_ptr<ogl_device>& ogl) \r
: ogl_(ogl)\r
- , renderer_(ogl, format_desc)\r
+ , renderer_(ogl)\r
, transform_stack_(1) \r
{\r
}\r
{ \r
}\r
\r
- boost::unique_future<safe_ptr<host_buffer>> render()\r
+ boost::unique_future<safe_ptr<host_buffer>> render(const video_format_desc& format_desc)\r
{\r
- return renderer_(std::move(layers_));\r
+ return renderer_(std::move(layers_), format_desc);\r
}\r
\r
safe_ptr<write_frame> create_frame(const void* tag, const pixel_format_desc& desc)\r
}\r
};\r
\r
-image_mixer::image_mixer(const safe_ptr<ogl_device>& ogl, const video_format_desc& format_desc) : impl_(new implementation(ogl, format_desc)){}\r
+image_mixer::image_mixer(const safe_ptr<ogl_device>& ogl) : impl_(new implementation(ogl)){}\r
void image_mixer::begin(basic_frame& frame){impl_->begin(frame);}\r
void image_mixer::visit(write_frame& frame){impl_->visit(frame);}\r
void image_mixer::end(){impl_->end();}\r
-boost::unique_future<safe_ptr<host_buffer>> image_mixer::render(){return impl_->render();}\r
+boost::unique_future<safe_ptr<host_buffer>> image_mixer::render(const video_format_desc& format_desc){return impl_->render(format_desc);}\r
safe_ptr<write_frame> image_mixer::create_frame(const void* tag, const pixel_format_desc& desc){return impl_->create_frame(tag, desc);}\r
void image_mixer::begin_layer(blend_mode::type blend_mode){impl_->begin_layer(blend_mode);}\r
void image_mixer::end_layer(){impl_->end_layer();}\r
class image_mixer : public core::frame_visitor, boost::noncopyable\r
{\r
public:\r
- image_mixer(const safe_ptr<ogl_device>& ogl, const video_format_desc& format_desc);\r
+ image_mixer(const safe_ptr<ogl_device>& ogl);\r
\r
virtual void begin(core::basic_frame& frame);\r
virtual void visit(core::write_frame& frame);\r
\r
image_mixer& operator=(image_mixer&& other);\r
\r
- boost::unique_future<safe_ptr<host_buffer>> render();\r
+ boost::unique_future<safe_ptr<host_buffer>> render(const video_format_desc& format_desc);\r
\r
safe_ptr<write_frame> create_frame(const void* tag, const pixel_format_desc& format);\r
-\r
+ \r
+ void set_video_format_desc(const video_format_desc& format_desc);\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
\r
try\r
{ \r
- g_blend_modes = glTextureBarrierNV ? env::properties().get("configuration.mixers.blend-modes", false) : false;\r
+ g_blend_modes = glTextureBarrierNV ? env::properties().get("configuration.blend-modes", false) : false;\r
g_shader.reset(new shader(get_vertex(), get_fragment(g_blend_modes)));\r
}\r
catch(...)\r
#include "audio/audio_mixer.h"\r
#include "image/image_mixer.h"\r
\r
-#include "../video_channel_context.h"\r
-\r
-#include <common/exception/exceptions.h>\r
-#include <common/concurrency/executor.h>\r
-#include <common/utility/tweener.h>\r
#include <common/env.h>\r
+#include <common/concurrency/executor.h>\r
+#include <common/exception/exceptions.h>\r
#include <common/gl/gl_check.h>\r
+#include <common/utility/tweener.h>\r
\r
#include <core/mixer/read_frame.h>\r
#include <core/mixer/write_frame.h>\r
#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/frame/frame_factory.h>\r
-#include <core/producer/frame/pixel_format.h>\r
#include <core/producer/frame/frame_transform.h>\r
+#include <core/producer/frame/pixel_format.h>\r
\r
#include <core/video_format.h>\r
\r
-#include <boost/fusion/container/map.hpp>\r
-#include <boost/fusion/include/at_key.hpp>\r
#include <boost/foreach.hpp>\r
+#include <boost/timer.hpp>\r
\r
-#include <tbb/parallel_invoke.h>\r
+#include <tbb/concurrent_queue.h>\r
+#include <tbb/spin_mutex.h>\r
\r
#include <unordered_map>\r
\r
\r
struct mixer::implementation : boost::noncopyable\r
{ \r
- video_channel_context& channel_;\r
+ safe_ptr<diagnostics::graph> graph_;\r
+ boost::timer mix_timer_;\r
+\r
+ safe_ptr<mixer::target_t> target_;\r
+ mutable tbb::spin_mutex format_desc_mutex_;\r
+ video_format_desc format_desc_;\r
+ safe_ptr<ogl_device> ogl_;\r
\r
audio_mixer audio_mixer_;\r
image_mixer image_mixer_;\r
\r
std::unordered_map<int, tweened_transform<core::frame_transform>> transforms_; \r
std::unordered_map<int, blend_mode::type> blend_modes_;\r
-\r
- std::queue<std::pair<boost::unique_future<safe_ptr<host_buffer>>, core::audio_buffer>> buffer_;\r
- \r
- const size_t buffer_size_;\r
+ \r
+ executor executor_;\r
\r
public:\r
- implementation(video_channel_context& video_channel) \r
- : channel_(video_channel)\r
- , audio_mixer_(channel_.get_format_desc())\r
- , image_mixer_(channel_.ogl(), channel_.get_format_desc())\r
- , buffer_size_(env::properties().get("configuration.producers.buffer-depth", 1))\r
- { \r
- CASPAR_LOG(info) << print() << L" Successfully initialized."; \r
+ implementation(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<mixer::target_t>& target, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl) \r
+ : graph_(graph)\r
+ , target_(target)\r
+ , format_desc_(format_desc)\r
+ , ogl_(ogl)\r
+ , image_mixer_(ogl)\r
+ , executor_(L"mixer")\r
+ { \r
+ graph_->set_color("mix-time", diagnostics::color(1.0f, 0.0f, 0.9f));\r
}\r
- \r
- safe_ptr<read_frame> execute(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
+ \r
+ void send(const std::pair<std::map<int, safe_ptr<core::basic_frame>>, ticket>& packet)\r
{ \r
- try\r
- {\r
- BOOST_FOREACH(auto& frame, frames)\r
+ executor_.begin_invoke([=]\r
+ { \r
+ try\r
{\r
- auto blend_it = blend_modes_.find(frame.first);\r
- image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+ mix_timer_.restart();\r
+\r
+ auto frames = packet.first;\r
\r
- auto frame1 = make_safe<core::basic_frame>(frame.second);\r
- frame1->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
-\r
- if(channel_.get_format_desc().field_mode != core::field_mode::progressive)\r
- { \r
- auto frame2 = make_safe<core::basic_frame>(frame.second);\r
- frame2->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
- frame1 = core::basic_frame::interlace(frame1, frame2, channel_.get_format_desc().field_mode);\r
- }\r
+ BOOST_FOREACH(auto& frame, frames)\r
+ {\r
+ auto blend_it = blend_modes_.find(frame.first);\r
+ image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+ \r
+ auto frame1 = make_safe<core::basic_frame>(frame.second);\r
+ frame1->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+\r
+ if(format_desc_.field_mode != core::field_mode::progressive)\r
+ { \r
+ auto frame2 = make_safe<core::basic_frame>(frame.second);\r
+ frame2->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+ frame1 = core::basic_frame::interlace(frame1, frame2, format_desc_.field_mode);\r
+ }\r
\r
- frame1->accept(audio_mixer_); \r
- frame1->accept(image_mixer_);\r
+ frame1->accept(audio_mixer_); \r
+ frame1->accept(image_mixer_);\r
\r
- image_mixer_.end_layer();\r
- }\r
+ image_mixer_.end_layer();\r
+ }\r
\r
- auto image = image_mixer_.render();\r
- auto audio = audio_mixer_.mix();\r
- \r
- buffer_.push(std::make_pair(std::move(image), audio));\r
+ auto image = image_mixer_.render(format_desc_);\r
+ auto audio = audio_mixer_.mix(format_desc_);\r
+ image.wait();\r
\r
- if(buffer_.size()-1 < buffer_size_) \r
- return make_safe<read_frame>();\r
- \r
- auto res = std::move(buffer_.front());\r
- buffer_.pop();\r
+ graph_->update_value("mix-time", mix_timer_.elapsed()*format_desc_.fps*0.5);\r
\r
- return make_safe<read_frame>(channel_.ogl(), channel_.get_format_desc().size, std::move(res.first.get()), std::move(res.second)); \r
- }\r
- catch(...)\r
- {\r
- CASPAR_LOG(error) << L"[mixer] Error detected.";\r
- throw;\r
- } \r
+ target_->send(std::make_pair(make_safe<read_frame>(ogl_, format_desc_.size, std::move(image.get()), std::move(audio)), packet.second)); \r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ } \r
+ }); \r
}\r
\r
safe_ptr<core::write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)\r
{ \r
return image_mixer_.create_frame(tag, desc);\r
}\r
+ \r
+ safe_ptr<core::write_frame> create_frame(const void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt)\r
+ {\r
+ // Create bgra frame\r
+ core::pixel_format_desc desc;\r
+ desc.pix_fmt = pix_fmt;\r
+ desc.planes.push_back(core::pixel_format_desc::plane(width, height, 4));\r
+ return create_frame(tag, desc);\r
+ }\r
\r
void set_transform(int index, const frame_transform& transform, unsigned int mix_duration, const std::wstring& tween)\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
auto src = transforms_[index].fetch();\r
auto dst = transform;\r
\r
void apply_transform(int index, const std::function<frame_transform(frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
auto src = transforms_[index].fetch();\r
auto dst = transform(src);\r
}, high_priority);\r
}\r
\r
+ void clear_transforms(int index)\r
+ {\r
+ executor_.invoke([&]\r
+ {\r
+ transforms_.erase(index);\r
+ blend_modes_.erase(index);\r
+ }, high_priority);\r
+ }\r
+\r
void clear_transforms()\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
transforms_.clear();\r
blend_modes_.clear();\r
\r
void set_blend_mode(int index, blend_mode::type value)\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
blend_modes_[index] = value;\r
}, high_priority);\r
}\r
+ \r
+ void set_video_format_desc(const video_format_desc& format_desc)\r
+ {\r
+ executor_.begin_invoke([=]\r
+ {\r
+ tbb::spin_mutex::scoped_lock lock(format_desc_mutex_);\r
+ format_desc_ = format_desc;\r
+ });\r
+ }\r
\r
- std::wstring print() const\r
+ core::video_format_desc get_video_format_desc() const // nothrow\r
{\r
- return L"mixer";\r
+ tbb::spin_mutex::scoped_lock lock(format_desc_mutex_);\r
+ return format_desc_;\r
}\r
};\r
\r
-mixer::mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
-safe_ptr<core::read_frame> mixer::execute(const std::map<int, safe_ptr<core::basic_frame>>& frames){ return impl_->execute(frames);}\r
-core::video_format_desc mixer::get_video_format_desc() const { return impl_->channel_.get_format_desc(); }\r
+mixer::mixer(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<target_t>& target, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl) \r
+ : impl_(new implementation(graph, target, format_desc, ogl)){}\r
+void mixer::send(const std::pair<std::map<int, safe_ptr<core::basic_frame>>, ticket>& frames){ impl_->send(frames);}\r
+core::video_format_desc mixer::get_video_format_desc() const { return impl_->get_video_format_desc(); }\r
safe_ptr<core::write_frame> mixer::create_frame(const void* tag, const core::pixel_format_desc& desc){ return impl_->create_frame(tag, desc); } \r
safe_ptr<core::write_frame> mixer::create_frame(const void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt)\r
{\r
// Create bgra frame\r
core::pixel_format_desc desc;\r
desc.pix_fmt = pix_fmt;\r
- desc.planes.push_back( core::pixel_format_desc::plane(width, height, 4));\r
+ desc.planes.push_back(core::pixel_format_desc::plane(width, height, 4));\r
return create_frame(tag, desc);\r
}\r
void mixer::set_frame_transform(int index, const core::frame_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform(index, transform, mix_duration, tween);}\r
-void mixer::apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform(index, transform, mix_duration, tween);}\r
+void mixer::apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
+{impl_->apply_transform(index, transform, mix_duration, tween);}\r
+void mixer::clear_transforms(int index){impl_->clear_transforms(index);}\r
void mixer::clear_transforms(){impl_->clear_transforms();}\r
void mixer::set_blend_mode(int index, blend_mode::type value){impl_->set_blend_mode(index, value);}\r
+void mixer::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
}}
\ No newline at end of file
#include "../producer/frame/frame_factory.h"\r
\r
#include <common/memory/safe_ptr.h>\r
+#include <common/concurrency/target.h>\r
+#include <common/concurrency/governor.h>\r
+#include <common/diagnostics/graph.h>\r
\r
#include <map>\r
\r
class read_frame;\r
class write_frame;\r
class basic_frame;\r
+class ogl_device;\r
struct frame_transform;\r
-struct frame_transform;\r
-class video_channel_context;;\r
struct pixel_format;\r
\r
-class mixer : public core::frame_factory\r
+class mixer : public target<std::pair<std::map<int, safe_ptr<core::basic_frame>>, ticket>>, public core::frame_factory\r
{\r
public: \r
+ typedef target<std::pair<safe_ptr<read_frame>, ticket>> target_t;\r
\r
- explicit mixer(video_channel_context& video_channel);\r
+ explicit mixer(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<target_t>& target, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl);\r
\r
- safe_ptr<core::read_frame> execute(const std::map<int, safe_ptr<core::basic_frame>>& frames); // nothrow\r
+ virtual void send(const std::pair<std::map<int, safe_ptr<basic_frame>>, ticket>& frames); // nothrow\r
\r
safe_ptr<core::write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc); \r
- safe_ptr<core::write_frame> create_frame(const void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt = core::pixel_format::bgra); \r
+ safe_ptr<core::write_frame> create_frame(const void* tag, size_t width, size_t height, pixel_format::type pix_fmt = pixel_format::bgra); \r
\r
core::video_format_desc get_video_format_desc() const; // nothrow\r
-\r
+ void set_video_format_desc(const video_format_desc& format_desc);\r
\r
void set_frame_transform(int index, const core::frame_transform& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
- void apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
+ void apply_frame_transform(int index, const std::function<frame_transform(frame_transform)>& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
+ void clear_transforms(int index);\r
void clear_transforms();\r
\r
void set_blend_mode(int index, blend_mode::type value);\r
-\r
+ \r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
, tag_(tag)\r
, mode_(core::field_mode::progressive)\r
{\r
- ogl_->invoke([&]\r
+ std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(buffers_), [&](const core::pixel_format_desc::plane& plane)\r
{\r
- std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(buffers_), [&](const core::pixel_format_desc::plane& plane)\r
- {\r
- return ogl_->create_host_buffer(plane.size, host_buffer::write_only);\r
- });\r
- std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(textures_), [&](const core::pixel_format_desc::plane& plane)\r
- {\r
- return ogl_->create_device_buffer(plane.width, plane.height, plane.channels); \r
- });\r
- }, high_priority);\r
+ return ogl_->create_host_buffer(plane.size, host_buffer::write_only);\r
+ });\r
+ std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(textures_), [&](const core::pixel_format_desc::plane& plane)\r
+ {\r
+ return ogl_->create_device_buffer(plane.width, plane.height, plane.channels); \r
+ });\r
}\r
\r
void accept(write_frame& self, core::frame_visitor& visitor)\r
auto ptr = static_cast<uint8_t*>(buffers_[index]->data());\r
return boost::iterator_range<uint8_t*>(ptr, ptr+buffers_[index]->size());\r
}\r
-\r
- const boost::iterator_range<const uint8_t*> image_data(size_t index) const\r
- {\r
- if(index >= buffers_.size() || !buffers_[index]->data())\r
- return boost::iterator_range<const uint8_t*>();\r
- auto ptr = static_cast<const uint8_t*>(buffers_[index]->data());\r
- return boost::iterator_range<const uint8_t*>(ptr, ptr+buffers_[index]->size());\r
- }\r
-\r
+ \r
void commit()\r
{\r
for(size_t n = 0; n < buffers_.size(); ++n)\r
\r
boost::iterator_range<uint8_t*> write_frame::image_data(size_t index){return impl_->image_data(index);}\r
audio_buffer& write_frame::audio_data() { return impl_->audio_data_; }\r
-const boost::iterator_range<const uint8_t*> write_frame::image_data(size_t index) const\r
-{\r
- return boost::iterator_range<const uint8_t*>(impl_->image_data(index).begin(), impl_->image_data(index).end());\r
-}\r
const boost::iterator_range<const int32_t*> write_frame::audio_data() const\r
{\r
return boost::iterator_range<const int32_t*>(impl_->audio_data_.data(), impl_->audio_data_.data() + impl_->audio_data_.size());\r
void swap(write_frame& other);\r
\r
boost::iterator_range<uint8_t*> image_data(size_t plane_index = 0); \r
- const boost::iterator_range<const uint8_t*> image_data(size_t plane_index = 0) const;\r
\r
audio_buffer& audio_data();\r
const boost::iterator_range<const int32_t*> audio_data() const;\r
frame->accept(visitor);\r
visitor.end();\r
} \r
-\r
- std::wstring print() const\r
- {\r
- std::wstring str = L"\tbasic_frame[\n";\r
- BOOST_FOREACH(auto& frame, frames_)\r
- str += frame->print() + L"\n";\r
- str += L"\n]";\r
- return str;\r
- }\r
};\r
\r
basic_frame::basic_frame() : impl_(new implementation(std::vector<safe_ptr<basic_frame>>())){}\r
\r
const frame_transform& basic_frame::get_frame_transform() const { return impl_->frame_transform_;}\r
frame_transform& basic_frame::get_frame_transform() { return impl_->frame_transform_;}\r
-\r
-std::wstring basic_frame::print() const{return impl_->print();}\r
void basic_frame::accept(frame_visitor& visitor){impl_->accept(*this, visitor);}\r
\r
safe_ptr<basic_frame> basic_frame::interlace(const safe_ptr<basic_frame>& frame1, const safe_ptr<basic_frame>& frame2, field_mode::type mode)\r
}\r
\r
virtual void accept(frame_visitor& visitor);\r
-\r
- virtual std::wstring print() const;\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
virtual safe_ptr<basic_frame> receive(int hints) {return (*producer_)->receive(hints);}\r
virtual safe_ptr<basic_frame> last_frame() const {return (*producer_)->last_frame();}\r
virtual std::wstring print() const {return (*producer_)->print();}\r
- virtual void param(const std::wstring& str) {(*producer_)->param(str);}\r
+ virtual std::wstring param(const std::wstring& str) {return (*producer_)->param(str);}\r
virtual safe_ptr<frame_producer> get_following_producer() const {return (*producer_)->get_following_producer();}\r
virtual void set_leading_producer(const safe_ptr<frame_producer>& producer) {(*producer_)->set_leading_producer(producer);}\r
virtual int64_t nb_frames() const {return (*producer_)->nb_frames();}\r
\r
virtual std::wstring print() const = 0; // nothrow\r
\r
- virtual void param(const std::wstring&){}\r
+ virtual std::wstring param(const std::wstring&){return L"";}\r
\r
virtual safe_ptr<frame_producer> get_following_producer() const {return frame_producer::empty();} // nothrow\r
virtual void set_leading_producer(const safe_ptr<frame_producer>&) {} // nothrow\r
#include <boost/noncopyable.hpp>\r
\r
#include <string>\r
-#include <utility>\r
\r
namespace caspar { namespace core {\r
\r
void play(); // nothrow\r
void pause(); // nothrow\r
void stop(); // nothrow\r
- void param(const std::wstring& param);\r
\r
bool is_paused() const;\r
int64_t frame_number() const;\r
\r
#include "layer.h"\r
\r
-#include "../video_channel_context.h"\r
-\r
-#include <core/producer/frame/basic_frame.h>\r
-#include <core/producer/frame/frame_factory.h>\r
+#include "frame/basic_frame.h"\r
+#include "frame/frame_factory.h"\r
\r
#include <common/concurrency/executor.h>\r
-#include <common/utility/move_on_copy.h>\r
+#include <common/concurrency/governor.h>\r
+#include <common/env.h>\r
\r
#include <boost/foreach.hpp>\r
+#include <boost/timer.hpp>\r
\r
#include <tbb/parallel_for_each.h>\r
\r
#include <map>\r
-#include <set>\r
\r
namespace caspar { namespace core {\r
\r
struct stage::implementation : boost::noncopyable\r
{ \r
- std::map<int, layer> layers_; \r
- video_channel_context& channel_;\r
+ safe_ptr<diagnostics::graph> graph_;\r
+ safe_ptr<stage::target_t> target_;\r
+ video_format_desc format_desc_;\r
+\r
+ boost::timer produce_timer_;\r
+ boost::timer tick_timer_;\r
+\r
+ std::map<int, layer> layers_; \r
+\r
+ governor governor_;\r
+ executor executor_;\r
public:\r
- implementation(video_channel_context& video_channel) \r
- : channel_(video_channel)\r
+ implementation(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<stage::target_t>& target, const video_format_desc& format_desc) \r
+ : graph_(graph)\r
+ , format_desc_(format_desc)\r
+ , target_(target)\r
+ , executor_(L"stage")\r
+ , governor_(std::max(1, env::properties().get("configuration.pipeline-tokens", 2)))\r
+ {\r
+ graph_->add_guide("tick-time", 0.5f); \r
+ graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f)); \r
+ graph_->set_color("produce-time", diagnostics::color(0.0f, 1.0f, 0.0f));\r
+\r
+ executor_.begin_invoke([this]{tick();});\r
+ }\r
+\r
+ ~implementation()\r
{\r
+ governor_.cancel();\r
}\r
\r
- std::map<int, safe_ptr<basic_frame>> execute()\r
+ void tick()\r
{ \r
try\r
{\r
+ auto ticket = governor_.acquire();\r
+\r
+ produce_timer_.restart();\r
+\r
std::map<int, safe_ptr<basic_frame>> frames;\r
\r
BOOST_FOREACH(auto& layer, layers_) \r
{\r
frames[layer.first] = layer.second.receive(); \r
});\r
+ \r
+ graph_->update_value("produce-time", produce_timer_.elapsed()*format_desc_.fps*0.5);\r
+ \r
+ target_->send(std::make_pair(frames, ticket));\r
\r
- return frames;\r
+ graph_->update_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);\r
+ tick_timer_.restart();\r
}\r
catch(...)\r
{\r
- CASPAR_LOG(error) << L"[stage] Error detected";\r
- throw;\r
+ layers_.clear();\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
} \r
+ executor_.begin_invoke([this]{tick();});\r
}\r
\r
void load(int index, const safe_ptr<frame_producer>& producer, bool preview, int auto_play_delta)\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
layers_[index].load(producer, preview, auto_play_delta);\r
}, high_priority);\r
\r
void pause(int index)\r
{ \r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
layers_[index].pause();\r
}, high_priority);\r
\r
void play(int index)\r
{ \r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
layers_[index].play();\r
}, high_priority);\r
\r
void stop(int index)\r
{ \r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
layers_[index].stop();\r
}, high_priority);\r
\r
void clear(int index)\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
layers_.erase(index);\r
}, high_priority);\r
\r
void clear()\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
layers_.clear();\r
}, high_priority);\r
\r
void swap_layer(int index, size_t other_index)\r
{\r
- channel_.execution().invoke([&]\r
+ executor_.invoke([&]\r
{\r
std::swap(layers_[index], layers_[other_index]);\r
}, high_priority);\r
{\r
std::swap(layers_[index], other.impl_->layers_[other_index]);\r
}; \r
- channel_.execution().invoke([&]{other.impl_->channel_.execution().invoke(func, high_priority);}, high_priority);\r
+ executor_.invoke([&]{other.impl_->executor_.invoke(func, high_priority);}, high_priority);\r
}\r
}\r
\r
{\r
std::swap(layers_, other.impl_->layers_);\r
}; \r
- channel_.execution().invoke([&]{other.impl_->channel_.execution().invoke(func, high_priority);}, high_priority);\r
+ executor_.invoke([&]{other.impl_->executor_.invoke(func, high_priority);}, high_priority);\r
}\r
\r
layer_status get_status(int index)\r
{ \r
- return channel_.execution().invoke([&]\r
+ return executor_.invoke([&]\r
{\r
return layers_[index].status();\r
}, high_priority );\r
\r
safe_ptr<frame_producer> foreground(int index)\r
{\r
- return channel_.execution().invoke([=]{return layers_[index].foreground();}, high_priority);\r
+ return executor_.invoke([=]{return layers_[index].foreground();}, high_priority);\r
}\r
\r
safe_ptr<frame_producer> background(int index)\r
{\r
- return channel_.execution().invoke([=]{return layers_[index].background();}, high_priority);\r
+ return executor_.invoke([=]{return layers_[index].background();}, high_priority);\r
}\r
-\r
- std::wstring print() const\r
+ \r
+ void set_video_format_desc(const video_format_desc& format_desc)\r
{\r
- return L"stage [" + boost::lexical_cast<std::wstring>(channel_.index()) + L"]";\r
+ executor_.begin_invoke([=]\r
+ {\r
+ format_desc_ = format_desc;\r
+ }, high_priority );\r
}\r
-\r
};\r
\r
-stage::stage(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
+stage::stage(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<target_t>& target, const video_format_desc& format_desc) : impl_(new implementation(graph, target, format_desc)){}\r
void stage::swap(stage& other){impl_->swap(other);}\r
void stage::load(int index, const safe_ptr<frame_producer>& producer, bool preview, int auto_play_delta){impl_->load(index, producer, preview, auto_play_delta);}\r
void stage::pause(int index){impl_->pause(index);}\r
layer_status stage::get_status(int index){return impl_->get_status(index);}\r
safe_ptr<frame_producer> stage::foreground(size_t index) {return impl_->foreground(index);}\r
safe_ptr<frame_producer> stage::background(size_t index) {return impl_->background(index);}\r
-std::map<int, safe_ptr<basic_frame>> stage::execute(){return impl_->execute();}\r
+void stage::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
}}
\ No newline at end of file
#include "frame_producer.h"\r
\r
#include <common/memory/safe_ptr.h>\r
+#include <common/concurrency/target.h>\r
+#include <common/concurrency/governor.h>\r
+#include <common/diagnostics/graph.h>\r
\r
#include <boost/noncopyable.hpp>\r
\r
namespace caspar { namespace core {\r
\r
struct video_format_desc;\r
-class video_channel_context;\r
struct layer_status;\r
\r
class stage : boost::noncopyable\r
{\r
public:\r
- explicit stage(video_channel_context& video_channel);\r
+ typedef target<std::pair<std::map<int, safe_ptr<basic_frame>>, ticket>> target_t;\r
\r
- void swap(stage& other);\r
+ explicit stage(const safe_ptr<diagnostics::graph>& graph, const safe_ptr<target_t>& target, const video_format_desc& format_desc);\r
\r
- std::map<int, safe_ptr<basic_frame>> execute();\r
- \r
+ void swap(stage& other);\r
+ \r
void load(int index, const safe_ptr<frame_producer>& producer, bool preview = false, int auto_play_delta = -1);\r
void pause(int index);\r
void play(int index);\r
layer_status get_status(int index);\r
safe_ptr<frame_producer> foreground(size_t index);\r
safe_ptr<frame_producer> background(size_t index);\r
+ \r
+ void set_video_format_desc(const video_format_desc& format_desc);\r
\r
private:\r
struct implementation;\r
\r
#include "video_channel.h"\r
\r
-#include "video_channel_context.h"\r
#include "video_format.h"\r
\r
#include "consumer/output.h"\r
#include "mixer/mixer.h"\r
+#include "mixer/gpu/ogl_device.h"\r
#include "producer/stage.h"\r
\r
-#include <common/concurrency/executor.h>\r
#include <common/diagnostics/graph.h>\r
\r
-#include "mixer/gpu/ogl_device.h"\r
-\r
-#include <boost/timer.hpp>\r
-\r
-#ifdef _MSC_VER\r
-#pragma warning(disable : 4355)\r
-#endif\r
+#include <string>\r
\r
namespace caspar { namespace core {\r
\r
struct video_channel::implementation : boost::noncopyable\r
{\r
- video_channel_context context_;\r
-\r
- safe_ptr<caspar::core::output> output_;\r
- std::shared_ptr<caspar::core::mixer> mixer_;\r
- safe_ptr<caspar::core::stage> stage_;\r
-\r
- safe_ptr<diagnostics::graph> diag_;\r
- boost::timer frame_timer_;\r
- boost::timer tick_timer_;\r
- boost::timer output_timer_;\r
+ const int index_;\r
+ const video_format_desc format_desc_;\r
+ const safe_ptr<ogl_device> ogl_;\r
+ safe_ptr<diagnostics::graph> graph_;\r
+\r
+ safe_ptr<caspar::core::output> output_;\r
+ safe_ptr<caspar::core::mixer> mixer_;\r
+ safe_ptr<caspar::core::stage> stage_;\r
\r
public:\r
implementation(int index, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl) \r
- : context_(index, ogl, format_desc)\r
- , output_(new caspar::core::output(context_, [this]{restart();}))\r
- , mixer_(new caspar::core::mixer(context_))\r
- , stage_(new caspar::core::stage(context_)) \r
+ : index_(index)\r
+ , format_desc_(format_desc)\r
+ , ogl_(ogl)\r
+ , output_(new caspar::core::output(graph_, format_desc))\r
+ , mixer_(new caspar::core::mixer(graph_, output_, format_desc, ogl))\r
+ , stage_(new caspar::core::stage(graph_, mixer_, format_desc)) \r
{\r
- diag_->add_guide("produce-time", 0.5f); \r
- diag_->set_color("produce-time", diagnostics::color(0.0f, 1.0f, 0.0f));\r
- diag_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f)); \r
- diag_->set_color("output-time", diagnostics::color(1.0f, 0.5f, 0.0f));\r
- diag_->set_color("mix-time", diagnostics::color(1.0f, 1.0f, 0.9f));\r
- diag_->set_text(print());\r
- diagnostics::register_graph(diag_);\r
+ graph_->set_text(print());\r
+ diagnostics::register_graph(graph_);\r
\r
CASPAR_LOG(info) << print() << " Successfully Initialized.";\r
- context_.execution().begin_invoke([this]{tick();});\r
- }\r
-\r
- ~implementation()\r
- {\r
- // Stop context before destroying devices.\r
- context_.execution().stop();\r
- context_.execution().join();\r
- }\r
-\r
- void tick()\r
- {\r
- try\r
- {\r
- // Produce\r
-\r
- frame_timer_.restart();\r
-\r
- auto simple_frames = stage_->execute();\r
-\r
- diag_->update_value("produce-time", frame_timer_.elapsed()*context_.get_format_desc().fps*0.5);\r
- \r
- // Mix\r
-\r
- frame_timer_.restart();\r
-\r
- auto finished_frame = mixer_->execute(simple_frames);\r
- \r
- diag_->update_value("mix-time", frame_timer_.elapsed()*context_.get_format_desc().fps*0.5);\r
- \r
- // Consume\r
- \r
- output_timer_.restart();\r
-\r
- output_->execute(finished_frame);\r
- \r
- diag_->update_value("output-time", frame_timer_.elapsed()*context_.get_format_desc().fps*0.5);\r
-\r
- \r
- diag_->update_value("tick-time", tick_timer_.elapsed()*context_.get_format_desc().fps*0.5);\r
- tick_timer_.restart();\r
- }\r
- catch(...)\r
- {\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- CASPAR_LOG(error) << context_.print() << L" Unexpected exception. Clearing stage and freeing memory";\r
- restart();\r
- }\r
-\r
- context_.execution().begin_invoke([this]{tick();});\r
}\r
-\r
- void restart()\r
+ \r
+ void set_video_format_desc(const video_format_desc& format_desc)\r
{\r
- stage_->clear();\r
- context_.ogl()->gc().wait();\r
-\r
- mixer_ = nullptr;\r
- mixer_.reset(new caspar::core::mixer(context_));\r
+ mixer_->set_video_format_desc(format_desc);\r
+ output_->set_video_format_desc(format_desc);\r
+ ogl_->gc();\r
}\r
\r
std::wstring print() const\r
{\r
- return context_.print();\r
- }\r
-\r
- void set_video_format_desc(const video_format_desc& format_desc)\r
- {\r
- context_.execution().begin_invoke([=]\r
- {\r
- stage_->clear();\r
- context_.ogl()->gc().wait();\r
- context_.set_format_desc(format_desc);\r
- });\r
+ return L"video_channel[" + boost::lexical_cast<std::wstring>(index_+1) + L"|" + format_desc_.name + L"]";\r
}\r
};\r
\r
video_channel::video_channel(int index, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl) : impl_(new implementation(index, format_desc, ogl)){}\r
-video_channel::video_channel(video_channel&& other) : impl_(std::move(other.impl_)){}\r
safe_ptr<stage> video_channel::stage() { return impl_->stage_;} \r
-safe_ptr<mixer> video_channel::mixer() { return make_safe_ptr(impl_->mixer_);} \r
+safe_ptr<mixer> video_channel::mixer() { return impl_->mixer_;} \r
safe_ptr<output> video_channel::output() { return impl_->output_;} \r
-video_format_desc video_channel::get_video_format_desc() const{return impl_->context_.get_format_desc();}\r
+video_format_desc video_channel::get_video_format_desc() const{return impl_->format_desc_;}\r
void video_channel::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
-std::wstring video_channel::print() const { return impl_->print();}\r
-video_channel_context& video_channel::context(){return impl_->context_;}\r
\r
}}
\ No newline at end of file
{\r
public:\r
explicit video_channel(int index, const video_format_desc& format_desc, const safe_ptr<ogl_device>& ogl);\r
- video_channel(video_channel&& other);\r
\r
safe_ptr<stage> stage();\r
safe_ptr<mixer> mixer();\r
safe_ptr<output> output();\r
-\r
- video_channel_context& context();\r
-\r
+ \r
video_format_desc get_video_format_desc() const;\r
void set_video_format_desc(const video_format_desc& format_desc);\r
\r
- std::wstring print() const;\r
-\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
, key_only_(key_only)\r
, executor_(print())\r
{\r
- executor_.set_capacity(core::consumer_buffer_depth());\r
+ executor_.set_capacity(1);\r
\r
graph_->add_guide("tick-time", 0.5);\r
graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f)); \r
graph_->add_guide("frame-time", 0.5f); \r
graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
graph_->set_color("sync-time", diagnostics::color(0.5f, 1.0f, 0.2f));\r
- graph_->set_color("input-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));\r
graph_->set_text(print());\r
diagnostics::register_graph(graph_);\r
\r
{\r
try\r
{\r
- const size_t audio_samples = format_desc_.audio_samples_per_frame;\r
- const size_t audio_nchannels = format_desc_.audio_channels;\r
-\r
frame_timer_.restart();\r
\r
// Copy to local buffers\r
sync_timer_.restart();\r
unsigned long n_field = 0;\r
blue_->wait_output_video_synch(UPD_FMT_FRAME, n_field);\r
- graph_->update_value("sync-time", static_cast<float>(sync_timer_.elapsed()*format_desc_.fps*0.5));\r
+ graph_->update_value("sync-time", sync_timer_.elapsed()*format_desc_.fps*0.5);\r
\r
// Send and display\r
\r
if(embedded_audio_)\r
{ \r
- auto frame_audio = core::audio_32_to_16_sse(frame->audio_data());\r
- auto frame_audio_data = frame_audio.size() != audio_samples ? silence.data() : frame_audio.data(); \r
+ auto frame_audio16 = core::audio_32_to_16_sse(frame->audio_data());\r
\r
- encode_hanc(reinterpret_cast<BLUE_UINT32*>(reserved_frames_.front()->hanc_data()), frame_audio_data, audio_samples, audio_nchannels);\r
+ encode_hanc(reinterpret_cast<BLUE_UINT32*>(reserved_frames_.front()->hanc_data()), frame_audio16.data(), frame->audio_data().size(), format_desc_.audio_channels);\r
\r
blue_->system_buffer_write_async(const_cast<uint8_t*>(reserved_frames_.front()->image_data()), \r
reserved_frames_.front()->image_size(), \r
{\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
}\r
- graph_->set_value("input-buffer", static_cast<double>(executor_.size())/static_cast<double>(executor_.capacity()));\r
});\r
- graph_->set_value("input-buffer", static_cast<double>(executor_.size())/static_cast<double>(executor_.capacity()));\r
}\r
\r
void encode_hanc(BLUE_UINT32* hanc_data, void* audio_data, size_t audio_samples, size_t audio_nchannels)\r
\r
return L"bluefish [" + boost::lexical_cast<std::wstring>(device_index_) + L"]";\r
}\r
+\r
+ size_t buffer_depth() const\r
+ {\r
+ return 1;\r
+ }\r
}; \r
\r
safe_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params)\r
bool internal_key;\r
bool low_latency;\r
bool key_only;\r
+ size_t base_buffer_depth;\r
size_t buffer_depth;\r
\r
configuration()\r
: device_index(1)\r
, embedded_audio(false)\r
, internal_key(false)\r
- , low_latency(false)\r
+ , low_latency(true)\r
, key_only(false)\r
- , buffer_depth(core::consumer_buffer_depth()){}\r
+ , base_buffer_depth(3)\r
+ , buffer_depth(base_buffer_depth + (low_latency ? 0 : 1) + (embedded_audio ? 1 : 0)){}\r
};\r
\r
class decklink_frame : public IDeckLinkVideoFrame\r
, keyer_(decklink_)\r
, model_name_(get_model_name(decklink_))\r
, format_desc_(format_desc)\r
- , buffer_size_(config.embedded_audio ? config.buffer_depth + 1 : config.buffer_depth) // Minimum buffer-size 3.\r
+ , buffer_size_(config.buffer_depth) // Minimum buffer-size 3.\r
, frames_scheduled_(0)\r
, audio_scheduled_(0)\r
, preroll_count_(0)\r
{\r
return format_desc_;\r
}\r
+\r
+ virtual size_t buffer_depth() const\r
+ {\r
+ return config_.buffer_depth;\r
+ }\r
}; \r
\r
safe_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params) \r
{\r
configuration config;\r
\r
- config.internal_key = ptree.get("internal-key", config.internal_key);\r
- config.low_latency = ptree.get("low-latency", config.low_latency);\r
- config.key_only = ptree.get("key-only", config.key_only);\r
- config.device_index = ptree.get("device", config.device_index);\r
- config.embedded_audio = ptree.get("embedded-audio", config.embedded_audio);\r
+ config.internal_key = ptree.get("internal-key", config.internal_key);\r
+ config.low_latency = ptree.get("low-latency", config.low_latency);\r
+ config.key_only = ptree.get("key-only", config.key_only);\r
+ config.device_index = ptree.get("device", config.device_index);\r
+ config.embedded_audio = ptree.get("embedded-audio", config.embedded_audio);\r
+ config.base_buffer_depth = ptree.get("buffer-depth", config.base_buffer_depth);\r
\r
return make_safe<decklink_consumer_proxy>(config);\r
}\r
\r
\r
/* File created by MIDL compiler version 7.00.0555 */\r
-/* at Sat Nov 19 12:04:44 2011\r
+/* at Sat Nov 19 12:16:19 2011\r
*/\r
/* Compiler settings for interop\DeckLinkAPI.idl:\r
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 7.00.0555 \r
\r
\r
/* File created by MIDL compiler version 7.00.0555 */\r
-/* at Sat Nov 19 12:04:44 2011\r
+/* at Sat Nov 19 12:16:19 2011\r
*/\r
/* Compiler settings for interop\DeckLinkAPI.idl:\r
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 7.00.0555 \r
<< msg_info(narrow(print()) + " Failed to start input stream.")\r
<< boost::errinfo_api_function("StartStreams"));\r
\r
+ if(format_desc_.duration == 1001)\r
+ CASPAR_LOG(warning) << print() << L"Audio not supported in NTSC frame-rates.";\r
+\r
CASPAR_LOG(info) << print() << L" Successfully Initialized.";\r
}\r
\r
muxer_.push(av_frame); \r
\r
// It is assumed that audio is always equal or ahead of video.\r
- if(audio && SUCCEEDED(audio->GetBytes(&bytes)))\r
+ if(audio && SUCCEEDED(audio->GetBytes(&bytes)) && format_desc_.duration != 1001)\r
{\r
auto sample_frame_count = audio->GetSampleFrameCount();\r
auto audio_data = reinterpret_cast<int32_t*>(bytes);\r
}\r
\r
nb_frames = std::min(static_cast<int64_t>(length_), nb_frames);\r
-\r
nb_frames = muxer_->calc_nb_frames(nb_frames);\r
\r
// TODO: Might need to scale nb_frames av frame_muxer transformations.\r
, start_(start)\r
, length_(length)\r
, frame_number_(0)\r
- { \r
+ { \r
+ buffer_size_ = 0;\r
+ nb_frames_ = 0;\r
+ nb_loops_ = 0;\r
+\r
buffer_size_ = 0;\r
nb_frames_ = 0;\r
nb_loops_ = 0;\r
: display_mode_(display_mode::invalid)\r
, in_fps_(in_fps)\r
, format_desc_(frame_factory->get_video_format_desc())\r
- , auto_transcode_(env::properties().get("configuration.producers.auto-transcode", false))\r
+ , auto_transcode_(env::properties().get("configuration.auto-transcode", true))\r
, audio_sample_count_(0)\r
, video_frame_count_(0)\r
, frame_factory_(frame_factory)\r
\r
display_mode_ = get_display_mode(mode, fps, format_desc_.field_mode, format_desc_.fps);\r
\r
- if(display_mode_ == display_mode::simple && mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && frame.height != static_cast<int>(format_desc_.height))\r
+ if((frame.height != 480 || format_desc_.height != 486) && \r
+ display_mode_ == display_mode::simple && mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && \r
+ frame.height != static_cast<int>(format_desc_.height))\r
+ {\r
display_mode_ = display_mode::deinterlace_bob_reinterlace; // The frame will most likely be scaled, we need to deinterlace->reinterlace \r
- \r
+ }\r
+\r
if(display_mode_ == display_mode::deinterlace)\r
filter_str_ = append_filter(filter_str_, L"YADIF=0:-1");\r
else if(display_mode_ == display_mode::deinterlace_bob || display_mode_ == display_mode::deinterlace_bob_reinterlace)\r
write->commit(n);\r
}\r
}\r
+\r
+ if(decoded_frame->height == 480) // NTSC DV\r
+ {\r
+ write->get_frame_transform().fill_translation[1] += 2.0/static_cast<double>(frame_factory->get_video_format_desc().height);\r
+ write->get_frame_transform().fill_scale[1] = 1.0 - 6.0*1.0/static_cast<double>(frame_factory->get_video_format_desc().height);\r
+ }\r
\r
// Fix field-order if needed\r
if(write->get_type() == core::field_mode::lower && frame_factory->get_video_format_desc().field_mode == core::field_mode::upper)\r
{\r
CASPAR_LOG(error) << print_() << L" Error: \n-------------------------------------------\n" << str << L"\n-------------------------------------------";\r
}\r
+ else if(str.find(TEXT("OnDebug")) != std::wstring::npos)\r
+ {\r
+ CASPAR_LOG(error) << print_() << L" Debug: \n-------------------------------------------\n" << str << L"\n-------------------------------------------";\r
+ }\r
+ //else if(str.find(TEXT("OnTemplateDescription")) != std::wstring::npos)\r
+ //{\r
+ // CASPAR_LOG(error) << print_() << L" TemplateDescription: \n-------------------------------------------\n" << str << L"\n-------------------------------------------";\r
+ //}\r
+ //else if(str.find(TEXT("OnGetInfo")) != std::wstring::npos)\r
+ //{\r
+ // CASPAR_LOG(error) << print_() << L" Info: \n-------------------------------------------\n" << str << L"\n-------------------------------------------";\r
+ //}\r
+ //else\r
+ //{\r
+ // CASPAR_LOG(error) << print_() << L" Unknown: \n-------------------------------------------\n" << str << L"\n-------------------------------------------";\r
+ //}\r
\r
CComPtr<IShockwaveFlash> spFlash;\r
HRESULT hr = m_spOleObject->QueryInterface(__uuidof(IShockwaveFlash), (void**) &spFlash);\r
}\r
}\r
\r
-bool FlashAxContainer::FlashCall(const std::wstring& str)\r
+bool FlashAxContainer::FlashCall(const std::wstring& str, std::wstring& result2)\r
{\r
CComBSTR result;\r
CComPtr<IShockwaveFlash> spFlash;\r
bCallSuccessful_ = false;\r
for(size_t retries = 0; !bCallSuccessful_ && retries < 4; ++retries)\r
spFlash->CallFunction(request, &result);\r
+\r
+ result2 = result;\r
+\r
return bCallSuccessful_;\r
}\r
\r
// static ATL::CComObject<FlashAxContainer>* CreateInstance();\r
\r
void Tick();\r
- bool FlashCall(const std::wstring& str);\r
+ bool FlashCall(const std::wstring& str, std::wstring& result);\r
bool DrawControl(HDC targetDC);\r
bool InvalidRect() const { return bInvalidRect_; } \r
bool IsEmpty() const { return bIsEmpty_; }\r
flash_producer_->param(str);\r
}\r
\r
- void invoke(int layer, const std::wstring& label)\r
+ std::wstring invoke(int layer, const std::wstring& label)\r
{\r
auto str = (boost::wformat(L"<invoke name=\"Invoke\" returntype=\"xml\"><arguments><array><property id=\"0\"><number>%1%</number></property></array><string>%2%</string></arguments></invoke>") % layer % label).str();\r
CASPAR_LOG(info) << flash_producer_->print() << " Invoking invoke-command: " << str;\r
- flash_producer_->param(str);\r
+ return flash_producer_->param(str);\r
+ }\r
+\r
+ std::wstring description(int layer)\r
+ {\r
+ auto str = (boost::wformat(L"<invoke name=\"GetDescription\" returntype=\"xml\"><arguments><array><property id=\"0\"><number>%1%</number></property></array></arguments></invoke>") % layer).str();\r
+ CASPAR_LOG(info) << flash_producer_->print() << " Invoking description-command: " << str;\r
+ return flash_producer_->param(str);\r
+ }\r
+\r
+ std::wstring info()\r
+ {\r
+ auto str = (boost::wformat(L"<invoke name=\"GetInfo\" returntype=\"xml\"><arguments></arguments></invoke>")).str();\r
+ CASPAR_LOG(info) << flash_producer_->print() << " Invoking info-command: " << str;\r
+ return flash_producer_->param(str);\r
+ }\r
+\r
+ virtual std::wstring param(const std::wstring& str)\r
+ { \r
+ return flash_producer_->param(str);\r
}\r
\r
virtual safe_ptr<core::basic_frame> receive(int hints)\r
virtual safe_ptr<core::basic_frame> last_frame() const\r
{\r
return flash_producer_->last_frame();\r
- } \r
+ } \r
\r
std::wstring print() const\r
{\r
void cg_producer::stop(int layer, unsigned int mix_out_duration){impl_->stop(layer, mix_out_duration);}\r
void cg_producer::next(int layer){impl_->next(layer);}\r
void cg_producer::update(int layer, const std::wstring& data){impl_->update(layer, data);}\r
-void cg_producer::invoke(int layer, const std::wstring& label){impl_->invoke(layer, label);}\r
+std::wstring cg_producer::invoke(int layer, const std::wstring& label){return impl_->invoke(layer, label);}\r
std::wstring cg_producer::print() const{return impl_->print();}\r
+std::wstring cg_producer::param(const std::wstring& str){return impl_->param(str);}\r
+std::wstring cg_producer::description(int layer){return impl_->description(layer);}\r
+std::wstring cg_producer::info(){return impl_->info();}\r
\r
}}
\ No newline at end of file
virtual safe_ptr<core::basic_frame> receive(int);\r
virtual safe_ptr<core::basic_frame> last_frame() const;\r
virtual std::wstring print() const;\r
+ virtual std::wstring param(const std::wstring&);\r
\r
//cg_producer\r
void add(int layer, const std::wstring& template_name, bool play_on_load, const std::wstring& start_from_label = TEXT(""), const std::wstring& data = TEXT(""));\r
void stop(int layer, unsigned int mix_out_duration);\r
void next(int layer);\r
void update(int layer, const std::wstring& data);\r
- void invoke(int layer, const std::wstring& label);\r
+ std::wstring invoke(int layer, const std::wstring& label);\r
+ std::wstring description(int layer);\r
+ std::wstring info();\r
\r
private:\r
struct implementation;\r
try\r
{\r
std::vector<template_host> template_hosts;\r
- BOOST_FOREACH(auto& xml_mapping, env::properties().get_child("configuration.producers.template-hosts"))\r
+ BOOST_FOREACH(auto& xml_mapping, env::properties().get_child("configuration.template-hosts"))\r
{\r
try\r
{\r
\r
ax_->SetSize(width_, height_); \r
\r
- CASPAR_LOG(info) << print() << L" Thread started.";\r
CASPAR_LOG(info) << print() << L" Successfully initialized with template-host: " << filename << L" width: " << width_ << L" height: " << height_ << L".";\r
}\r
\r
ax_->DestroyAxControl();\r
ax_->Release();\r
}\r
- CASPAR_LOG(info) << print() << L" Thread ended.";\r
+ CASPAR_LOG(info) << print() << L" Uninitialized.";\r
}\r
\r
- void param(const std::wstring& param)\r
+ std::wstring param(const std::wstring& param)\r
{ \r
- if(!ax_->FlashCall(param))\r
+ std::wstring result;\r
+\r
+ if(!ax_->FlashCall(param, result))\r
CASPAR_LOG(warning) << print() << L" Flash call failed:" << param;//BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Flash function call failed.") << arg_name_info("param") << arg_value_info(narrow(param)));\r
graph_->add_tag("param");\r
+\r
+ return result;\r
}\r
\r
safe_ptr<core::basic_frame> render_frame(bool has_underflow)\r
return last_frame_;\r
} \r
\r
- virtual void param(const std::wstring& param) \r
+ virtual std::wstring param(const std::wstring& param) \r
{ \r
- context_.begin_invoke([=]\r
+ return context_.invoke([=]() -> std::wstring\r
{\r
if(!context_)\r
initialize();\r
\r
try\r
{\r
- context_->param(param); \r
+ return context_->param(param); \r
\r
//const auto& format_desc = frame_factory_->get_video_format_desc();\r
//if(abs(context_->fps() - format_desc.fps) > 0.01 && abs(context_->fps()/2.0 - format_desc.fps) > 0.01)\r
context_.reset(nullptr);\r
frame_buffer_.push(core::basic_frame::empty());\r
}\r
+\r
+ return L"";\r
});\r
}\r
\r
struct image_consumer : public core::frame_consumer\r
{\r
core::video_format_desc format_desc_;\r
- std::vector<safe_ptr<core::read_frame>> frames_;\r
public:\r
\r
virtual void initialize(const core::video_format_desc& format_desc)\r
\r
virtual bool send(const safe_ptr<core::read_frame>& frame)\r
{ \r
- frames_.push_back(frame);\r
-\r
- if(frames_.size() < core::consumer_buffer_depth())\r
- return true;\r
-\r
- auto my_frame = frames_.front();\r
- boost::thread async([=]\r
+ auto format_desc = format_desc_;\r
+ boost::thread async([format_desc, frame]\r
{\r
try\r
{\r
auto filename = narrow(env::data_folder()) + boost::posix_time::to_iso_string(boost::posix_time::second_clock::local_time()) + ".png";\r
\r
- auto bitmap = std::shared_ptr<FIBITMAP>(FreeImage_Allocate(format_desc_.width, format_desc_.height, 32), FreeImage_Unload);\r
- memcpy(FreeImage_GetBits(bitmap.get()), my_frame->image_data().begin(), my_frame->image_size());\r
+ auto bitmap = std::shared_ptr<FIBITMAP>(FreeImage_Allocate(format_desc.width, format_desc.height, 32), FreeImage_Unload);\r
+ memcpy(FreeImage_GetBits(bitmap.get()), frame->image_data().begin(), frame->image_size());\r
FreeImage_FlipVertical(bitmap.get());\r
FreeImage_Save(FIF_PNG, bitmap.get(), filename.c_str(), 0);\r
}\r
{\r
return format_desc_;\r
}\r
+\r
+ virtual size_t buffer_depth() const \r
+ {\r
+ return 0;\r
+ }\r
};\r
\r
safe_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params)\r
: container_(16)\r
, preroll_count_(0)\r
{\r
- if(core::consumer_buffer_depth() < 3)\r
- BOOST_THROW_EXCEPTION(invalid_argument() << msg_info("audio-consumer does not support buffer-depth lower than 3."));\r
-\r
graph_->add_guide("tick-time", 0.5);\r
graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f)); \r
graph_->set_text(print());\r
diagnostics::register_graph(graph_);\r
\r
is_running_ = true;\r
- input_.set_capacity(core::consumer_buffer_depth()-2);\r
+ input_.set_capacity(1);\r
}\r
\r
~oal_consumer()\r
{\r
format_desc_ = format_desc; \r
sf::SoundStream::Initialize(2, 48000);\r
+ Play(); \r
CASPAR_LOG(info) << print() << " Sucessfully initialized.";\r
}\r
\r
virtual bool send(const safe_ptr<core::read_frame>& frame)\r
{ \r
- if(preroll_count_ < input_.capacity())\r
- {\r
- while(input_.try_push(std::make_shared<std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>>>(format_desc_.audio_samples_per_frame, 0)))\r
- ++preroll_count_;\r
- Play(); \r
- }\r
-\r
input_.push(std::make_shared<std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>>>(core::audio_32_to_16_sse(frame->audio_data())));\r
\r
return true;\r
{\r
return format_desc_;\r
}\r
+\r
+ virtual size_t buffer_depth() const\r
+ {\r
+ return 2;\r
+ }\r
};\r
\r
safe_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params)\r
safe_ptr<diagnostics::graph> graph_;\r
boost::timer perf_timer_;\r
\r
- boost::circular_buffer<safe_ptr<core::read_frame>> input_buffer_;\r
tbb::concurrent_bounded_queue<safe_ptr<core::read_frame>> frame_buffer_;\r
\r
boost::thread thread_;\r
, screen_height_(format_desc.height)\r
, square_width_(format_desc.square_width)\r
, square_height_(format_desc.square_height)\r
- , input_buffer_(core::consumer_buffer_depth()-1)\r
, filter_(format_desc.field_mode == core::field_mode::progressive || !config.auto_deinterlace ? L"" : L"YADIF=0:-1", boost::assign::list_of(PIX_FMT_BGRA))\r
{ \r
frame_buffer_.set_capacity(2);\r
\r
void render(const safe_ptr<core::read_frame>& frame)\r
{ \r
- if(frame->image_data().empty())\r
+ if(static_cast<size_t>(frame->image_data().size()) != format_desc_.size)\r
return;\r
\r
auto av_frame = get_av_frame();\r
\r
void send(const safe_ptr<core::read_frame>& frame)\r
{\r
- input_buffer_.push_back(frame);\r
-\r
- if(input_buffer_.full())\r
- {\r
- if(!frame_buffer_.try_push(input_buffer_.front()))\r
- graph_->add_tag("dropped-frame");\r
- }\r
+ if(!frame_buffer_.try_push(frame))\r
+ graph_->add_tag("dropped-frame");\r
}\r
\r
std::wstring print() const\r
\r
virtual void initialize(const core::video_format_desc& format_desc)\r
{\r
+ consumer_.reset();\r
consumer_.reset(new ogl_consumer(config_, format_desc));\r
}\r
\r
{\r
return consumer_->get_video_format_desc();\r
}\r
+\r
+ virtual size_t buffer_depth() const\r
+ {\r
+ return 1;\r
+ }\r
}; \r
\r
safe_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params)\r
safe_ptr<core::frame_consumer> create_consumer(const boost::property_tree::ptree& ptree) \r
{\r
configuration config;\r
- config.screen_index = ptree.get("device", config.screen_index);\r
+ config.screen_index = ptree.get("device", config.screen_index+1)-1;\r
config.windowed = ptree.get("windowed", config.windowed);\r
config.key_only = ptree.get("key-only", config.key_only);\r
config.auto_deinterlace = ptree.get("auto-deinterlace", config.auto_deinterlace);\r
\r
#include <core/producer/frame_producer.h>\r
#include <core/video_format.h>\r
-#include <core/video_channel_context.h>\r
#include <core/producer/transition/transition_producer.h>\r
#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/stage.h>\r
std::wstring param = _parameters2.at(1);\r
for(auto it = std::begin(_parameters2)+2; it != std::end(_parameters2); ++it)\r
param += L" " + *it;\r
-\r
+ \r
+ std::wstring result;\r
if(what == L"B")\r
- GetChannel()->stage()->background(GetLayerIndex()).get()->param(param);\r
+ result = GetChannel()->stage()->background(GetLayerIndex()).get()->param(param);\r
else if(what == L"F")\r
- GetChannel()->stage()->foreground(GetLayerIndex()).get()->param(param);\r
+ result = GetChannel()->stage()->foreground(GetLayerIndex()).get()->param(param);\r
\r
CASPAR_LOG(info) << "Executed param: " << _parameters[0] << TEXT(" successfully");\r
-\r
- SetReplyString(TEXT("202 PARAM OK\r\n"));\r
+ \r
+ std::wstringstream replyString;\r
+ replyString << TEXT("201 PARAM OK\r\n") << result << L"\r\n";\r
+ \r
+ SetReplyString(replyString.str());\r
\r
return true;\r
}\r
}\r
else if(_parameters[0] == L"CLEAR")\r
{\r
- GetChannel()->mixer()->clear_transforms();\r
+ int layer = GetLayerIndex(std::numeric_limits<int>::max());\r
+ if(layer == std::numeric_limits<int>::max())\r
+ GetChannel()->mixer()->clear_transforms();\r
+ else\r
+ GetChannel()->mixer()->clear_transforms(layer);\r
}\r
else\r
{\r
\r
bool CGCommand::DoExecuteInvoke() \r
{\r
+ std::wstringstream replyString;\r
+ replyString << TEXT("201 CG OK\r\n");\r
+\r
if(_parameters.size() > 2)\r
{\r
if(!ValidateLayer(_parameters[1]))\r
return false;\r
}\r
int layer = _ttoi(_parameters[1].c_str());\r
- flash::get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(flash::cg_producer::DEFAULT_LAYER))->invoke(layer, _parameters2[2]);\r
+ auto result = flash::get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(flash::cg_producer::DEFAULT_LAYER))->invoke(layer, _parameters2[2]);\r
+ replyString << result << TEXT("\r\n"); \r
}\r
else \r
{\r
SetReplyString(TEXT("402 CG ERROR\r\n"));\r
return true;\r
}\r
-\r
- SetReplyString(TEXT("202 CG OK\r\n"));\r
+ \r
+ SetReplyString(replyString.str());\r
return true;\r
}\r
\r
bool CGCommand::DoExecuteInfo() \r
{\r
- // TODO\r
- //flash::get_default_cg_producer(GetChannel())->Info();\r
- SetReplyString(TEXT("600 CG FAILED\r\n"));\r
+ std::wstringstream replyString;\r
+ replyString << TEXT("201 CG OK\r\n");\r
+\r
+ if(_parameters.size() > 1)\r
+ {\r
+ if(!ValidateLayer(_parameters[1]))\r
+ {\r
+ SetReplyString(TEXT("403 CG ERROR\r\n"));\r
+ return false;\r
+ }\r
+\r
+ int layer = _ttoi(_parameters[1].c_str());\r
+ auto desc = flash::get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(flash::cg_producer::DEFAULT_LAYER))->description(layer);\r
+ \r
+ replyString << desc << TEXT("\r\n"); \r
+ }\r
+ else \r
+ {\r
+ auto info = flash::get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(flash::cg_producer::DEFAULT_LAYER))->info();\r
+ replyString << info << TEXT("\r\n"); \r
+ } \r
+\r
+ SetReplyString(replyString.str());\r
return true;\r
}\r
\r
<data-path>L:\casparcg\_data\</data-path>\r
<template-path>L:\casparcg\_templates\</template-path>\r
</paths>\r
- <mixers>\r
- <blend-modes>true</blend-modes>\r
- </mixers>\r
- <producers>\r
- <auto-transcode>true</auto-transcode>\r
- </producers>\r
+ <blend-modes>true</blend-modes>\r
+ <auto-transcode>true</auto-transcode>\r
+ <pipeline-tokens>2</pipeline-tokens>\r
<channels>\r
<channel>\r
- <video-mode>720p5000</video-mode>\r
+ <video-mode>PAL</video-mode>\r
<consumers>\r
- <decklink></decklink>\r
+ <decklink>\r
+ <embedded-audio>true</embedded-audio>\r
+ </decklink>\r
+ <system-audio></system-audio>\r
</consumers>\r
</channel>\r
</channels>\r
</configuration>\r
\r
<!--\r
-<auto-mode-convert>true [true|false]]</auto-mode-convert>\r
-<consumers>\r
- <buffer-depth>5 [1..]</buffer-depth> \r
- NOTE:\r
- recommended: 5-6\r
- decklink_consumer: Min 3 depth\r
- decklink_consumer + embedded-audio: Min 4 depth\r
- oal_consumer: Min 3 depth\r
- bluefish_consumer: Min 1-2 depth\r
- ogl_consumer: Min 1-2 depth\r
-</consumers>\r
-<mixers>\r
- <blend-modes>false [true/false]</blend-modes>\r
-</mixers>\r
-<producers>\r
- <buffer-depth>1 [0..]</buffer-depth>\r
- <auto-transcode>true</auto-transcode>\r
- <template-hosts>\r
- <template-host>\r
- <video-mode/>\r
- <filename/>\r
- <width/>\r
- <height/>\r
- </template-host>\r
- </template-hosts>\r
-</producers>\r
-<channel>\r
- <video-mode> PAL [PAL|NTSC|1080i5000|576p2500|720p2500|720p5000|720p5994|720p6000|1080p2398|1080p2400|1080i5000|1080i5994|1080i6000|1080p2500|1080p2997|1080p3000|1080p5000] </video-mode>\r
+<blend-modes>false [true|false]</blend-modes>\r
+<auto-transcode>true [true|false]</auto-transcode>\r
+<pipeline-tokens>2 [1..]</pipeline-tokens>\r
+<template-hosts>\r
+ <template-host>\r
+ <video-mode/>\r
+ <filename/>\r
+ <width/>\r
+ <height/>\r
+ </template-host>\r
+</template-hosts>\r
+<channels>\r
+ <channel>\r
+ <video-mode> PAL [PAL|NTSC|1080i5000|576p2500|720p2500|720p5000|720p5994|720p6000|1080i5000|1080i5994|1080i6000|1080p2500|1080p2997|1080p3000|1080p5000] </video-mode>\r
<consumers>\r
<decklink>\r
<device>[1..]</device>\r
<key-only>false [true|false]</key-only>\r
</screen>\r
</consumers>\r
-</channel> \r
+ </channel>\r
+</channels> \r
-->\r
\r
\r
#include <tbb/task_scheduler_observer.h>\r
\r
#include <boost/property_tree/detail/file_parser_error.hpp>\r
+#include <boost/property_tree/xml_parser.hpp>\r
\r
#include <algorithm>\r
\r
CASPAR_LOG(info) << L"FFMPEG-avfilter " << caspar::ffmpeg::get_avfilter_version();\r
CASPAR_LOG(info) << L"FFMPEG-avutil " << caspar::ffmpeg::get_avutil_version();\r
CASPAR_LOG(info) << L"FFMPEG-swscale " << caspar::ffmpeg::get_swscale_version();\r
- CASPAR_LOG(info) << L"OpenGL " << caspar::core::ogl_device::get_version() << "\n\n";\r
}\r
\r
LONG WINAPI UserUnhandledExceptionFilter(EXCEPTION_POINTERS* info)\r
{\r
// Configure environment properties from configuration.\r
caspar::env::configure("casparcg.config");\r
-\r
+ \r
#ifdef _DEBUG\r
if(caspar::env::properties().get("configuration.debugging.remote", false))\r
MessageBox(nullptr, TEXT("Now is the time to connect for remote debugging..."), TEXT("Debug"), MB_OK | MB_TOPMOST);\r
\r
// Print environment information.\r
print_info();\r
+ \r
+ std::stringstream str;\r
+ boost::property_tree::xml_writer_settings<char> w(' ', 3);\r
+ boost::property_tree::write_xml(str, caspar::env::properties(), w);\r
+ CASPAR_LOG(info) << L"casparcg.config:\n-----------------------------------------\n" << str.str().c_str() << L"-----------------------------------------";\r
\r
// Create server object which initializes channels, protocols and controllers.\r
caspar::server caspar_server;\r
shutdown_cond.wait(lock); \r
} \r
\r
- Sleep(200); // CAPSAR_LOG is asynchronous. Try to get text in correct order.'\r
+	Sleep(500); // CASPAR_LOG is asynchronous. Try to get text in correct order.
\r
if(shutdown_event == application_state::pause_and_shutdown)\r
system("pause"); \r