1. CasparCG Server\r
==================================================================================\r
The CasparCG 2.0 Server is based on three component types; "producers", "mixers" and 
-"consumers" which in combined configurations make up a "channel". The server also\r
+"consumers" which in combined configurations make up a "video_channel". The server also\r
has "protocols" for communication and controlling.\r
\r
Producer - Renders and plays media such as video, animations, images and audio.\r
>> CUT\r
<< CUT\r
\r
- [channel:int] - A required value for "channel" which must a signed\r
+ [video_channel:int] - A required value for "video_channel" which must be a signed
integer.\r
>> 1\r
<< 1\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- LOADBG [channel:int]{-[layer:int]|-0} [clip:string] {[loop:LOOP]}\r
+ LOADBG [video_channel:int]{-[layer:int]|-0} [clip:string] {[loop:LOOP]}\r
{[transition:CUT,MIX,PUSH,WIPE] [duration:uint] {[tween:string]|linear} \r
{[direction:LEFT,RIGHT]|RIGHT}|CUT 0} {([start:uint]{,[length:uint]})|(0)}\r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- LOAD [channel:int]{-[layer:int]|-0} [clip:string] {[loop:LOOP]} \r
+ LOAD [video_channel:int]{-[layer:int]|-0} [clip:string] {[loop:LOOP]} \r
{([start_frame:uint]{,[end_frame:uint]})|(0)}\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax:\r
\r
- PLAY [channel:int]{-[layer:int]|-0} {"additional parameters"}\r
+ PLAY [video_channel:int]{-[layer:int]|-0} {"additional parameters"}\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax:\r
\r
- PAUSE [channel:int]{-[layer:int]|-0}\r
+ PAUSE [video_channel:int]{-[layer:int]|-0}\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- PAUSE [channel:int]{-[layer:int]|-0}\r
+ PAUSE [video_channel:int]{-[layer:int]|-0}\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CLEAR [channel:int]{-[layer:int]}\r
+ CLEAR [video_channel:int]{-[layer:int]}\r
\r
Description: \r
\r
Removes all clips (both foreground and background). If no layer is specified \r
- then all layers in the specified channel are cleared.\r
+ then all layers in the specified video_channel are cleared.\r
\r
Examples: \r
\r
Description: \r
\r
Swaps layers between channels (both foreground and background will be swapped). \r
- If layers are not specified then all layers in respective channel will be swapped.\r
+ If layers are not specified then all layers in respective video_channel will be swapped.\r
\r
Examples: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- ADD [channel:int]{-[layer:int]|-0} [parameters:string]\r
+ ADD [video_channel:int]{-[layer:int]|-0} [parameters:string]\r
\r
Description: \r
\r
The string "clip" will be parsed by available registered consumer factories. If\r
- a successfull match is found a consumer will be created and added to the channel.\r
+ a successful match is found a consumer will be created and added to the video_channel.
\r
Examples: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- REMOVE [channel:int]{-[layer:int]|-0}\r
+ REMOVE [video_channel:int]{-[layer:int]|-0}\r
\r
Description: \r
\r
- Removes an existing consumer from channel.\r
+ Removes an existing consumer from video_channel.\r
\r
Examples: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- PARAM [channel:int]{-[layer:int]|-0} [param:string]\r
+ PARAM [video_channel:int]{-[layer:int]|-0} [param:string]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} ADD [flash_layer:uint] [template:string]\r
+ CG [video_channel:int]{-[layer:int]|-0} ADD [flash_layer:uint] [template:string]\r
[play-on-load:0,1] [data]\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} REMOVE [flash_layer:uint]\r
+ CG [video_channel:int]{-[layer:int]|-0} REMOVE [flash_layer:uint]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} CLEAR [flash_layer:uint]\r
+ CG [video_channel:int]{-[layer:int]|-0} CLEAR [flash_layer:uint]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} PLAY [flash_layer:uint]\r
+ CG [video_channel:int]{-[layer:int]|-0} PLAY [flash_layer:uint]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} STOP [flash_layer:uint]\r
+ CG [video_channel:int]{-[layer:int]|-0} STOP [flash_layer:uint]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} NEXT [flash_layer:uint]\r
+ CG [video_channel:int]{-[layer:int]|-0} NEXT [flash_layer:uint]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} GOTO [flash_layer:uint] [label:string]\r
+ CG [video_channel:int]{-[layer:int]|-0} GOTO [flash_layer:uint] [label:string]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} GOTO [flash_layer:uint] [data:string]\r
+ CG [video_channel:int]{-[layer:int]|-0} GOTO [flash_layer:uint] [data:string]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- CG [channel:int]{-[layer:int]|-0} GOTO [flash_layer:uint] [method:string]\r
+ CG [video_channel:int]{-[layer:int]|-0} GOTO [flash_layer:uint] [method:string]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} VIDEO IS_KEY {is_key:0,1|0}\r
+ MIXER [video_channel:int]{-[layer:int]|-0} VIDEO IS_KEY {is_key:0,1|0}\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} VIDEO OPACITY {opacity:float} \r
+ MIXER [video_channel:int]{-[layer:int]|-0} VIDEO OPACITY {opacity:float} \r
{[duration:uint] {[tween:string]|linear}|0 linear}\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} VIDEO GAIN {opacity:float} \r
+ MIXER [video_channel:int]{-[layer:int]|-0} VIDEO GAIN {opacity:float} \r
{[duration:uint] {[tween:string]|linear}|0 linear}\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} VIDEO FILL_RECT [x:float] [y:float]\r
+ MIXER [video_channel:int]{-[layer:int]|-0} VIDEO FILL_RECT [x:float] [y:float]\r
[x-scale:float] [y-scale:float] {[duration:uint] {[tween:string]|linear}|0 linear}
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} VIDEO KEY_RECT [x:float] [y:float]\r
+ MIXER [video_channel:int]{-[layer:int]|-0} VIDEO KEY_RECT [x:float] [y:float]\r
[x-scale:float] [y-scale:float] {[duration:uint] {[tween:string]|linear}|0 linear}
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int] GRID [resolution:uint] VIDEO {[duration:uint] \r
+ MIXER [video_channel:int] GRID [resolution:uint] VIDEO {[duration:uint] \r
{[tween:string]|linear}|0 linear}\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} VIDEO RESET {[duration:uint] \r
+ MIXER [video_channel:int]{-[layer:int]|-0} VIDEO RESET {[duration:uint] \r
{[tween:string]|linear}|0 linear}\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} AUDIO GAIN {opacity:float} \r
+ MIXER [video_channel:int]{-[layer:int]|-0} AUDIO GAIN {opacity:float} \r
{[duration:uint] {[tween:string]|linear}|0 linear}\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} AUDIO RESET {[duration:uint] \r
+ MIXER [video_channel:int]{-[layer:int]|-0} AUDIO RESET {[duration:uint] \r
{[tween:string]|linear}|0 linear}\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- MIXER [channel:int]{-[layer:int]|-0} RESET {[duration:uint] \r
+ MIXER [video_channel:int]{-[layer:int]|-0} RESET {[duration:uint] \r
{[tween:string]|linear}|0 linear}\r
\r
Description: \r
----------------------------------------------------------------------------------\r
Syntax: \r
\r
- INFO [channel:int]\r
+ INFO [video_channel:int]\r
\r
Description: \r
\r
----------------------------------------------------------------------------------\r
\r
400 ERROR - Command not understood\r
- 401 [command] ERROR - Illegal channel\r
+ 401 [command] ERROR - Illegal video_channel\r
402 [command] ERROR - Parameter missing\r
403 [command] ERROR - Illegal parameter\r
404 [command] ERROR - Media file not found\r
\r
#include "frame_consumer_device.h"\r
\r
-#include "../channel_context.h"\r
+#include "../video_channel_context.h"\r
\r
#include "../video_format.h"\r
#include "../mixer/gpu/ogl_device.h"\r
{ \r
typedef std::pair<safe_ptr<const read_frame>, safe_ptr<const read_frame>> fill_and_key;\r
\r
- channel_context& channel_;\r
+ video_channel_context& channel_;\r
\r
boost::circular_buffer<fill_and_key> buffer_;\r
\r
std::map<int, safe_ptr<frame_consumer>> consumers_;\r
+ typedef std::map<int, safe_ptr<frame_consumer>>::value_type layer_t;\r
\r
high_prec_timer timer_;\r
\r
boost::timer tick_timer_;\r
\r
public:\r
- implementation(channel_context& channel) \r
- : channel_(channel)\r
+ implementation(video_channel_context& video_channel) \r
+ : channel_(video_channel)\r
, diag_(diagnostics::create_graph(std::string("frame_consumer_device")))\r
{ \r
- diag_->set_color("input-buffer", diagnostics::color(1.0f, 1.0f, 0.0f)); \r
diag_->add_guide("frame-time", 0.5f); \r
diag_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
diag_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));\r
}\r
\r
+ std::pair<size_t, size_t> buffer_depth()\r
+ { \r
+ auto depth_comp = [](const layer_t& lhs, const layer_t& rhs){ return lhs.second->buffer_depth() < rhs.second->buffer_depth(); };\r
+ auto min = std::min_element(consumers_.begin(), consumers_.end(), depth_comp)->second->buffer_depth();\r
+ auto max = std::max_element(consumers_.begin(), consumers_.end(), depth_comp)->second->buffer_depth();\r
+ CASPAR_ASSERT(max >= min);\r
+ return std::make_pair(min, max);\r
+ }\r
+\r
void add(int index, safe_ptr<frame_consumer>&& consumer)\r
{ \r
consumer->initialize(channel_.format_desc);\r
channel_.execution.invoke([&]\r
{\r
- buffer_.set_capacity(std::max(buffer_.capacity(), consumer->buffer_depth()));\r
-\r
this->remove(index);\r
consumers_.insert(std::make_pair(index, consumer));\r
+\r
+ auto depth = buffer_depth();\r
+ auto diff = depth.second-depth.first+1;\r
+ \r
+ if(diff != buffer_.capacity())\r
+ {\r
+ buffer_.set_capacity(diff);\r
+ CASPAR_LOG(info) << print() << L" Depth-diff: " << diff-1;\r
+ }\r
+\r
CASPAR_LOG(info) << print() << L" " << consumer->print() << L" Added.";\r
});\r
}\r
}\r
});\r
}\r
+ \r
+ void operator()(const safe_ptr<read_frame>& frame)\r
+ { \r
+ if(!has_synchronization_clock())\r
+ timer_.tick(1.0/channel_.format_desc.fps);\r
+\r
+ frame_timer_.restart();\r
+ \r
+ buffer_.push_back(std::make_pair(frame, get_key_frame(frame)));\r
+\r
+ if(!buffer_.full())\r
+ return;\r
+ \r
+ for_each_consumer([&](safe_ptr<frame_consumer>& consumer)\r
+ {\r
+ if(consumer->get_video_format_desc() != channel_.format_desc)\r
+ consumer->initialize(channel_.format_desc);\r
+\r
+ auto tmp = (consumer->buffer_depth()-buffer_depth().first);\r
+ auto pair = buffer_[tmp];\r
+ auto frame = consumer->key_only() ? pair.second : pair.first;\r
+\r
+ if(static_cast<size_t>(frame->image_data().size()) == consumer->get_video_format_desc().size)\r
+ consumer->send(frame);\r
+ });\r
+\r
+ diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
+ \r
+ diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
+ tick_timer_.restart();\r
+ }\r
+\r
+private:\r
\r
bool has_synchronization_clock()\r
{\r
auto key_data = channel_.ogl.create_host_buffer(frame->image_data().size(), host_buffer::write_only); \r
fast_memsfhl(key_data->data(), frame->image_data().begin(), frame->image_data().size(), 0x0F0F0F0F, 0x0B0B0B0B, 0x07070707, 0x03030303);\r
std::vector<int16_t> audio_data(frame->audio_data().begin(), frame->audio_data().end());\r
- return make_safe<read_frame>(std::move(key_data), std::move(audio_data));\r
+ return make_safe<read_frame>(std::move(key_data), std::move(audio_data), frame->number());\r
}\r
\r
return read_frame::empty();\r
}\r
- \r
- void send(const safe_ptr<read_frame>& frame)\r
- { \r
- channel_.execution.invoke([=]\r
- {\r
- if(!has_synchronization_clock())\r
- timer_.tick(1.0/channel_.format_desc.fps);\r
-\r
- diag_->set_value("input-buffer", static_cast<float>(channel_.execution.size())/static_cast<float>(channel_.execution.capacity()));\r
- frame_timer_.restart();\r
- \r
- buffer_.push_back(std::make_pair(frame, get_key_frame(frame)));\r
-\r
- if(!buffer_.full())\r
- return;\r
- \r
- for_each_consumer([&](safe_ptr<frame_consumer>& consumer)\r
- {\r
- if(consumer->get_video_format_desc() != channel_.format_desc)\r
- consumer->initialize(channel_.format_desc);\r
-\r
- auto pair = buffer_[consumer->buffer_depth()-1];\r
- auto frame = consumer->key_only() ? pair.second : pair.first;\r
-\r
- if(static_cast<size_t>(frame->image_data().size()) == consumer->get_video_format_desc().size)\r
- consumer->send(frame);\r
- });\r
-\r
- diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
- \r
- diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
- tick_timer_.restart();\r
- });\r
- diag_->set_value("input-buffer", static_cast<float>(channel_.execution.size())/static_cast<float>(channel_.execution.capacity()));\r
- }\r
\r
void for_each_consumer(const std::function<void(safe_ptr<frame_consumer>& consumer)>& func)\r
{\r
}\r
};\r
\r
-frame_consumer_device::frame_consumer_device(channel_context& channel) \r
- : impl_(new implementation(channel)){}\r
+frame_consumer_device::frame_consumer_device(video_channel_context& video_channel) \r
+ : impl_(new implementation(video_channel)){}\r
void frame_consumer_device::add(int index, safe_ptr<frame_consumer>&& consumer){impl_->add(index, std::move(consumer));}\r
void frame_consumer_device::remove(int index){impl_->remove(index);}\r
-void frame_consumer_device::send(const safe_ptr<read_frame>& frame) { impl_->send(frame); }\r
+void frame_consumer_device::operator()(const safe_ptr<read_frame>& frame) { (*impl_)(frame); }\r
}}
\ No newline at end of file
\r
namespace core {\r
\r
-struct channel_context;\r
+struct video_channel_context;\r
\r
class frame_consumer_device : boost::noncopyable\r
{\r
public:\r
- explicit frame_consumer_device(channel_context& channel);\r
+ explicit frame_consumer_device(video_channel_context& video_channel);\r
\r
void add(int index, safe_ptr<frame_consumer>&& consumer);\r
void remove(int index);\r
\r
- void send(const safe_ptr<read_frame>& frame); // nothrow\r
+ void operator()(const safe_ptr<read_frame>& frame); // nothrow\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
<Lib />\r
</ItemDefinitionGroup>\r
<ItemGroup>\r
- <ClInclude Include="channel.h" />\r
- <ClInclude Include="channel_context.h" />\r
+ <ClInclude Include="video_channel.h" />\r
+ <ClInclude Include="video_channel_context.h" />\r
<ClInclude Include="consumer\frame_consumer_device.h" />\r
<ClInclude Include="consumer\frame_consumer.h" />\r
<ClInclude Include="mixer\audio\audio_mixer.h" />\r
<ClInclude Include="StdAfx.h" />\r
</ItemGroup>\r
<ItemGroup>\r
- <ClCompile Include="channel.cpp" />\r
+ <ClCompile Include="video_channel.cpp" />\r
<ClCompile Include="consumer\frame_consumer.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
<ClInclude Include="producer\transition\transition_producer.h">\r
<Filter>producer\transition</Filter>\r
</ClInclude>\r
- <ClInclude Include="channel.h" />\r
- <ClInclude Include="StdAfx.h" />\r
<ClInclude Include="consumer\frame_consumer_device.h">\r
<Filter>consumer</Filter>\r
</ClInclude>\r
<ClInclude Include="mixer\frame_mixer_device.h">\r
<Filter>mixer</Filter>\r
</ClInclude>\r
- <ClInclude Include="channel_context.h" />\r
+ <ClInclude Include="video_channel.h" />\r
+ <ClInclude Include="video_channel_context.h" />\r
+ <ClInclude Include="StdAfx.h" />\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="producer\transition\transition_producer.cpp">\r
<Filter>producer\transition</Filter>\r
</ClCompile>\r
- <ClCompile Include="channel.cpp" />\r
- <ClCompile Include="StdAfx.cpp" />\r
<ClCompile Include="consumer\frame_consumer.cpp">\r
<Filter>consumer</Filter>\r
</ClCompile>\r
<ClCompile Include="mixer\write_frame.cpp">\r
<Filter>mixer</Filter>\r
</ClCompile>\r
+ <ClCompile Include="video_channel.cpp" />\r
+ <ClCompile Include="StdAfx.cpp" />\r
</ItemGroup>\r
</Project>
\ No newline at end of file
#include "audio/audio_mixer.h"\r
#include "image/image_mixer.h"\r
\r
-#include "../channel_context.h"\r
+#include "../video_channel_context.h"\r
\r
#include <common/exception/exceptions.h>\r
#include <common/concurrency/executor.h>\r
\r
struct frame_mixer_device::implementation : boost::noncopyable\r
{ \r
- channel_context& channel_;\r
+ video_channel_context& channel_;\r
\r
safe_ptr<diagnostics::graph> diag_;\r
boost::timer frame_timer_;\r
\r
audio_mixer audio_mixer_;\r
image_mixer image_mixer_;\r
+\r
+ int frame_number_;\r
\r
typedef std::unordered_map<int, tweened_transform<core::image_transform>> image_transforms;\r
typedef std::unordered_map<int, tweened_transform<core::audio_transform>> audio_transforms;\r
\r
boost::fusion::map<boost::fusion::pair<core::image_transform, tweened_transform<core::image_transform>>,\r
boost::fusion::pair<core::audio_transform, tweened_transform<core::audio_transform>>> root_transforms_;\r
-\r
- std::queue<safe_ptr<read_frame>> frame_buffer_;\r
public:\r
- implementation(channel_context& channel) \r
- : channel_(channel)\r
+ implementation(video_channel_context& video_channel) \r
+ : channel_(video_channel)\r
, diag_(diagnostics::create_graph(narrow(print())))\r
, image_mixer_(channel_)\r
+ , frame_number_(0)\r
{\r
diag_->add_guide("frame-time", 0.5f); \r
diag_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
diag_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));\r
- diag_->set_color("input-buffer", diagnostics::color(1.0f, 1.0f, 0.0f)); \r
\r
CASPAR_LOG(info) << print() << L" Successfully initialized."; \r
}\r
- \r
- safe_ptr<host_buffer> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)\r
- { \r
- auto& root_image_transform = boost::fusion::at_key<core::image_transform>(root_transforms_);\r
- auto& image_transforms = boost::fusion::at_key<core::image_transform>(transforms_);\r
- \r
- BOOST_FOREACH(auto& frame, frames)\r
- {\r
- image_mixer_.begin_layer();\r
\r
- if(channel_.format_desc.mode != core::video_mode::progressive)\r
- {\r
- auto frame1 = make_safe<core::basic_frame>(frame.second);\r
- auto frame2 = make_safe<core::basic_frame>(frame.second);\r
- \r
- frame1->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);\r
- frame2->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);\r
-\r
- if(frame1->get_image_transform() != frame2->get_image_transform())\r
- core::basic_frame::interlace(frame1, frame2, channel_.format_desc.mode)->accept(image_mixer_);\r
- else\r
- frame2->accept(image_mixer_);\r
- }\r
- else\r
- {\r
- auto frame1 = make_safe<core::basic_frame>(frame.second);\r
- frame1->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);\r
- frame1->accept(image_mixer_);\r
- }\r
+ safe_ptr<read_frame> operator()(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
+ { \r
+ frame_timer_.restart();\r
\r
- image_mixer_.end_layer();\r
- }\r
-\r
- return image_mixer_.render();\r
- }\r
-\r
- std::vector<int16_t> mix_audio(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
- {\r
- auto& root_audio_transform = boost::fusion::at_key<core::audio_transform>(root_transforms_);\r
- auto& audio_transforms = boost::fusion::at_key<core::audio_transform>(transforms_);\r
-\r
- BOOST_FOREACH(auto& frame, frames)\r
- {\r
- const unsigned int num = channel_.format_desc.mode == core::video_mode::progressive ? 1 : 2;\r
-\r
- auto frame1 = make_safe<core::basic_frame>(frame.second);\r
- frame1->get_audio_transform() = root_audio_transform.fetch_and_tick(num)*audio_transforms[frame.first].fetch_and_tick(num);\r
- frame1->accept(audio_mixer_);\r
- }\r
-\r
- return audio_mixer_.mix();\r
- }\r
- \r
- void send(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
- { \r
- channel_.execution.invoke([=]\r
- { \r
- diag_->set_value("input-buffer", static_cast<float>(channel_.execution.size())/static_cast<float>(channel_.execution.capacity())); \r
- frame_timer_.restart();\r
-\r
- auto image = mix_image(frames);\r
- auto audio = mix_audio(frames);\r
+ auto image = mix_image(frames);\r
+ auto audio = mix_audio(frames);\r
\r
- diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
-\r
- frame_buffer_.push(make_safe<read_frame>(std::move(image), std::move(audio)));\r
-\r
- diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
- tick_timer_.restart();\r
- });\r
- diag_->set_value("input-buffer", static_cast<float>(channel_.execution.size())/static_cast<float>(channel_.execution.capacity()));\r
- }\r
+ diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
+ \r
+ diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
+ tick_timer_.restart();\r
\r
- safe_ptr<read_frame> receive()\r
- {\r
- return channel_.execution.invoke([=]() -> safe_ptr<read_frame>\r
- {\r
- auto frame = frame_buffer_.front();\r
- frame_buffer_.pop();\r
- return frame;\r
- });\r
+ return make_safe<read_frame>(std::move(image), std::move(audio), frame_number_++);\r
}\r
- \r
+ \r
safe_ptr<core::write_frame> create_frame(void* tag, const core::pixel_format_desc& desc)\r
{ \r
return image_mixer_.create_frame(tag, desc);\r
{\r
return L"frame_mixer_device";\r
}\r
+\r
+private:\r
+ \r
+ safe_ptr<host_buffer> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)\r
+ { \r
+ auto& root_image_transform = boost::fusion::at_key<core::image_transform>(root_transforms_);\r
+ auto& image_transforms = boost::fusion::at_key<core::image_transform>(transforms_);\r
+ \r
+ BOOST_FOREACH(auto& frame, frames)\r
+ {\r
+ image_mixer_.begin_layer();\r
+ \r
+ if(channel_.format_desc.mode != core::video_mode::progressive)\r
+ {\r
+ auto frame1 = make_safe<core::basic_frame>(frame.second);\r
+ auto frame2 = make_safe<core::basic_frame>(frame.second);\r
+ \r
+ frame1->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);\r
+ frame2->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);\r
+\r
+ if(frame1->get_image_transform() != frame2->get_image_transform())\r
+ core::basic_frame::interlace(frame1, frame2, channel_.format_desc.mode)->accept(image_mixer_);\r
+ else\r
+ frame2->accept(image_mixer_);\r
+ }\r
+ else\r
+ {\r
+ auto frame1 = make_safe<core::basic_frame>(frame.second);\r
+ frame1->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);\r
+ frame1->accept(image_mixer_);\r
+ }\r
+\r
+ image_mixer_.end_layer();\r
+ }\r
+\r
+ return image_mixer_.render();\r
+ }\r
+\r
+ std::vector<int16_t> mix_audio(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
+ {\r
+ auto& root_audio_transform = boost::fusion::at_key<core::audio_transform>(root_transforms_);\r
+ auto& audio_transforms = boost::fusion::at_key<core::audio_transform>(transforms_);\r
+\r
+ BOOST_FOREACH(auto& frame, frames)\r
+ {\r
+ const unsigned int num = channel_.format_desc.mode == core::video_mode::progressive ? 1 : 2;\r
+\r
+ auto frame1 = make_safe<core::basic_frame>(frame.second);\r
+ frame1->get_audio_transform() = root_audio_transform.fetch_and_tick(num)*audio_transforms[frame.first].fetch_and_tick(num);\r
+ frame1->accept(audio_mixer_);\r
+ }\r
+\r
+ return audio_mixer_.mix();\r
+ }\r
};\r
\r
-frame_mixer_device::frame_mixer_device(channel_context& channel) : impl_(new implementation(channel)){}\r
-void frame_mixer_device::send(const std::map<int, safe_ptr<core::basic_frame>>& frames){impl_->send(frames);}\r
+frame_mixer_device::frame_mixer_device(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
+safe_ptr<core::read_frame> frame_mixer_device::operator()(const std::map<int, safe_ptr<core::basic_frame>>& frames){ return (*impl_)(frames);}\r
const core::video_format_desc& frame_mixer_device::get_video_format_desc() const { return impl_->channel_.format_desc; }\r
safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, const core::pixel_format_desc& desc){ return impl_->create_frame(tag, desc); } \r
safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt)\r
void frame_mixer_device::reset_image_transform(int index, unsigned int mix_duration, const std::wstring& tween){impl_->reset_transform<core::image_transform>(index, mix_duration, tween);}\r
void frame_mixer_device::reset_audio_transform(unsigned int mix_duration, const std::wstring& tween){impl_->reset_transform<core::audio_transform>(mix_duration, tween);}\r
void frame_mixer_device::reset_audio_transform(int index, unsigned int mix_duration, const std::wstring& tween){impl_->reset_transform<core::audio_transform>(index, mix_duration, tween);}\r
-safe_ptr<core::read_frame> frame_mixer_device::receive(){return impl_->receive();}\r
}}
\ No newline at end of file
class basic_frame;\r
class audio_transform;\r
class image_transform;\r
-struct channel_context;\r
+struct video_channel_context;\r
\r
class frame_mixer_device : public core::frame_factory\r
{\r
public: \r
- explicit frame_mixer_device(channel_context& channel);\r
+ explicit frame_mixer_device(video_channel_context& video_channel);\r
\r
- void send(const std::map<int, safe_ptr<core::basic_frame>>& frames); // nothrow\r
- safe_ptr<core::read_frame> receive();\r
+ safe_ptr<core::read_frame> operator()(const std::map<int, safe_ptr<core::basic_frame>>& frames); // nothrow\r
\r
safe_ptr<core::write_frame> create_frame(void* tag, const core::pixel_format_desc& desc); \r
safe_ptr<core::write_frame> create_frame(void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt = core::pixel_format::bgra); \r
GL(glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));\r
GL(glTexImage2D(GL_TEXTURE_2D, 0, INTERNAL_FORMAT[stride_], width_, height_, 0, FORMAT[stride_], GL_UNSIGNED_BYTE, NULL));\r
GL(glBindTexture(GL_TEXTURE_2D, 0));\r
- CASPAR_LOG(trace) << "[device_buffer] allocated size:" << width*height*stride; \r
+ //CASPAR_LOG(trace) << "[device_buffer] allocated size:" << width*height*stride; \r
} \r
\r
~implementation()\r
if(!pbo_)\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to allocate buffer."));\r
\r
- CASPAR_LOG(trace) << "[host_buffer] allocated size:" << size_ << " usage: " << (usage == write_only ? "write_only" : "read_only");\r
+ //CASPAR_LOG(trace) << "[host_buffer] allocated size:" << size_ << " usage: " << (usage == write_only ? "write_only" : "read_only");\r
} \r
\r
~implementation()\r
#include "../gpu/device_buffer.h"\r
#include "../write_frame.h"\r
\r
-#include "../../channel_context.h"\r
+#include "../../video_channel_context.h"\r
\r
#include <common/concurrency/executor.h>\r
#include <common/exception/exceptions.h>\r
core::image_transform transform;\r
};\r
\r
- channel_context& channel_;\r
+ video_channel_context& channel_;\r
\r
std::stack<core::image_transform> transform_stack_;\r
std::queue<std::queue<render_item>> render_queue_;\r
bool layer_key_;\r
\r
public:\r
- implementation(channel_context& channel) \r
- : channel_(channel)\r
- , read_buffer_(channel.ogl.create_host_buffer(channel.format_desc.size, host_buffer::read_only))\r
- , draw_buffer_(channel.ogl.create_device_buffer(channel.format_desc.width, channel_.format_desc.height, 4))\r
- , write_buffer_ (channel.ogl.create_device_buffer(channel.format_desc.width, channel_.format_desc.height, 4))\r
- , local_key_buffer_(channel.ogl.create_device_buffer(channel.format_desc.width, channel_.format_desc.height, 1))\r
- , layer_key_buffer_(channel.ogl.create_device_buffer(channel.format_desc.width, channel_.format_desc.height, 1))\r
+ implementation(video_channel_context& video_channel) \r
+ : channel_(video_channel)\r
+ , read_buffer_(video_channel.ogl.create_host_buffer(video_channel.format_desc.size, host_buffer::read_only))\r
+ , draw_buffer_(video_channel.ogl.create_device_buffer(video_channel.format_desc.width, channel_.format_desc.height, 4))\r
+ , write_buffer_ (video_channel.ogl.create_device_buffer(video_channel.format_desc.width, channel_.format_desc.height, 4))\r
+ , local_key_buffer_(video_channel.ogl.create_device_buffer(video_channel.format_desc.width, channel_.format_desc.height, 1))\r
+ , layer_key_buffer_(video_channel.ogl.create_device_buffer(video_channel.format_desc.width, channel_.format_desc.height, 1))\r
, local_key_(false)\r
, layer_key_(false)\r
{\r
}\r
};\r
\r
-image_mixer::image_mixer(channel_context& channel) : impl_(new implementation(channel)){}\r
+image_mixer::image_mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
void image_mixer::begin(const core::basic_frame& frame){impl_->begin(frame);}\r
void image_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
void image_mixer::end(){impl_->end();}\r
class write_frame;\r
class host_buffer;\r
class ogl_device;\r
-struct channel_context;\r
+struct video_channel_context;\r
\r
class image_mixer : public core::frame_visitor, boost::noncopyable\r
{\r
public:\r
- image_mixer(channel_context& context);\r
+ image_mixer(video_channel_context& context);\r
\r
virtual void begin(const core::basic_frame& frame);\r
virtual void visit(core::write_frame& frame);\r
{\r
safe_ptr<host_buffer> image_data_;\r
std::vector<int16_t> audio_data_;\r
+ int number_;\r
\r
public:\r
- implementation(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) \r
+ implementation(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data, int number) \r
: image_data_(std::move(image_data))\r
- , audio_data_(std::move(audio_data)){} \r
+ , audio_data_(std::move(audio_data))\r
+ , number_(number){} \r
\r
const boost::iterator_range<const uint8_t*> image_data()\r
{\r
}\r
};\r
\r
-read_frame::read_frame(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) \r
- : impl_(new implementation(std::move(image_data), std::move(audio_data))){}\r
+read_frame::read_frame(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data, int number) \r
+ : impl_(new implementation(std::move(image_data), std::move(audio_data), number)){}\r
\r
const boost::iterator_range<const uint8_t*> read_frame::image_data() const{return impl_->image_data();}\r
const boost::iterator_range<const int16_t*> read_frame::audio_data() const{return impl_->audio_data();}\r
+int read_frame::number() const{return impl_->number_;}\r
\r
}}
\ No newline at end of file
{\r
read_frame(){}\r
public:\r
- read_frame(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data);\r
+ read_frame(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data, int frame_number);\r
\r
virtual const boost::iterator_range<const uint8_t*> image_data() const;\r
virtual const boost::iterator_range<const int16_t*> audio_data() const;\r
\r
+ virtual int number() const;\r
+\r
static safe_ptr<const read_frame> empty()\r
{\r
struct empty : public read_frame\r
{ \r
virtual const boost::iterator_range<const uint8_t*> image_data() const {return boost::iterator_range<const uint8_t*>();}\r
virtual const boost::iterator_range<const int16_t*> audio_data() const {return boost::iterator_range<const int16_t*>();}\r
+ virtual int number() const{return -1;}\r
};\r
static safe_ptr<const empty> frame;\r
return frame;\r
\r
#include "frame_producer_device.h"\r
\r
-#include "../channel_context.h"\r
+#include "../video_channel_context.h"\r
\r
#include "layer.h"\r
\r
\r
struct frame_producer_device::implementation : boost::noncopyable\r
{ \r
- std::map<int, layer> layers_; \r
+ std::map<int, layer> layers_; \r
+ typedef std::map<int, layer>::value_type layer_t;\r
\r
- safe_ptr<diagnostics::graph> diag_;\r
- boost::timer frame_timer_;\r
- boost::timer tick_timer_;\r
- boost::timer output_timer_;\r
+ safe_ptr<diagnostics::graph> diag_;\r
+ boost::timer frame_timer_;\r
+ boost::timer tick_timer_;\r
+ boost::timer output_timer_;\r
\r
- channel_context& channel_;\r
+ video_channel_context& channel_;\r
public:\r
- implementation(channel_context& channel) \r
+ implementation(video_channel_context& video_channel) \r
: diag_(diagnostics::create_graph(std::string("frame_producer_device")))\r
- , channel_(channel)\r
+ , channel_(video_channel)\r
{\r
diag_->add_guide("frame-time", 0.5f); \r
diag_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
- diag_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));\r
- //diag_->set_color("output-time", diagnostics::color(0.5f, 1.0f, 0.2f));\r
-\r
- //channel_.execution.begin_invoke([=]{tick();}); \r
+ diag_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f)); \r
}\r
- \r
- //void tick()\r
- //{ \r
- // try\r
- // {\r
- // auto frame = render();\r
- // output_timer_.restart();\r
- // output_(frame);\r
- // diag_->update_value("output-time", static_cast<float>(output_timer_.elapsed()*channel_.format_desc.fps*0.5));\r
- // }\r
- // catch(...)\r
- // {\r
- // CASPAR_LOG_CURRENT_EXCEPTION();\r
- // }\r
-\r
- // channel_.execution.begin_invoke([=]{tick();});\r
- //}\r
- \r
- std::map<int, safe_ptr<basic_frame>> receive()\r
+ \r
+ std::map<int, safe_ptr<basic_frame>> operator()()\r
{ \r
- return channel_.execution.invoke([=]() -> std::map<int, safe_ptr<basic_frame>>\r
- {\r
- frame_timer_.restart();\r
+ frame_timer_.restart();\r
+ \r
+ std::map<int, safe_ptr<basic_frame>> frames;\r
\r
- std::map<int, safe_ptr<basic_frame>> frames;\r
- BOOST_FOREACH(auto& layer, layers_)\r
- frames[layer.first] = basic_frame::empty();\r
+ // Allocate placeholders.\r
+ std::for_each(layers_.begin(), layers_.end(), [&](layer_t& layer)\r
+ {\r
+ frames[layer.first] = basic_frame::empty();\r
+ });\r
\r
- tbb::parallel_for_each(layers_.begin(), layers_.end(), [&](decltype(*layers_.begin())& pair)\r
- {\r
- frames[pair.first] = pair.second.receive();\r
- });\r
+ // Render layers\r
+ tbb::parallel_for_each(layers_.begin(), layers_.end(), [&](layer_t& layer)\r
+ {\r
+ frames[layer.first] = layer.second.receive();\r
+ });\r
\r
- diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
+ diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
\r
- diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
- tick_timer_.restart();\r
+ diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
+ tick_timer_.restart();\r
\r
- return frames;\r
- });\r
+ return frames;\r
}\r
\r
void load(int index, const safe_ptr<frame_producer>& producer, bool preview)\r
\r
std::transform(layers_.begin(), layers_.end(), inserter, sel_first);\r
std::transform(other.impl_->layers_.begin(), other.impl_->layers_.end(), inserter, sel_first);\r
+\r
std::for_each(indices.begin(), indices.end(), [&](int index)\r
{\r
layers_[index].swap(other.impl_->layers_[index]);\r
}\r
};\r
\r
-frame_producer_device::frame_producer_device(channel_context& channel) : impl_(new implementation(channel)){}\r
+frame_producer_device::frame_producer_device(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
void frame_producer_device::swap(frame_producer_device& other){impl_->swap(other);}\r
void frame_producer_device::load(int index, const safe_ptr<frame_producer>& producer, bool preview){impl_->load(index, producer, preview);}\r
void frame_producer_device::pause(int index){impl_->pause(index);}\r
void frame_producer_device::swap_layer(int index, size_t other_index, frame_producer_device& other){impl_->swap_layer(index, other_index, other);}\r
boost::unique_future<safe_ptr<frame_producer>> frame_producer_device::foreground(size_t index) {return impl_->foreground(index);}\r
boost::unique_future<safe_ptr<frame_producer>> frame_producer_device::background(size_t index) {return impl_->background(index);}\r
-std::map<int, safe_ptr<basic_frame>> frame_producer_device::receive(){return impl_->receive();}\r
+std::map<int, safe_ptr<basic_frame>> frame_producer_device::operator()(){return (*impl_)();}\r
}}
\ No newline at end of file
namespace caspar { namespace core {\r
\r
struct video_format_desc;\r
-struct channel_context;\r
+struct video_channel_context;\r
\r
class frame_producer_device : boost::noncopyable\r
{\r
public:\r
- explicit frame_producer_device(channel_context& channel);\r
+ explicit frame_producer_device(video_channel_context& video_channel);\r
\r
void swap(frame_producer_device& other);\r
\r
- std::map<int, safe_ptr<basic_frame>> receive();\r
+ std::map<int, safe_ptr<basic_frame>> operator()();\r
\r
void load(int index, const safe_ptr<frame_producer>& producer, bool preview = false);\r
void pause(int index);\r
if(current_frame_++ >= info_.duration)\r
return basic_frame::eof();\r
\r
- safe_ptr<core::basic_frame> dest;\r
- safe_ptr<core::basic_frame> source;\r
+ auto dest = core::basic_frame::empty();\r
+ auto source = core::basic_frame::empty();\r
\r
tbb::parallel_invoke\r
(\r
\r
#include "StdAfx.h"\r
\r
-#include "channel.h"\r
+#include "video_channel.h"\r
\r
-#include "channel_context.h"\r
+#include "video_channel_context.h"\r
\r
#include "video_format.h"\r
#include "producer/layer.h"\r
\r
namespace caspar { namespace core {\r
\r
-struct channel::implementation : boost::noncopyable\r
+struct video_channel::implementation : boost::noncopyable\r
{\r
- channel_context context_;\r
+ video_channel_context context_;\r
\r
safe_ptr<frame_consumer_device> consumer_;\r
safe_ptr<frame_mixer_device> mixer_;\r
\r
void tick()\r
{\r
- auto simple_frames = producer_->receive();\r
- mixer_->send(simple_frames);\r
- auto finished_frame = mixer_->receive();\r
- consumer_->send(finished_frame);\r
+ auto simple_frames = (*producer_)();\r
+ auto finished_frame = (*mixer_)(simple_frames);\r
+ (*consumer_)(finished_frame);\r
\r
context_.execution.begin_invoke([this]{tick();});\r
}\r
}\r
};\r
\r
-channel::channel(int index, const video_format_desc& format_desc, ogl_device& ogl) : impl_(new implementation(index, format_desc, ogl)){}\r
-channel::channel(channel&& other) : impl_(std::move(other.impl_)){}\r
-safe_ptr<frame_producer_device> channel::producer() { return impl_->producer_;} \r
-safe_ptr<frame_mixer_device> channel::mixer() { return impl_->mixer_;} \r
-safe_ptr<frame_consumer_device> channel::consumer() { return impl_->consumer_;} \r
-const video_format_desc& channel::get_video_format_desc() const{return impl_->context_.format_desc;}\r
-void channel::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
-std::wstring channel::print() const { return impl_->print();}\r
+video_channel::video_channel(int index, const video_format_desc& format_desc, ogl_device& ogl) : impl_(new implementation(index, format_desc, ogl)){}\r
+video_channel::video_channel(video_channel&& other) : impl_(std::move(other.impl_)){}\r
+safe_ptr<frame_producer_device> video_channel::producer() { return impl_->producer_;} \r
+safe_ptr<frame_mixer_device> video_channel::mixer() { return impl_->mixer_;} \r
+safe_ptr<frame_consumer_device> video_channel::consumer() { return impl_->consumer_;} \r
+const video_format_desc& video_channel::get_video_format_desc() const{return impl_->context_.format_desc;}\r
+void video_channel::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
+std::wstring video_channel::print() const { return impl_->print();}\r
\r
}}
\ No newline at end of file
\r
class ogl_device;\r
\r
-class channel : boost::noncopyable\r
+class video_channel : boost::noncopyable\r
{\r
public:\r
- explicit channel(int index, const video_format_desc& format_desc, ogl_device& ogl);\r
- channel(channel&& other);\r
+ explicit video_channel(int index, const video_format_desc& format_desc, ogl_device& ogl);\r
+ video_channel(video_channel&& other);\r
\r
safe_ptr<frame_producer_device> producer();\r
safe_ptr<frame_mixer_device> mixer();\r
\r
namespace caspar { namespace core {\r
\r
-struct channel_context\r
+struct video_channel_context\r
{\r
- channel_context(int index, ogl_device& ogl, const video_format_desc& format_desc) \r
+ video_channel_context(int index, ogl_device& ogl, const video_format_desc& format_desc) \r
: index(index)\r
+ , format_desc(format_desc)\r
, execution(print() + L"/execution")\r
, destruction(print() + L"/destruction")\r
, ogl(ogl)\r
- , format_desc(format_desc)\r
{\r
execution.set_priority_class(above_normal_priority_class);\r
- destruction.set_priority_class(below_normal_priority_class);\r
}\r
\r
const int index;\r
+ video_format_desc format_desc;\r
executor execution;\r
executor destruction;\r
ogl_device& ogl;\r
- video_format_desc format_desc;\r
\r
std::wstring print() const\r
{\r
- return L"channel[" + boost::lexical_cast<std::wstring>(index+1) + L"-" + format_desc.name + L"]";\r
+ return L"video_channel[" + boost::lexical_cast<std::wstring>(index+1) + L"-" + format_desc.name + L"]";\r
}\r
};\r
\r
#include <common/concurrency/executor.h>\r
#include <common/diagnostics/graph.h>\r
#include <common/memory/memcpy.h>\r
+#include <common/memory/memclr.h>\r
#include <common/utility/timer.h>\r
\r
#include <tbb/concurrent_queue.h>\r
boost::timer tick_timer_;\r
boost::timer sync_timer_;\r
\r
- boost::unique_future<void> active_;\r
- \r
std::shared_ptr<CBlueVelvet4> blue_;\r
\r
const core::video_format_desc format_desc_;\r
unsigned long engine_mode_;\r
EVideoMode vid_fmt_; \r
\r
- std::array<blue_dma_buffer_ptr, 3> reserved_frames_; \r
+ std::array<blue_dma_buffer_ptr, 4> reserved_frames_; \r
+ tbb::concurrent_bounded_queue<std::shared_ptr<const core::read_frame>> frame_buffer_;\r
\r
const bool embedded_audio_;\r
-\r
+ \r
executor executor_;\r
public:\r
- bluefish_consumer(const core::video_format_desc& format_desc, unsigned int device_index, bool embedded_audio) \r
+ bluefish_consumer(const core::video_format_desc& format_desc, unsigned int device_index, bool embedded_audio, size_t buffer_depth) \r
: model_name_(L"BLUEFISH")\r
, device_index_(device_index) \r
, format_desc_(format_desc)\r
, embedded_audio_(embedded_audio)\r
, executor_(print())\r
{\r
+ executor_.set_capacity(buffer_depth);\r
+\r
if(!BlueVelvetFactory4 || (embedded_audio_ && (!encode_hanc_frame || !encode_hanc_frame)))\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Bluefish drivers not found."));\r
\r
\r
//void* pBlueDevice = blue_attach_to_device(1);\r
//EBlueConnectorPropertySetting video_routing[1];\r
- //auto channel = BLUE_VIDEO_OUTPUT_CHANNEL_A;\r
- //video_routing[0].channel = channel; \r
+ //auto video_channel = BLUE_VIDEO_OUTPUT_CHANNEL_A;\r
+ //video_routing[0].channel = video_channel; \r
//video_routing[0].propType = BLUE_CONNECTOR_PROP_SINGLE_LINK;\r
- //video_routing[0].connector = channel == BLUE_VIDEO_OUTPUT_CHANNEL_A ? BLUE_CONNECTOR_SDI_OUTPUT_A : BLUE_CONNECTOR_SDI_OUTPUT_B;\r
+ //video_routing[0].connector = video_channel == BLUE_VIDEO_OUTPUT_CHANNEL_A ? BLUE_CONNECTOR_SDI_OUTPUT_A : BLUE_CONNECTOR_SDI_OUTPUT_B;\r
//blue_set_connector_property(pBlueDevice, 1, video_routing);\r
//blue_detach_from_device(&pBlueDevice);\r
\r
if(vid_fmt_ == VID_FMT_INVALID)\r
BOOST_THROW_EXCEPTION(bluefish_exception() << msg_info(narrow(print()) + " Failed to set videomode."));\r
\r
- // Set default video output channel\r
- //if(BLUE_FAIL(set_card_property(blue_, DEFAULT_VIDEO_OUTPUT_CHANNEL, channel)))\r
- // CASPAR_LOG(error) << TEXT("BLUECARD ERROR: Failed to set default channel. (device ") << device_index_ << TEXT(")");\r
+ // Set default video output channel (Bluefish card property, not a CasparCG video_channel)\r
+ //if(BLUE_FAIL(set_card_property(blue_, DEFAULT_VIDEO_OUTPUT_CHANNEL, video_channel)))\r
+ // CASPAR_LOG(error) << TEXT("BLUECARD ERROR: Failed to set default output channel. (device ") << device_index_ << TEXT(")");\r
\r
//Setting output Video mode\r
if(BLUE_FAIL(set_card_property(blue_, VIDEO_MODE, vid_fmt_))) \r
enable_video_output();\r
\r
for(size_t n = 0; n < reserved_frames_.size(); ++n)\r
- reserved_frames_[n] = std::make_shared<blue_dma_buffer>(format_desc_.size, n); \r
+ reserved_frames_[n] = std::make_shared<blue_dma_buffer>(format_desc_.size, n); \r
\r
- active_ = executor_.begin_invoke([]{});\r
- \r
+ for(int n = 0; n < executor_.capacity(); ++n)\r
+ schedule_next_video(core::read_frame::empty());\r
+ \r
CASPAR_LOG(info) << print() << L" Successfully Initialized.";\r
}\r
\r
if(!BLUE_PASS(set_card_property(blue_, VIDEO_BLACKGENERATOR, 1)))\r
CASPAR_LOG(error)<< print() << TEXT(" Failed to disable video output."); \r
}\r
-\r
+ \r
void send(const safe_ptr<const core::read_frame>& frame)\r
{ \r
+ schedule_next_video(frame); \r
+ }\r
+ \r
+ void schedule_next_video(const safe_ptr<const core::read_frame>& frame)\r
+ {\r
static std::vector<short> silence(MAX_HANC_BUFFER_SIZE, 0);\r
- \r
- active_.get();\r
- active_ = executor_.begin_invoke([=]\r
+ \r
+ executor_.begin_invoke([=]\r
{\r
try\r
{\r
const size_t audio_samples = static_cast<size_t>(48000.0 / format_desc_.fps);\r
const size_t audio_nchannels = 2;\r
\r
- fast_memcpy(reserved_frames_.front()->image_data(), frame->image_data().begin(), frame->image_data().size());\r
- \r
+ if(!frame->image_data().empty())\r
+ fast_memcpy(reserved_frames_.front()->image_data(), frame->image_data().begin(), frame->image_data().size());\r
+ else\r
+ fast_memclr(reserved_frames_.front()->image_data(), reserved_frames_.front()->image_size());\r
+\r
sync_timer_.restart();\r
unsigned long n_field = 0;\r
blue_->wait_output_video_synch(UPD_FMT_FRAME, n_field);\r
\r
struct bluefish_consumer_proxy : public core::frame_consumer\r
{\r
- std::unique_ptr<bluefish_consumer> consumer_;\r
- const size_t device_index_;\r
- const bool embedded_audio_;\r
- bool key_only_;\r
+ std::unique_ptr<bluefish_consumer> consumer_;\r
+ const size_t device_index_;\r
+ const bool embedded_audio_;\r
+ bool key_only_;\r
+ size_t buffer_depth_;\r
public:\r
\r
- bluefish_consumer_proxy(size_t device_index, bool embedded_audio, bool key_only)\r
+ bluefish_consumer_proxy(size_t device_index, bool embedded_audio, bool key_only, size_t buffer_depth)\r
: device_index_(device_index)\r
, embedded_audio_(embedded_audio)\r
- , key_only_(key_only){}\r
+ , key_only_(key_only)\r
+ , buffer_depth_(buffer_depth){}\r
\r
virtual void initialize(const core::video_format_desc& format_desc)\r
{\r
- consumer_.reset(new bluefish_consumer(format_desc, device_index_, embedded_audio_));\r
+ consumer_.reset(new bluefish_consumer(format_desc, device_index_, embedded_audio_, buffer_depth_));\r
}\r
\r
virtual void send(const safe_ptr<const core::read_frame>& frame)\r
{\r
return key_only_;\r
}\r
+\r
+ virtual size_t buffer_depth() const\r
+ {\r
+ return consumer_->executor_.capacity();\r
+ }\r
}; \r
\r
std::wstring get_bluefish_version()\r
bool embedded_audio = std::find(params.begin(), params.end(), L"EMBEDDED_AUDIO") != params.end();\r
bool key_only = std::find(params.begin(), params.end(), L"KEY_ONLY") != params.end();\r
\r
- return make_safe<bluefish_consumer_proxy>(device_index, embedded_audio, key_only);\r
+ return make_safe<bluefish_consumer_proxy>(device_index, embedded_audio, key_only, 3);\r
}\r
\r
safe_ptr<core::frame_consumer> create_bluefish_consumer(const boost::property_tree::ptree& ptree) \r
{ \r
- auto device_index = ptree.get("device", 0);\r
+ auto device_index = ptree.get("device", 1);\r
auto embedded_audio = ptree.get("embedded-audio", false);\r
bool key_only = ptree.get("key-only", false);\r
+ size_t buffer_depth = ptree.get("buffer-depth", 3);\r
\r
- return make_safe<bluefish_consumer_proxy>(device_index, embedded_audio, key_only);\r
+ return make_safe<bluefish_consumer_proxy>(device_index, embedded_audio, key_only, buffer_depth);\r
}\r
\r
}
\ No newline at end of file
bool external_key;\r
bool low_latency;\r
bool key_only;\r
+ size_t buffer_depth;\r
\r
configuration()\r
: device_index(1)\r
, embedded_audio(false)\r
, external_key(false)\r
, low_latency(false)\r
- , key_only(false){}\r
+ , key_only(false)\r
+ , buffer_depth(5){}\r
};\r
\r
class decklink_frame_adapter : public IDeckLinkVideoFrame\r
\r
unsigned long frames_scheduled_;\r
unsigned long audio_scheduled_;\r
+\r
+ size_t preroll_count_;\r
\r
std::list<std::shared_ptr<IDeckLinkVideoFrame>> frame_container_; // Must be std::list in order to guarantee that pointers are always valid.\r
boost::circular_buffer<std::vector<short>> audio_container_;\r
, keyer_(decklink_)\r
, model_name_(get_model_name(decklink_))\r
, format_desc_(format_desc)\r
- , buffer_size_(config.embedded_audio ? 5 : 4) // Minimum buffer-size (3 + 1 tolerance).\r
+ , buffer_size_(config.embedded_audio ? config.buffer_depth + 1 : config.buffer_depth) // Minimum buffer-size 3.\r
, frames_scheduled_(0)\r
, audio_scheduled_(0)\r
+ , preroll_count_(0)\r
, audio_container_(buffer_size_+1)\r
{\r
is_running_ = true;\r
\r
set_latency(config.low_latency); \r
set_keyer(config.external_key);\r
- \r
+ \r
+ if(config.embedded_audio) \r
+ output_->BeginAudioPreroll(); \r
+ \r
for(size_t n = 0; n < buffer_size_; ++n)\r
schedule_next_video(core::read_frame::empty());\r
- \r
- if(config.embedded_audio)\r
- output_->BeginAudioPreroll();\r
- else\r
+\r
+ if(!config.embedded_audio)\r
start_playback();\r
- \r
+ \r
CASPAR_LOG(info) << print() << L" Buffer depth: " << buffer_size_; \r
CASPAR_LOG(info) << print() << L" Successfully Initialized."; \r
}\r
return E_FAIL;\r
\r
try\r
- {\r
- std::shared_ptr<const core::read_frame> frame;\r
- audio_frame_buffer_.pop(frame);\r
- schedule_next_audio(safe_ptr<const core::read_frame>(frame)); \r
-\r
+ { \r
if(preroll)\r
- start_playback();\r
+ {\r
+ if(++preroll_count_ >= buffer_size_)\r
+ {\r
+ output_->EndAudioPreroll();\r
+ start_playback(); \r
+ }\r
+ else\r
+ schedule_next_audio(core::read_frame::empty()); \r
+ }\r
+ else\r
+ {\r
+ std::shared_ptr<const core::read_frame> frame;\r
+ audio_frame_buffer_.pop(frame);\r
+ schedule_next_audio(safe_ptr<const core::read_frame>(frame)); \r
+ }\r
}\r
catch(...)\r
{\r
{\r
return context_->get_video_format_desc();\r
}\r
+\r
+ virtual size_t buffer_depth() const\r
+ {\r
+ return context_->buffer_size_;\r
+ }\r
}; \r
\r
safe_ptr<core::frame_consumer> create_decklink_consumer(const std::vector<std::wstring>& params) \r
{\r
configuration config;\r
\r
- config.external_key = ptree.get("external-key", false);\r
- config.low_latency = ptree.get("low-latency", false);\r
- config.key_only = ptree.get("key-only", false);\r
- config.device_index = ptree.get("device", 0);\r
- config.embedded_audio = ptree.get("embedded-audio", false);\r
+ config.external_key = ptree.get("external-key", config.external_key);\r
+ config.low_latency = ptree.get("low-latency", config.low_latency);\r
+ config.key_only = ptree.get("key-only", config.key_only);\r
+ config.device_index = ptree.get("device", config.device_index);\r
+ config.embedded_audio = ptree.get("embedded-audio", config.embedded_audio);\r
+ config.buffer_depth = ptree.get("buffer-depth", config.buffer_depth);\r
\r
return make_safe<decklink_consumer_proxy>(config);\r
}\r
}\r
};\r
\r
-safe_ptr<cg_producer> get_default_cg_producer(const safe_ptr<core::channel>& channel, int render_layer)\r
+safe_ptr<cg_producer> get_default_cg_producer(const safe_ptr<core::video_channel>& video_channel, int render_layer)\r
{ \r
- auto flash_producer = channel->producer()->foreground(render_layer).get();\r
+ auto flash_producer = video_channel->producer()->foreground(render_layer).get();\r
\r
if(flash_producer->print().find(L"flash") == std::string::npos)\r
{\r
- flash_producer = create_flash_producer(channel->mixer(), boost::assign::list_of(env::template_host())); \r
- channel->producer()->load(render_layer, flash_producer, true); \r
- channel->producer()->play(render_layer);\r
+ flash_producer = create_flash_producer(video_channel->mixer(), boost::assign::list_of(env::template_host())); \r
+ video_channel->producer()->load(render_layer, flash_producer, true); \r
+ video_channel->producer()->play(render_layer);\r
}\r
\r
return make_safe<cg_producer>(flash_producer);\r
#include <core/producer/frame_producer.h>\r
#include <core/producer/frame_producer_device.h>\r
#include <core/video_format.h>\r
-#include <core/channel.h>\r
+#include <core/video_channel.h>\r
\r
#include <string>\r
\r
struct implementation;\r
std::shared_ptr<implementation> impl_;\r
};\r
-safe_ptr<cg_producer> get_default_cg_producer(const safe_ptr<core::channel>& channel, int layer_index = cg_producer::DEFAULT_LAYER);\r
+safe_ptr<cg_producer> get_default_cg_producer(const safe_ptr<core::video_channel>& video_channel, int layer_index = cg_producer::DEFAULT_LAYER);\r
\r
safe_ptr<core::frame_producer> create_ct_producer(const safe_ptr<core::frame_factory> frame_factory, const std::vector<std::wstring>& params);\r
\r
if(!frame->audio_data().empty())\r
input_.push(std::vector<short>(frame->audio_data().begin(), frame->audio_data().end())); \r
else\r
- input_.push(std::vector<short>(static_cast<size_t>(48000.0f/format_desc_.fps)*2, 0)); \r
+ input_.push(std::vector<short>(3840, 0)); //static_cast<size_t>(48000.0f/format_desc_.fps)*2\r
}\r
\r
size_t buffer_depth() const{return 3;}\r
#include "../util/clientinfo.h"\r
\r
#include <core/consumer/frame_consumer.h>\r
-#include <core/channel.h>\r
+#include <core/video_channel.h>\r
\r
#include <boost/algorithm/string.hpp>\r
\r
void SetClientInfo(IO::ClientInfoPtr& s){pClientInfo_ = s;}\r
IO::ClientInfoPtr GetClientInfo(){return pClientInfo_;}\r
\r
- void SetChannel(const std::shared_ptr<core::channel>& pChannel){pChannel_ = pChannel;}\r
- std::shared_ptr<core::channel> GetChannel(){return pChannel_;}\r
+ void SetChannel(const std::shared_ptr<core::video_channel>& pChannel){pChannel_ = pChannel;}\r
+ std::shared_ptr<core::video_channel> GetChannel(){return pChannel_;}\r
\r
- void SetChannels(const std::vector<safe_ptr<core::channel>>& channels){channels_ = channels;}\r
- const std::vector<safe_ptr<core::channel>>& GetChannels() { return channels_; }\r
+ void SetChannels(const std::vector<safe_ptr<core::video_channel>>& channels){channels_ = channels;}\r
+ const std::vector<safe_ptr<core::video_channel>>& GetChannels() { return channels_; }\r
\r
void SetChannelIndex(unsigned int channelIndex){channelIndex_ = channelIndex;}\r
unsigned int GetChannelIndex(){return channelIndex_;}\r
unsigned int channelIndex_;\r
int layerIndex_;\r
IO::ClientInfoPtr pClientInfo_;\r
- std::shared_ptr<core::channel> pChannel_;\r
- std::vector<safe_ptr<core::channel>> channels_;\r
+ std::shared_ptr<core::video_channel> pChannel_;\r
+ std::vector<safe_ptr<core::video_channel>> channels_;\r
AMCPCommandScheduling scheduling_;\r
std::wstring replyString_;\r
};\r
\r
//std::function<std::wstring()> channel_cg_add_command::parse(const std::wstring& message, const std::vector<renderer::render_device_ptr>& channels)\r
//{\r
-// static boost::wregex expr(L"^CG\\s(?<CHANNEL>\\d+)-?(?<LAYER>\\d+)?\\sADD\\s(?<FLASH_LAYER>\\d+)\\s(?<TEMPLATE>\\S+)\\s?(?<START_LABEL>\\S\\S+)?\\s?(?<PLAY_ON_LOAD>\\d)?\\s?(?<DATA>.*)?");\r
+// static boost::wregex expr(L"^CG\s(?<CHANNEL>\d+)-?(?<LAYER>\d+)?\sADD\s(?<FLASH_LAYER>\d+)\s(?<TEMPLATE>\S+)\s?(?<START_LABEL>\S\S+)?\s?(?<PLAY_ON_LOAD>\d)?\s?(?<DATA>.*)?");\r
//\r
// boost::wsmatch what;\r
// if(!boost::regex_match(message, what, expr))\r
// std::wstring filename = templatename;\r
// filename.append(extension);\r
//\r
-// flash::get_default_cg_producer(info.channel, std::max<int>(DEFAULT_CHANNEL_LAYER+1, info.layer_index))\r
+// flash::get_default_cg_producer(info.video_channel, std::max<int>(DEFAULT_CHANNEL_LAYER+1, info.layer_index))\r
// ->add(flash_layer_index, filename, play_on_load, start_label, data);\r
//\r
// CASPAR_LOG(info) << L"Executed [amcp_channel_cg_add]";\r
std::wstring filename = _parameters[2];\r
filename.append(extension);\r
\r
- get_default_cg_producer(safe_ptr<core::channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->add(layer, filename, bDoStart, label, (pDataString!=0) ? pDataString : TEXT(""));\r
+ get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->add(layer, filename, bDoStart, label, (pDataString!=0) ? pDataString : TEXT(""));\r
SetReplyString(TEXT("202 CG OK\r\n"));\r
}\r
else\r
return false;\r
}\r
int layer = _ttoi(_parameters[1].c_str());\r
- get_default_cg_producer(safe_ptr<core::channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->play(layer);\r
+ get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->play(layer);\r
}\r
else\r
{\r
return false;\r
}\r
int layer = _ttoi(_parameters[1].c_str());\r
- get_default_cg_producer(safe_ptr<core::channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->stop(layer, 0);\r
+ get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->stop(layer, 0);\r
}\r
else \r
{\r
return false;\r
}\r
int layer = _ttoi(_parameters[1].c_str());\r
- get_default_cg_producer(safe_ptr<core::channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->next(layer);\r
+ get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->next(layer);\r
}\r
else \r
{\r
return false;\r
}\r
int layer = _ttoi(_parameters[1].c_str());\r
- get_default_cg_producer(safe_ptr<core::channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->remove(layer);\r
+ get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->remove(layer);\r
}\r
else \r
{\r
}\r
int layer = _ttoi(_parameters[1].c_str());\r
//TODO: Implement indirect data loading from file. Same as in Add\r
- get_default_cg_producer(safe_ptr<core::channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->update(layer, _parameters[2]);\r
+ get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->update(layer, _parameters[2]);\r
}\r
else \r
{\r
return false;\r
}\r
int layer = _ttoi(_parameters[1].c_str());\r
- get_default_cg_producer(safe_ptr<core::channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->invoke(layer, _parameters[2]);\r
+ get_default_cg_producer(safe_ptr<core::video_channel>(GetChannel()), GetLayerIndex(cg_producer::DEFAULT_LAYER))->invoke(layer, _parameters[2]);\r
}\r
else \r
{\r
return false;\r
}\r
\r
-void GenerateChannelInfo(int index, const safe_ptr<core::channel>& pChannel, std::wstringstream& replyString)\r
+void GenerateChannelInfo(int index, const safe_ptr<core::video_channel>& pChannel, std::wstringstream& replyString)\r
{\r
replyString << index+1 << TEXT(" ") << pChannel->get_video_format_desc().name << TEXT(" PLAYING") << TEXT("\r\n");\r
}\r
{\r
public:\r
std::wstring print() const { return L"InfoCommand";}\r
- InfoCommand(const std::vector<safe_ptr<core::channel>>& channels) : channels_(channels){}\r
+ InfoCommand(const std::vector<safe_ptr<core::video_channel>>& channels) : channels_(channels){}\r
bool DoExecute();\r
private:\r
- const std::vector<safe_ptr<core::channel>>& channels_;\r
+ const std::vector<safe_ptr<core::video_channel>>& channels_;\r
};\r
\r
class VersionCommand : public AMCPCommandBase<false, AddToQueue, 0>\r
\r
const std::wstring AMCPProtocolStrategy::MessageDelimiter = TEXT("\r\n");\r
\r
-inline std::shared_ptr<core::channel> GetChannelSafe(unsigned int index, const std::vector<safe_ptr<core::channel>>& channels)\r
+inline std::shared_ptr<core::video_channel> GetChannelSafe(unsigned int index, const std::vector<safe_ptr<core::video_channel>>& channels)\r
{\r
- return index < channels.size() ? std::shared_ptr<core::channel>(channels[index]) : nullptr;\r
+ return index < channels.size() ? std::shared_ptr<core::video_channel>(channels[index]) : nullptr;\r
}\r
\r
-AMCPProtocolStrategy::AMCPProtocolStrategy(const std::vector<safe_ptr<core::channel>>& channels) : channels_(channels) {\r
+AMCPProtocolStrategy::AMCPProtocolStrategy(const std::vector<safe_ptr<core::video_channel>>& channels) : channels_(channels) {\r
AMCPCommandQueuePtr pGeneralCommandQueue(new AMCPCommandQueue());\r
if(!pGeneralCommandQueue->Start()) {\r
CASPAR_LOG(error) << "Failed to start the general command-queue";\r
commandQueues_.push_back(pGeneralCommandQueue);\r
\r
\r
- std::shared_ptr<core::channel> pChannel;\r
+ std::shared_ptr<core::video_channel> pChannel;\r
unsigned int index = -1;\r
- //Create a commandpump for each channel\r
+ //Create a commandpump for each video_channel\r
while((pChannel = GetChannelSafe(++index, channels_)) != 0) {\r
AMCPCommandQueuePtr pChannelCommandQueue(new AMCPCommandQueue());\r
- std::wstring title = TEXT("CHANNEL ");\r
+ std::wstring title = TEXT("video_channel ");\r
\r
//HACK: Perform real conversion from int to string\r
TCHAR num = TEXT('1')+static_cast<TCHAR>(index);\r
goto ParseFinnished;\r
}\r
\r
- std::shared_ptr<core::channel> pChannel = GetChannelSafe(channelIndex, channels_);\r
+ std::shared_ptr<core::video_channel> pChannel = GetChannelSafe(channelIndex, channels_);\r
if(pChannel == 0) {\r
goto ParseFinnished;\r
}\r
#pragma once\r
\r
#include "../util/protocolstrategy.h"\r
-#include <core/channel.h>\r
+#include <core/video_channel.h>\r
\r
#include "AMCPCommand.h"\r
#include "AMCPCommandQueue.h"\r
AMCPProtocolStrategy& operator=(const AMCPProtocolStrategy&);\r
\r
public:\r
- AMCPProtocolStrategy(const std::vector<safe_ptr<core::channel>>& channels);\r
+ AMCPProtocolStrategy(const std::vector<safe_ptr<core::video_channel>>& channels);\r
virtual ~AMCPProtocolStrategy();\r
\r
virtual void Parse(const TCHAR* pData, int charCount, IO::ClientInfoPtr pClientInfo);\r
\r
bool QueueCommand(AMCPCommandPtr);\r
\r
- std::vector<safe_ptr<core::channel>> channels_;\r
+ std::vector<safe_ptr<core::video_channel>> channels_;\r
std::vector<AMCPCommandQueuePtr> commandQueues_;\r
static const std::wstring MessageDelimiter;\r
};\r
const std::wstring CIIProtocolStrategy::MessageDelimiter = TEXT("\r\n");\r
const TCHAR CIIProtocolStrategy::TokenDelimiter = TEXT('\\');\r
\r
-CIIProtocolStrategy::CIIProtocolStrategy(const std::vector<safe_ptr<core::channel>>& channels) : pChannel_(channels.at(0)), executor_(L"CIIProtocolStrategy")\r
+CIIProtocolStrategy::CIIProtocolStrategy(const std::vector<safe_ptr<core::video_channel>>& channels) : pChannel_(channels.at(0)), executor_(L"CIIProtocolStrategy")\r
{\r
}\r
\r
\r
#pragma once\r
\r
-#include <core/channel.h>\r
+#include <core/video_channel.h>\r
\r
#include "../util/ProtocolStrategy.h"\r
#include "CIICommand.h"\r
class CIIProtocolStrategy : public IO::IProtocolStrategy\r
{\r
public:\r
- CIIProtocolStrategy(const std::vector<safe_ptr<core::channel>>& channels);\r
+ CIIProtocolStrategy(const std::vector<safe_ptr<core::video_channel>>& channels);\r
\r
void Parse(const TCHAR* pData, int charCount, IO::ClientInfoPtr pClientInfo);\r
UINT GetCodepage() {return 28591;} //ISO 8859-1\r
\r
void SetProfile(const std::wstring& profile) {currentProfile_ = profile;}\r
\r
- safe_ptr<core::channel> GetChannel() const{return this->pChannel_;}\r
+ safe_ptr<core::video_channel> GetChannel() const{return this->pChannel_;}\r
\r
void DisplayMediaFile(const std::wstring& filename);\r
void DisplayTemplate(const std::wstring& titleName);\r
std::wstring currentMessage_;\r
\r
std::wstring currentProfile_;\r
- safe_ptr<core::channel> pChannel_;\r
+ safe_ptr<core::video_channel> pChannel_;\r
};\r
\r
}}}
\ No newline at end of file
\r
namespace caspar { namespace protocol { namespace CLK {\r
\r
-CLKProtocolStrategy::CLKProtocolStrategy(const std::vector<safe_ptr<core::channel>>& channels) \r
+CLKProtocolStrategy::CLKProtocolStrategy(const std::vector<safe_ptr<core::video_channel>>& channels) \r
: currentState_(ExpectingNewCommand), bClockLoaded_(false), pChannel_(channels.at(0))\r
{}\r
\r
\r
#include "CLKCommand.h"\r
#include "../util/ProtocolStrategy.h"\r
-#include <core/channel.h>\r
+#include <core/video_channel.h>\r
\r
namespace caspar { namespace protocol { namespace CLK {\r
\r
class CLKProtocolStrategy : public IO::IProtocolStrategy\r
{\r
public:\r
- CLKProtocolStrategy(const std::vector<safe_ptr<core::channel>>& channels);\r
+ CLKProtocolStrategy(const std::vector<safe_ptr<core::video_channel>>& channels);\r
\r
void Parse(const TCHAR* pData, int charCount, IO::ClientInfoPtr pClientInfo);\r
UINT GetCodepage() { return 28591; } //ISO 8859-1\r
CLKCommand currentCommand_;\r
std::wstringstream currentCommandString_;\r
\r
- safe_ptr<core::channel> pChannel_;\r
+ safe_ptr<core::video_channel> pChannel_;\r
\r
bool bClockLoaded_;\r
};\r
#include <common/utility/string.h>\r
\r
#include <core/mixer/gpu/ogl_device.h>\r
-#include <core/channel.h>\r
+#include <core/video_channel.h>\r
\r
#include <modules/bluefish/bluefish.h>\r
#include <modules/decklink/decklink.h>\r
struct server::implementation : boost::noncopyable\r
{\r
std::vector<safe_ptr<IO::AsyncEventServer>> async_servers_; \r
- std::vector<safe_ptr<channel>> channels_;\r
+ std::vector<safe_ptr<video_channel>> channels_;\r
ogl_device ogl_;\r
\r
implementation() \r
if(format_desc.format == video_format::invalid)\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Invalid video-mode."));\r
\r
- channels_.push_back(channel(channels_.size(), format_desc, ogl_));\r
+ channels_.push_back(video_channel(channels_.size(), format_desc, ogl_));\r
\r
int index = 0;\r
BOOST_FOREACH(auto& xml_consumer, xml_channel.second.get_child("consumers"))\r
else if(name == "file") \r
channels_.back()->consumer()->add(index++, create_ffmpeg_consumer(xml_consumer.second)); \r
else if(name == "audio")\r
- channels_.back()->consumer()->add(index++, oal_consumer()); \r
+ channels_.back()->consumer()->add(index++, make_safe<oal_consumer>()); \r
else if(name != "<xmlcomment>")\r
CASPAR_LOG(warning) << "Invalid consumer: " << widen(name); \r
}\r
\r
server::server() : impl_(new implementation()){}\r
\r
-const std::vector<safe_ptr<channel>> server::get_channels() const\r
+const std::vector<safe_ptr<video_channel>> server::get_channels() const\r
{\r
return impl_->channels_;\r
}\r
\r
#pragma once\r
\r
-#include <core/channel.h>\r
+#include <core/video_channel.h>\r
\r
#include <common/exception/exceptions.h>\r
\r
public:\r
server();\r
\r
- const std::vector<safe_ptr<core::channel>> get_channels() const;\r
+ const std::vector<safe_ptr<core::video_channel>> get_channels() const;\r
\r
private:\r
struct implementation;\r