\r
#include "channel.h"\r
\r
+#include "channel_context.h"\r
+\r
#include "video_format.h"\r
#include "producer/layer.h"\r
\r
\r
// Pimpl for core::channel. This hunk collapses the loose per-channel members
// (index, format_desc, two executors, ogl) into a single channel_context that
// is shared by reference with the producer/mixer/consumer devices.
struct channel::implementation : boost::noncopyable\r
{\r
-	const int index_;\r
-	video_format_desc format_desc_;\r
-	\r
-	ogl_device& ogl_;\r
-	executor context_;\r
-	executor destroy_context_;\r
+	// Declared before the devices below so it is constructed before (and\r
+	// destroyed after) everything that holds a reference to it.\r
+	channel_context context_;\r
\r
	std::shared_ptr<frame_consumer_device> consumer_;\r
	std::shared_ptr<frame_mixer_device> mixer_;\r
	// NOTE(review): producer_ is initialized below but its declaration is not\r
	// visible in this hunk — presumably declared further down; confirm.\r
\r
public:\r
	implementation(int index, const video_format_desc& format_desc, ogl_device& ogl) \r
-		: index_(index)\r
-		, format_desc_(format_desc)\r
-		, ogl_(ogl)\r
-		, consumer_(new frame_consumer_device(context_, format_desc, ogl))\r
-		, mixer_(new frame_mixer_device(context_, format_desc, [=](const safe_ptr<read_frame>& frame){consumer_->send(frame);}, ogl))\r
-		, producer_(new frame_producer_device(context_, destroy_context_, format_desc_, [=](const std::map<int, safe_ptr<basic_frame>>& frames){mixer_->send(frames);})) \r
-		, context_(print() + L"/render")\r
-		, destroy_context_(print() + L"/destroy")\r
+		: context_(index, ogl, format_desc)\r
+		, consumer_(new frame_consumer_device(context_))\r
+		, mixer_(new frame_mixer_device(context_, [=](const safe_ptr<read_frame>& frame){consumer_->send(frame);}))\r
+		, producer_(new frame_producer_device(context_, [=](const std::map<int, safe_ptr<basic_frame>>& frames){mixer_->send(frames);})) \r
	{\r
-		context_.set_priority_class(above_normal_priority_class);\r
-		destroy_context_.set_priority_class(below_normal_priority_class);\r
		CASPAR_LOG(info) << print() << " Successfully Initialized.";\r
	}\r
\r
	~implementation()\r
	{\r
		// Stop context before destroying devices.\r
-		context_.stop();\r
-		context_.join();\r
+		context_.execution.stop();\r
+		context_.execution.join();\r
	}\r
\r
	std::wstring print() const\r
	{\r
-		return L"channel[" + boost::lexical_cast<std::wstring>(index_+1) + L"-" + format_desc_.name + L"]";\r
+		return context_.print();\r
	}\r
\r
	void set_video_format_desc(const video_format_desc& format_desc)\r
	{\r
-		format_desc_ = format_desc;\r
-		producer_.reset();\r
-		mixer_.reset();\r
-\r
-		consumer_->set_video_format_desc(format_desc_);\r
-		mixer_ = std::make_shared<frame_mixer_device>(context_, format_desc_, [=](const safe_ptr<read_frame>& frame){consumer_->send(frame);}, ogl_);\r
-		producer_ = std::make_shared<frame_producer_device>(context_, destroy_context_, format_desc_, [=](const std::map<int, safe_ptr<basic_frame>>& frames){mixer_->send(frames);});\r
+		// Format switch is now asynchronous: the new format is written on the\r
+		// execution thread and the devices pick it up lazily, instead of the\r
+		// old tear-down-and-rebuild of mixer/producer.\r
+		context_.execution.begin_invoke([=]\r
+		{\r
+			context_.format_desc = format_desc;\r
+		});\r
	}\r
};\r
\r
// Public channel API — thin forwarders into the pimpl.\r
safe_ptr<frame_producer_device> channel::producer() { return make_safe(impl_->producer_);} \r
safe_ptr<frame_mixer_device> channel::mixer() { return make_safe(impl_->mixer_);} \r
safe_ptr<frame_consumer_device> channel::consumer() { return make_safe(impl_->consumer_);} \r
-const video_format_desc& channel::get_video_format_desc() const{return impl_->format_desc_;}\r
+// NOTE(review): returns a reference to state that set_video_format_desc now\r
+// mutates on the execution thread — confirm callers tolerate the race.\r
+const video_format_desc& channel::get_video_format_desc() const{return impl_->context_.format_desc;}\r
void channel::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
std::wstring channel::print() const { return impl_->print();}\r
\r
--- /dev/null
+#pragma once\r
+\r
+#include <common/concurrency/executor.h>\r
+\r
+#include <core/mixer/gpu/ogl_device.h>\r
+#include <core/video_format.h>\r
+\r
+#include <boost/noncopyable.hpp>\r
+#include <boost/lexical_cast.hpp>\r
+\r
+#include <string>\r
+\r
+namespace caspar { namespace core {\r
+\r
+// Shared per-channel state: identity, output format, render/destruction\r
+// executors and the (not owned) gpu device. Passed by reference to the\r
+// channel's producer/mixer/consumer devices.\r
+struct channel_context\r
+{\r
+	// NOTE: index and format_desc are declared — and therefore initialized —\r
+	// before the executors, because the executor names are built via print(),\r
+	// which reads both members. With the previous declaration order (index,\r
+	// execution, destruction, ogl, format_desc) print() ran against a\r
+	// not-yet-constructed video_format_desc, which is undefined behavior.\r
+	channel_context(int index, ogl_device& ogl, const video_format_desc& format_desc) \r
+		: index(index)\r
+		, format_desc(format_desc)\r
+		, execution(print() + L"/execution")\r
+		, destruction(print() + L"/destruction")\r
+		, ogl(ogl)\r
+	{\r
+		execution.set_priority_class(above_normal_priority_class);\r
+		destruction.set_priority_class(below_normal_priority_class);\r
+	}\r
+\r
+	const int index;               // 0-based channel index; print() shows it 1-based.\r
+	video_format_desc format_desc; // Current output format; mutated on the execution thread.\r
+	executor execution;            // High-priority render pipeline thread.\r
+	executor destruction;          // Low-priority thread for deferred destruction.\r
+	ogl_device& ogl;               // Shared gpu device (not owned).\r
+\r
+	std::wstring print() const\r
+	{\r
+		return L"channel[" + boost::lexical_cast<std::wstring>(index+1) + L"-" + format_desc.name + L"]";\r
+	}\r
+};\r
+ \r
+}}
\ No newline at end of file
*/\r
#pragma once\r
\r
+#include "../video_format.h"\r
+\r
#include <common/memory/safe_ptr.h>\r
\r
#include <boost/noncopyable.hpp>\r
virtual void initialize(const video_format_desc& format_desc) = 0;\r
virtual std::wstring print() const = 0;\r
virtual bool has_synchronization_clock() const {return true;}\r
+ virtual const core::video_format_desc& get_video_format_desc() const = 0; // nothrow\r
\r
static const safe_ptr<frame_consumer>& empty()\r
{\r
struct empty_frame_consumer : public frame_consumer\r
{\r
+ core::video_format_desc format_desc;\r
virtual void send(const safe_ptr<const read_frame>&){}\r
virtual size_t buffer_depth() const{return 0;}\r
virtual void initialize(const video_format_desc&){}\r
virtual std::wstring print() const {return L"empty";}\r
virtual bool has_synchronization_clock() const {return false;}\r
+ virtual const core::video_format_desc& get_video_format_desc() const{return format_desc;}; // nothrow\r
};\r
static safe_ptr<frame_consumer> consumer = make_safe<empty_frame_consumer>();\r
return consumer;\r
\r
#include "frame_consumer_device.h"\r
\r
+#include "../channel_context.h"\r
+\r
#include "../video_format.h"\r
#include "../mixer/gpu/ogl_device.h"\r
#include "../mixer/read_frame.h"\r
{ \r
typedef std::pair<safe_ptr<const read_frame>, safe_ptr<const read_frame>> fill_and_key;\r
\r
+ channel_context& channel_;\r
+\r
boost::circular_buffer<fill_and_key> buffer_;\r
\r
std::map<int, safe_ptr<frame_consumer>> consumers_;\r
\r
high_prec_timer timer_;\r
-\r
- video_format_desc format_desc_;\r
-\r
+ \r
safe_ptr<diagnostics::graph> diag_;\r
\r
boost::timer frame_timer_;\r
boost::timer tick_timer_;\r
-\r
- ogl_device& ogl_;\r
- \r
- executor& context_; \r
+ \r
public:\r
- implementation(executor& context, const video_format_desc& format_desc, ogl_device& ogl) \r
- : format_desc_(format_desc)\r
+ implementation(channel_context& channel) \r
+ : channel_(channel)\r
, diag_(diagnostics::create_graph(std::string("frame_consumer_device")))\r
- , ogl_(ogl)\r
- , context_(context)\r
{ \r
diag_->set_color("input-buffer", diagnostics::color(1.0f, 1.0f, 0.0f)); \r
diag_->add_guide("frame-time", 0.5f); \r
diag_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
diag_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));\r
-\r
- context_.set_priority_class(above_normal_priority_class);\r
}\r
\r
void add(int index, safe_ptr<frame_consumer>&& consumer)\r
{ \r
- consumer->initialize(format_desc_);\r
- context_.invoke([&]\r
+ consumer->initialize(channel_.format_desc);\r
+ channel_.execution.invoke([&]\r
{\r
buffer_.set_capacity(std::max(buffer_.capacity(), consumer->buffer_depth()));\r
\r
\r
void remove(int index)\r
{\r
- context_.invoke([&]\r
+ channel_.execution.invoke([&]\r
{\r
auto it = consumers_.find(index);\r
if(it != consumers_.end())\r
if(has_key_only)\r
{\r
// Currently do key_only transform on cpu. Unsure if the extra 400MB/s (1080p50) overhead is worth it to do it on gpu.\r
- auto key_data = ogl_.create_host_buffer(frame->image_data().size(), host_buffer::write_only); \r
+ auto key_data = channel_.ogl.create_host_buffer(frame->image_data().size(), host_buffer::write_only); \r
fast_memsfhl(key_data->data(), frame->image_data().begin(), frame->image_data().size(), 0x0F0F0F0F, 0x0B0B0B0B, 0x07070707, 0x03030303);\r
std::vector<int16_t> audio_data(frame->audio_data().begin(), frame->audio_data().end());\r
return make_safe<read_frame>(std::move(key_data), std::move(audio_data));\r
\r
	// Queue a mixed frame to all consumers on the channel execution thread.\r
	void send(const safe_ptr<read_frame>& frame)\r
	{ 	\r
-		context_.invoke([=]\r
+		channel_.execution.invoke([=]\r
		{\r
			if(!has_synchronization_clock())\r
-				timer_.tick(1.0/format_desc_.fps);\r
+				timer_.tick(1.0/channel_.format_desc.fps);\r
\r
-			diag_->set_value("input-buffer", static_cast<float>(context_.size())/static_cast<float>(context_.capacity()));\r
+			diag_->set_value("input-buffer", static_cast<float>(channel_.execution.size())/static_cast<float>(channel_.execution.capacity()));\r
			frame_timer_.restart();\r
\r
			buffer_.push_back(std::make_pair(frame, get_key_frame(frame)));\r
\r
			for_each_consumer([&](safe_ptr<frame_consumer>& consumer)\r
			{\r
+				// Lazy re-init: format switches only write channel_.format_desc,\r
+				// so stale consumers are re-initialized here on the next frame.\r
+				if(consumer->get_video_format_desc() != channel_.format_desc)\r
+					consumer->initialize(channel_.format_desc);\r
+\r
				auto pair = buffer_[consumer->buffer_depth()-1];\r
-				\r
-				consumer->send(consumer->key_only() ? pair.second : pair.first);\r
+				// NOTE(review): this local shadows the captured parameter 'frame'.\r
+				auto frame = consumer->key_only() ? pair.second : pair.first;\r
+\r
+				// Drop frames still in the old format during a format switch.\r
+				if(static_cast<size_t>(frame->image_data().size()) == consumer->get_video_format_desc().size)\r
+					consumer->send(frame);\r
			});\r
\r
-			diag_->update_value("frame-time", frame_timer_.elapsed()*format_desc_.fps*0.5);\r
+			diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
\r
-			diag_->update_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);\r
+			diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
			tick_timer_.restart();\r
		});\r
-		diag_->set_value("input-buffer", static_cast<float>(context_.size())/static_cast<float>(context_.capacity()));\r
+		diag_->set_value("input-buffer", static_cast<float>(channel_.execution.size())/static_cast<float>(channel_.execution.capacity()));\r
	}\r
- \r
- void set_video_format_desc(const video_format_desc& format_desc)\r
- {\r
- context_.invoke([&]\r
- {\r
- format_desc_ = format_desc;\r
- buffer_.clear();\r
- \r
- for_each_consumer([&](safe_ptr<frame_consumer>& consumer)\r
- {\r
- consumer->initialize(format_desc_);\r
- });\r
- });\r
- }\r
- \r
+ \r
void for_each_consumer(const std::function<void(safe_ptr<frame_consumer>& consumer)>& func)\r
{\r
auto it = consumers_.begin();\r
}\r
};\r
\r
-frame_consumer_device::frame_consumer_device(executor& context, const video_format_desc& format_desc, ogl_device& ogl) \r
- : impl_(new implementation(context, format_desc, ogl)){}\r
+frame_consumer_device::frame_consumer_device(channel_context& channel) \r
+ : impl_(new implementation(channel)){}\r
void frame_consumer_device::add(int index, safe_ptr<frame_consumer>&& consumer){impl_->add(index, std::move(consumer));}\r
void frame_consumer_device::remove(int index){impl_->remove(index);}\r
void frame_consumer_device::send(const safe_ptr<read_frame>& future_frame) { impl_->send(future_frame); }\r
-void frame_consumer_device::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
}}
\ No newline at end of file
class basic_frame;\r
struct video_format_desc;\r
class ogl_device;\r
+struct channel_context;\r
\r
class frame_consumer_device : boost::noncopyable\r
{\r
public:\r
- explicit frame_consumer_device(executor& context, const video_format_desc& format_desc, ogl_device& ogl);\r
+ explicit frame_consumer_device(channel_context& channel);\r
\r
void add(int index, safe_ptr<frame_consumer>&& consumer);\r
void remove(int index);\r
\r
void send(const safe_ptr<read_frame>& future_frame); // nothrow\r
- \r
- void set_video_format_desc(const video_format_desc& format_desc);\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
</ItemDefinitionGroup>\r
<ItemGroup>\r
<ClInclude Include="channel.h" />\r
+ <ClInclude Include="channel_context.h" />\r
<ClInclude Include="consumer\frame_consumer_device.h" />\r
<ClInclude Include="consumer\frame_consumer.h" />\r
<ClInclude Include="mixer\audio\audio_mixer.h" />\r
<ClInclude Include="mixer\frame_mixer_device.h">\r
<Filter>mixer</Filter>\r
</ClInclude>\r
+ <ClInclude Include="channel_context.h" />\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="producer\transition\transition_producer.cpp">\r
\r
struct audio_mixer::implementation\r
{\r
- std::vector<int16_t> audio_data_;\r
+ std::deque<std::vector<int16_t>> audio_data_;\r
std::stack<core::audio_transform> transform_stack_;\r
\r
std::map<int, core::audio_transform> prev_audio_transforms_;\r
implementation()\r
{\r
transform_stack_.push(core::audio_transform());\r
+ audio_data_.push_back(std::vector<int16_t>());\r
}\r
\r
void begin(const core::basic_frame& frame)\r
auto& audio_data = frame.audio_data();\r
auto tag = frame.tag(); // Get the identifier for the audio-stream.\r
\r
- if(audio_data_.empty())\r
- audio_data_.resize(audio_data.size(), 0);\r
+ if(audio_data_.back().empty())\r
+ audio_data_.back().resize(audio_data.size(), 0);\r
\r
auto next = transform_stack_.top();\r
auto prev = next;\r
auto next_gain = static_cast<int>(next.get_gain()*BASE);\r
auto prev_gain = static_cast<int>(prev.get_gain()*BASE);\r
\r
- int n_samples = audio_data_.size();\r
+ int n_samples = audio_data_.back().size();\r
\r
tbb::parallel_for\r
(\r
\r
int sample = (static_cast<int>(audio_data[n])*sample_gain)/BASE;\r
\r
- audio_data_[n] = static_cast<int16_t>((static_cast<int>(audio_data_[n]) + sample) & 0xFFFF);\r
+ audio_data_.back()[n] = static_cast<int16_t>((static_cast<int>(audio_data_.back()[n]) + sample) & 0xFFFF);\r
}\r
}\r
);\r
}\r
\r
-\r
void begin(const core::audio_transform& transform)\r
{\r
transform_stack_.push(transform_stack_.top()*transform);\r
{\r
transform_stack_.pop();\r
}\r
-\r
-\r
+ \r
	// Pop the oldest mixed audio buffer and queue a fresh empty one.\r
	// audio_data_ is now a deque of buffers (instead of a single vector),\r
	// introducing one tick of buffered audio between mixing and output.\r
	std::vector<int16_t> mix()\r
	{\r
		prev_audio_transforms_ = std::move(next_audio_transforms_);	\r
-		return std::move(audio_data_);\r
+		auto result = std::move(audio_data_.front());\r
+		audio_data_.pop_front();\r
+		audio_data_.push_back(std::vector<int16_t>());\r
+		return std::move(result);\r
	}\r
};\r
\r
#include "audio/audio_mixer.h"\r
#include "image/image_mixer.h"\r
\r
+#include "../channel_context.h"\r
+\r
#include <common/exception/exceptions.h>\r
#include <common/concurrency/executor.h>\r
#include <common/diagnostics/graph.h>\r
\r
struct frame_mixer_device::implementation : boost::noncopyable\r
{ \r
- const core::video_format_desc format_desc_;\r
+ channel_context& channel_;\r
\r
safe_ptr<diagnostics::graph> diag_;\r
boost::timer frame_timer_;\r
\r
boost::fusion::map<boost::fusion::pair<core::image_transform, tweened_transform<core::image_transform>>,\r
boost::fusion::pair<core::audio_transform, tweened_transform<core::audio_transform>>> root_transforms_;\r
- \r
- executor& context_;\r
public:\r
- implementation(executor& context, const core::video_format_desc& format_desc, const output_t& output, ogl_device& ogl) \r
- : format_desc_(format_desc)\r
+ implementation(channel_context& channel, const output_t& output) \r
+ : channel_(channel)\r
, diag_(diagnostics::create_graph(narrow(print())))\r
- , image_mixer_(format_desc, ogl)\r
+ , image_mixer_(channel_)\r
, output_(output)\r
- , context_(context)\r
{\r
diag_->add_guide("frame-time", 0.5f); \r
diag_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
CASPAR_LOG(info) << print() << L" Successfully initialized."; \r
}\r
\r
- boost::unique_future<safe_ptr<host_buffer>> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)\r
+ safe_ptr<host_buffer> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)\r
{ \r
auto& root_image_transform = boost::fusion::at_key<core::image_transform>(root_transforms_);\r
auto& image_transforms = boost::fusion::at_key<core::image_transform>(transforms_);\r
{\r
image_mixer_.begin_layer();\r
\r
- if(format_desc_.mode != core::video_mode::progressive)\r
+ if(channel_.format_desc.mode != core::video_mode::progressive)\r
{\r
auto frame1 = make_safe<core::basic_frame>(frame.second);\r
auto frame2 = make_safe<core::basic_frame>(frame.second);\r
frame2->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);\r
\r
if(frame1->get_image_transform() != frame2->get_image_transform())\r
- core::basic_frame::interlace(frame1, frame2, format_desc_.mode)->accept(image_mixer_);\r
+ core::basic_frame::interlace(frame1, frame2, channel_.format_desc.mode)->accept(image_mixer_);\r
else\r
frame2->accept(image_mixer_);\r
}\r
\r
BOOST_FOREACH(auto& frame, frames)\r
{\r
- const unsigned int num = format_desc_.mode == core::video_mode::progressive ? 1 : 2;\r
+ const unsigned int num = channel_.format_desc.mode == core::video_mode::progressive ? 1 : 2;\r
\r
auto frame1 = make_safe<core::basic_frame>(frame.second);\r
frame1->get_audio_transform() = root_audio_transform.fetch_and_tick(num)*audio_transforms[frame.first].fetch_and_tick(num);\r
\r
void send(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
{ \r
- context_.invoke([=]\r
+ channel_.execution.invoke([=]\r
{ \r
- diag_->set_value("input-buffer", static_cast<float>(context_.size())/static_cast<float>(context_.capacity())); \r
+ diag_->set_value("input-buffer", static_cast<float>(channel_.execution.size())/static_cast<float>(channel_.execution.capacity())); \r
frame_timer_.restart();\r
\r
auto image = mix_image(frames);\r
auto audio = mix_audio(frames);\r
\r
- diag_->update_value("frame-time", frame_timer_.elapsed()*format_desc_.fps*0.5);\r
+ diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
\r
output_(make_safe<read_frame>(std::move(image), std::move(audio)));\r
\r
- diag_->update_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);\r
+ diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
tick_timer_.restart();\r
});\r
- diag_->set_value("input-buffer", static_cast<float>(context_.size())/static_cast<float>(context_.capacity()));\r
+ diag_->set_value("input-buffer", static_cast<float>(channel_.execution.size())/static_cast<float>(channel_.execution.capacity()));\r
}\r
\r
safe_ptr<core::write_frame> create_frame(void* tag, const core::pixel_format_desc& desc)\r
template<typename T> \r
void set_transform(const T& transform, unsigned int mix_duration, const std::wstring& tween)\r
{\r
- context_.invoke([&]\r
+ channel_.execution.invoke([&]\r
{\r
auto& root = boost::fusion::at_key<T>(root_transforms_);\r
\r
template<typename T>\r
void set_transform(int index, const T& transform, unsigned int mix_duration, const std::wstring& tween)\r
{\r
- context_.invoke([&]\r
+ channel_.execution.invoke([&]\r
{\r
auto& transforms = boost::fusion::at_key<T>(transforms_);\r
\r
template<typename T>\r
void apply_transform(const std::function<T(const T&)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
{\r
- return context_.invoke([&]\r
+ return channel_.execution.invoke([&]\r
{\r
auto& root = boost::fusion::at_key<T>(root_transforms_);\r
\r
template<typename T>\r
void apply_transform(int index, const std::function<T(T)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
{\r
- context_.invoke([&]\r
+ channel_.execution.invoke([&]\r
{\r
auto& transforms = boost::fusion::at_key<T>(transforms_);\r
\r
template<typename T>\r
void reset_transform(unsigned int mix_duration, const std::wstring& tween)\r
{\r
- context_.invoke([&]\r
+ channel_.execution.invoke([&]\r
{\r
auto& transforms = boost::fusion::at_key<T>(transforms_);\r
\r
template<typename T>\r
void reset_transform(int index, unsigned int mix_duration, const std::wstring& tween)\r
{\r
- context_.invoke([&]\r
+ channel_.execution.invoke([&]\r
{ \r
set_transform(T(), mix_duration, tween);\r
});\r
}\r
};\r
\r
-frame_mixer_device::frame_mixer_device(executor& context, const core::video_format_desc& format_desc, const output_t& output, ogl_device& ogl)\r
-	: impl_(new implementation(context, format_desc, output, ogl)){}\r
+// frame_mixer_device facade — forwards into the pimpl; per-channel state\r
+// (format, executor, ogl) now comes from the shared channel_context.\r
+frame_mixer_device::frame_mixer_device(channel_context& channel, const output_t& output)\r
+	: impl_(new implementation(channel, output)){}\r
void frame_mixer_device::send(const std::map<int, safe_ptr<core::basic_frame>>& frames){impl_->send(frames);}\r
-const core::video_format_desc& frame_mixer_device::get_video_format_desc() const { return impl_->format_desc_; }\r
+const core::video_format_desc& frame_mixer_device::get_video_format_desc() const { return impl_->channel_.format_desc; }\r
safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, const core::pixel_format_desc& desc){ return impl_->create_frame(tag, desc); } \r
safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt)\r
{\r
desc.planes.push_back( core::pixel_format_desc::plane(width, height, 4));\r
return create_frame(tag, desc);\r
}\r
- \r
-safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, core::pixel_format::type pix_fmt)\r
-{\r
- // Create bgra frame with output resolution\r
- core::pixel_format_desc desc;\r
- desc.pix_fmt = pix_fmt;\r
- desc.planes.push_back( core::pixel_format_desc::plane(get_video_format_desc().width, get_video_format_desc().height, 4));\r
- return create_frame(tag, desc);\r
-}\r
void frame_mixer_device::set_image_transform(const core::image_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(transform, mix_duration, tween);}\r
void frame_mixer_device::set_image_transform(int index, const core::image_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(index, transform, mix_duration, tween);}\r
void frame_mixer_device::set_audio_transform(const core::audio_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::audio_transform>(transform, mix_duration, tween);}\r
class audio_transform;\r
class image_transform;\r
class ogl_device;\r
+struct channel_context;\r
\r
class frame_mixer_device : public core::frame_factory\r
{\r
public: \r
typedef std::function<void(const safe_ptr<core::read_frame>&)> output_t;\r
\r
- frame_mixer_device(executor& context, const core::video_format_desc& format_desc, const output_t& output, ogl_device& ogl);\r
+ frame_mixer_device(channel_context& channel, const output_t& output);\r
\r
void send(const std::map<int, safe_ptr<core::basic_frame>>& frames); // nothrow\r
\r
safe_ptr<core::write_frame> create_frame(void* tag, const core::pixel_format_desc& desc); \r
- safe_ptr<core::write_frame> create_frame(void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt = core::pixel_format::bgra); \r
- safe_ptr<core::write_frame> create_frame(void* tag, core::pixel_format::type pix_fmt = core::pixel_format::bgra);\r
+ safe_ptr<core::write_frame> create_frame(void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt = core::pixel_format::bgra); \r
\r
const core::video_format_desc& get_video_format_desc() const; // nothrow\r
\r
executor_.yield();\r
}\r
\r
+// Reclaim pooled gpu/host buffers (called after buffers are recreated on a\r
+// format change).\r
+// NOTE(review): the implementation is entirely commented out, so gc() is\r
+// currently a silent no-op — callers (e.g. image_mixer's\r
+// reinitialize_buffers) will NOT actually release the old pooled buffers.\r
+// Confirm whether this is intentional before shipping.\r
+void ogl_device::gc()\r
+{\r
+	//begin_invoke([=]\r
+	//{	\r
+	//	BOOST_FOREACH(auto& pool, device_pools_)\r
+	//		pool.clear();\r
+	//	BOOST_FOREACH(auto& pool, host_pools_)\r
+	//		pool.clear();\r
+	//});\r
+}\r
+\r
std::wstring ogl_device::get_version()\r
{ \r
static std::wstring ver;\r
safe_ptr<device_buffer> create_device_buffer(size_t width, size_t height, size_t stride);\r
safe_ptr<host_buffer> create_host_buffer(size_t size, host_buffer::usage_t usage);\r
void yield();\r
+ void gc();\r
\r
static std::wstring get_version();\r
};\r
#include "../gpu/device_buffer.h"\r
#include "../write_frame.h"\r
\r
+#include "../../channel_context.h"\r
+\r
#include <common/concurrency/executor.h>\r
#include <common/exception/exceptions.h>\r
#include <common/gl/gl_check.h>\r
core::image_transform transform;\r
};\r
\r
- const core::video_format_desc format_desc_;\r
+ channel_context& channel_;\r
\r
std::stack<core::image_transform> transform_stack_;\r
std::queue<std::queue<render_item>> render_queue_;\r
\r
image_kernel kernel_;\r
- \r
- ogl_device& ogl_;\r
- \r
+ \r
safe_ptr<host_buffer> read_buffer_;\r
safe_ptr<device_buffer> draw_buffer_;\r
safe_ptr<device_buffer> write_buffer_;\r
bool layer_key_;\r
\r
public:\r
- implementation(const core::video_format_desc& format_desc, ogl_device& ogl) \r
- : format_desc_(format_desc)\r
- , ogl_(ogl)\r
- , read_buffer_(ogl_.create_host_buffer(format_desc_.size, host_buffer::read_only))\r
- , draw_buffer_(ogl_.create_device_buffer(format_desc.width, format_desc.height, 4))\r
- , write_buffer_ (ogl_.create_device_buffer(format_desc.width, format_desc.height, 4))\r
- , local_key_buffer_(ogl_.create_device_buffer(format_desc.width, format_desc.height, 1))\r
- , layer_key_buffer_(ogl_.create_device_buffer(format_desc.width, format_desc.height, 1))\r
+ implementation(channel_context& channel) \r
+ : channel_(channel)\r
+ , read_buffer_(channel.ogl.create_host_buffer(channel.format_desc.size, host_buffer::read_only))\r
+ , draw_buffer_(channel.ogl.create_device_buffer(channel.format_desc.width, channel_.format_desc.height, 4))\r
+ , write_buffer_ (channel.ogl.create_device_buffer(channel.format_desc.width, channel_.format_desc.height, 4))\r
+ , local_key_buffer_(channel.ogl.create_device_buffer(channel.format_desc.width, channel_.format_desc.height, 1))\r
+ , layer_key_buffer_(channel.ogl.create_device_buffer(channel.format_desc.width, channel_.format_desc.height, 1))\r
, local_key_(false)\r
, layer_key_(false)\r
{\r
{\r
}\r
\r
- boost::unique_future<safe_ptr<host_buffer>> render()\r
+	// Recreate every gpu/host buffer against the current channel format.\r
+	// Invoked from the ogl thread when render() detects that draw_buffer_ no\r
+	// longer matches channel_.format_desc; gc() is then asked to reclaim the\r
+	// previously pooled buffers (see ogl_device::gc — currently stubbed).\r
+	void reinitialize_buffers()\r
+	{\r
+		read_buffer_      = channel_.ogl.create_host_buffer(channel_.format_desc.size, host_buffer::read_only);\r
+		draw_buffer_      = channel_.ogl.create_device_buffer(channel_.format_desc.width, channel_.format_desc.height, 4);\r
+		write_buffer_     = channel_.ogl.create_device_buffer(channel_.format_desc.width, channel_.format_desc.height, 4);\r
+		local_key_buffer_ = channel_.ogl.create_device_buffer(channel_.format_desc.width, channel_.format_desc.height, 1);\r
+		layer_key_buffer_ = channel_.ogl.create_device_buffer(channel_.format_desc.width, channel_.format_desc.height, 1);\r
+		channel_.ogl.gc();\r
+	}\r
+\r
+ safe_ptr<host_buffer> render()\r
{ \r
auto read_buffer = read_buffer_;\r
- auto result = ogl_.begin_invoke([=]() -> safe_ptr<host_buffer>\r
+ auto result = channel_.ogl.begin_invoke([=]() -> safe_ptr<host_buffer>\r
{\r
read_buffer->map();\r
return read_buffer;\r
\r
auto render_queue = std::move(render_queue_);\r
\r
- ogl_.begin_invoke([=]() mutable\r
+ channel_.ogl.begin_invoke([=]() mutable\r
{\r
+ if(draw_buffer_->width() != channel_.format_desc.width || draw_buffer_->height() != channel_.format_desc.height)\r
+ reinitialize_buffers();\r
+\r
local_key_ = false;\r
layer_key_ = false;\r
\r
{\r
draw(layer.front());\r
layer.pop();\r
- ogl_.yield(); // Allow quick buffer allocation to execute.\r
+ channel_.ogl.yield(); // Allow quick buffer allocation to execute.\r
}\r
\r
layer_key_ = local_key_; // If there was only key in last layer then use it as key for the entire next layer.\r
std::swap(draw_buffer_, write_buffer_);\r
\r
// Start transfer from device to host. \r
- read_buffer_ = ogl_.create_host_buffer(format_desc_.size, host_buffer::read_only); \r
+ read_buffer_ = channel_.ogl.create_host_buffer(channel_.format_desc.size, host_buffer::read_only); \r
write_buffer_->write(*read_buffer_);\r
});\r
\r
- return std::move(result);\r
+ return std::move(result.get());\r
}\r
\r
void draw(const render_item& item)\r
\r
// Draw\r
\r
- kernel_.draw(format_desc_.width, format_desc_.height, item.desc, item.transform, local_key, layer_key); \r
+ kernel_.draw(channel_.format_desc.width, channel_.format_desc.height, item.desc, item.transform, local_key, layer_key); \r
}\r
\r
safe_ptr<write_frame> create_frame(void* tag, const core::pixel_format_desc& desc)\r
{\r
- return make_safe<write_frame>(ogl_, reinterpret_cast<int>(tag), desc);\r
+ return make_safe<write_frame>(channel_.ogl, reinterpret_cast<int>(tag), desc);\r
}\r
};\r
\r
-image_mixer::image_mixer(const core::video_format_desc& format_desc, ogl_device& ogl) : impl_(new implementation(format_desc, ogl)){}\r
+// image_mixer facade. render() is now synchronous — it returns the host\r
+// buffer directly instead of a boost::unique_future (the blocking get()\r
+// happens inside implementation::render()).\r
+image_mixer::image_mixer(channel_context& channel) : impl_(new implementation(channel)){}\r
void image_mixer::begin(const core::basic_frame& frame){impl_->begin(frame);}\r
void image_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
void image_mixer::end(){impl_->end();}\r
-boost::unique_future<safe_ptr<host_buffer>> image_mixer::render(){return impl_->render();}\r
+safe_ptr<host_buffer> image_mixer::render(){return impl_->render();}\r
safe_ptr<write_frame> image_mixer::create_frame(void* tag, const core::pixel_format_desc& desc){return impl_->create_frame(tag, desc);}\r
void image_mixer::begin_layer(){impl_->begin_layer();}\r
void image_mixer::end_layer(){impl_->end_layer();}\r
#include <core/producer/frame/pixel_format.h>\r
\r
#include <boost/noncopyable.hpp>\r
-#include <boost/thread/future.hpp>\r
\r
namespace caspar { namespace core {\r
\r
class write_frame;\r
class host_buffer;\r
class ogl_device;\r
+struct channel_context;\r
\r
class image_mixer : public core::frame_visitor, boost::noncopyable\r
{\r
public:\r
- image_mixer(const core::video_format_desc& format_desc, ogl_device& ogl);\r
+ image_mixer(channel_context& context);\r
\r
virtual void begin(const core::basic_frame& frame);\r
virtual void visit(core::write_frame& frame);\r
void begin_layer();\r
void end_layer();\r
\r
- boost::unique_future<safe_ptr<host_buffer>> render();\r
+ safe_ptr<host_buffer> render();\r
\r
safe_ptr<write_frame> create_frame(void* tag, const core::pixel_format_desc& format);\r
\r
\r
struct read_frame::implementation : boost::noncopyable\r
{\r
- boost::unique_future<safe_ptr<host_buffer>> future_image_data_;\r
- std::shared_ptr<host_buffer> image_data_;\r
+ safe_ptr<host_buffer> image_data_;\r
std::vector<int16_t> audio_data_;\r
\r
public:\r
- implementation(boost::unique_future<safe_ptr<host_buffer>>&& future_image_data, std::vector<int16_t>&& audio_data) \r
- : future_image_data_(std::move(future_image_data))\r
+ implementation(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) \r
+ : image_data_(std::move(image_data))\r
, audio_data_(std::move(audio_data)){} \r
\r
const boost::iterator_range<const uint8_t*> image_data()\r
{\r
- try\r
- {\r
- if(!image_data_)\r
- image_data_ = future_image_data_.get();\r
- }\r
- catch(...) // image_data_ future might store exception.\r
- {\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- return boost::iterator_range<const uint8_t*>();\r
- }\r
-\r
auto ptr = static_cast<const uint8_t*>(image_data_->data());\r
return boost::iterator_range<const uint8_t*>(ptr, ptr + image_data_->size());\r
}\r
}\r
};\r
\r
-read_frame::read_frame(boost::unique_future<safe_ptr<host_buffer>>&& image_data, std::vector<int16_t>&& audio_data) \r
-	: impl_(new implementation(std::move(image_data), std::move(audio_data))){}\r
+// The future-based constructor is gone: image data is now always delivered\r
+// synchronously, so construction is a straight move into the pimpl (the old\r
+// promise/get_future round-trip is no longer needed).\r
read_frame::read_frame(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) \r
-{\r
-	boost::promise<safe_ptr<host_buffer>> p;\r
-	p.set_value(std::move(image_data));\r
-	impl_.reset(new implementation(std::move(p.get_future()), std::move(audio_data)));\r
-}\r
+	: impl_(new implementation(std::move(image_data), std::move(audio_data))){}\r
\r
const boost::iterator_range<const uint8_t*> read_frame::image_data() const{return impl_->image_data();}\r
const boost::iterator_range<const int16_t*> read_frame::audio_data() const{return impl_->audio_data();}\r
{\r
read_frame(){}\r
public:\r
- read_frame(boost::unique_future<safe_ptr<host_buffer>>&& image_data, std::vector<int16_t>&& audio_data);\r
read_frame(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data);\r
\r
virtual const boost::iterator_range<const uint8_t*> image_data() const;\r
{\r
virtual safe_ptr<write_frame> create_frame(void* video_stream_tag, const pixel_format_desc& desc) = 0;\r
virtual safe_ptr<write_frame> create_frame(void* video_stream_tag, size_t width, size_t height, pixel_format::type pix_fmt = pixel_format::bgra) = 0; \r
- virtual safe_ptr<write_frame> create_frame(void* video_stream_tag, pixel_format::type pix_fmt = pixel_format::bgra) = 0;\r
\r
virtual const video_format_desc& get_video_format_desc() const = 0; // nothrow\r
};\r
\r
#include "frame_producer_device.h"\r
\r
+#include "../channel_context.h"\r
+\r
#include "layer.h"\r
\r
#include <core/producer/frame/basic_frame.h>\r
struct frame_producer_device::implementation : boost::noncopyable\r
{ \r
std::map<int, layer> layers_; \r
- const video_format_desc format_desc_; \r
const output_t output_;\r
\r
safe_ptr<diagnostics::graph> diag_;\r
boost::timer tick_timer_;\r
boost::timer output_timer_;\r
\r
- executor& context_;\r
- executor& destroy_context_;\r
+ channel_context& channel_;\r
public:\r
- implementation(executor& context, executor& destroy_context, const video_format_desc& format_desc, const output_t& output) \r
- : format_desc_(format_desc)\r
- , diag_(diagnostics::create_graph(std::string("frame_producer_device")))\r
- , context_(context)\r
- , destroy_context_(destroy_context)\r
+ implementation(channel_context& channel, const output_t& output) \r
+ : diag_(diagnostics::create_graph(std::string("frame_producer_device")))\r
+ , channel_(channel)\r
, output_(output)\r
{\r
diag_->add_guide("frame-time", 0.5f); \r
diag_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));\r
diag_->set_color("output-time", diagnostics::color(0.5f, 1.0f, 0.2f));\r
\r
- context_.begin_invoke([=]{tick();}); \r
+ channel_.execution.begin_invoke([=]{tick();}); \r
}\r
\r
void tick()\r
auto frame = render();\r
output_timer_.restart();\r
output_(frame);\r
- diag_->update_value("output-time", static_cast<float>(output_timer_.elapsed()*format_desc_.fps*0.5));\r
+ diag_->update_value("output-time", static_cast<float>(output_timer_.elapsed()*channel_.format_desc.fps*0.5));\r
}\r
catch(...)\r
{\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
}\r
\r
- context_.begin_invoke([=]{tick();});\r
+ channel_.execution.begin_invoke([=]{tick();});\r
}\r
\r
std::map<int, safe_ptr<basic_frame>> render()\r
frames[pair.first] = pair.second.receive();\r
});\r
\r
- diag_->update_value("frame-time", frame_timer_.elapsed()*format_desc_.fps*0.5);\r
+ diag_->update_value("frame-time", frame_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
\r
- diag_->update_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);\r
+ diag_->update_value("tick-time", tick_timer_.elapsed()*channel_.format_desc.fps*0.5);\r
tick_timer_.restart();\r
\r
return frames;\r
\r
void load(int index, const safe_ptr<frame_producer>& producer, bool preview)\r
{\r
- context_.invoke([&]{layers_[index].load(make_safe<destroy_producer_proxy>(destroy_context_, producer), preview);});\r
+ channel_.execution.invoke([&]{layers_[index].load(make_safe<destroy_producer_proxy>(channel_.destruction, producer), preview);});\r
}\r
\r
void pause(int index)\r
{ \r
- context_.invoke([&]{layers_[index].pause();});\r
+ channel_.execution.invoke([&]{layers_[index].pause();});\r
}\r
\r
void play(int index)\r
{ \r
- context_.invoke([&]{layers_[index].play();});\r
+ channel_.execution.invoke([&]{layers_[index].play();});\r
}\r
\r
void stop(int index)\r
{ \r
- context_.invoke([&]{layers_[index].stop();});\r
+ channel_.execution.invoke([&]{layers_[index].stop();});\r
}\r
\r
void clear(int index)\r
{\r
- context_.invoke([&]{layers_.erase(index);});\r
+ channel_.execution.invoke([&]{layers_.erase(index);});\r
}\r
\r
void clear()\r
{\r
- context_.invoke([&]{layers_.clear();});\r
+ channel_.execution.invoke([&]{layers_.clear();});\r
} \r
\r
void swap_layer(int index, size_t other_index)\r
{\r
- context_.invoke([&]{layers_[index].swap(layers_[other_index]);});\r
+ channel_.execution.invoke([&]{layers_[index].swap(layers_[other_index]);});\r
}\r
\r
void swap_layer(int index, size_t other_index, frame_producer_device& other)\r
swap_layer(index, other_index);\r
else\r
{\r
- if(format_desc_ != other.impl_->format_desc_)\r
+ if(channel_.format_desc != other.impl_->channel_.format_desc)\r
BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Cannot swap between channels with different formats."));\r
\r
auto func = [&]{layers_[index].swap(other.impl_->layers_[other_index]);};\r
\r
- context_.invoke([&]{other.impl_->context_.invoke(func);});\r
+ channel_.execution.invoke([&]{other.impl_->channel_.execution.invoke(func);});\r
}\r
}\r
\r
if(other.impl_.get() == this)\r
return;\r
\r
- if(format_desc_ != other.impl_->format_desc_)\r
+ if(channel_.format_desc != other.impl_->channel_.format_desc)\r
BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Cannot swap between channels with different formats."));\r
\r
auto func = [&]\r
}); \r
};\r
\r
- context_.invoke([&]{other.impl_->context_.invoke(func);});\r
+ channel_.execution.invoke([&]{other.impl_->channel_.execution.invoke(func);});\r
}\r
\r
boost::unique_future<safe_ptr<frame_producer>> foreground(int index)\r
{\r
- return context_.begin_invoke([=]{return layers_[index].foreground();});\r
+ return channel_.execution.begin_invoke([=]{return layers_[index].foreground();});\r
}\r
\r
boost::unique_future<safe_ptr<frame_producer>> background(int index)\r
{\r
- return context_.begin_invoke([=]{return layers_[index].background();});\r
+ return channel_.execution.begin_invoke([=]{return layers_[index].background();});\r
}\r
};\r
\r
-frame_producer_device::frame_producer_device(executor& context, executor& destroy_context, const video_format_desc& format_desc, const output_t& output)\r
- : impl_(new implementation(context, destroy_context, format_desc, output)){}\r
-frame_producer_device::frame_producer_device(frame_producer_device&& other) : impl_(std::move(other.impl_)){}\r
+frame_producer_device::frame_producer_device(channel_context& channel, const output_t& output)\r
+ : impl_(new implementation(channel, output)){}\r
void frame_producer_device::swap(frame_producer_device& other){impl_->swap(other);}\r
void frame_producer_device::load(int index, const safe_ptr<frame_producer>& producer, bool preview){impl_->load(index, producer, preview);}\r
void frame_producer_device::pause(int index){impl_->pause(index);}\r
namespace core {\r
\r
struct video_format_desc;\r
+struct channel_context;\r
\r
class frame_producer_device : boost::noncopyable\r
{\r
public:\r
typedef std::function<void(const std::map<int, safe_ptr<basic_frame>>&)> output_t;\r
\r
- explicit frame_producer_device(executor& context, executor& destroy_context, const video_format_desc& format_desc, const output_t& output);\r
- frame_producer_device(frame_producer_device&& other);\r
+ explicit frame_producer_device(channel_context& channel, const output_t& output);\r
\r
void swap(frame_producer_device& other);\r
\r
\r
CASPAR_LOG(info) << print() << L" Shutting down."; \r
}\r
+ \r
+ const core::video_format_desc& get_video_format_desc() const\r
+ {\r
+ return format_desc_;\r
+ }\r
\r
void enable_video_output()\r
{\r
{\r
consumer_->send(frame);\r
}\r
+\r
+ virtual const core::video_format_desc& get_video_format_desc() const\r
+ {\r
+ return consumer_->get_video_format_desc();\r
+ }\r
\r
virtual std::wstring print() const\r
{\r
}\r
}\r
\r
+ const core::video_format_desc& get_video_format_desc() const\r
+ {\r
+ return format_desc_;\r
+ }\r
+\r
void set_latency(bool low_latency)\r
{ \r
if(!low_latency)\r
{\r
return config_.key_only;\r
}\r
+ \r
+ virtual const core::video_format_desc& get_video_format_desc() const\r
+ {\r
+ return context_->get_video_format_desc();\r
+ }\r
}; \r
\r
safe_ptr<core::frame_consumer> create_decklink_consumer(const std::vector<std::wstring>& params) \r
if (!(fmt_->flags & AVFMT_NOFILE)) \r
url_fclose(oc_->pb); // Close the output ffmpeg.\r
}\r
+\r
+ const core::video_format_desc& get_video_format_desc() const\r
+ {\r
+ return format_desc_;\r
+ }\r
\r
std::wstring print() const\r
{\r
{\r
return key_only_;\r
}\r
+\r
+ virtual const core::video_format_desc& get_video_format_desc() const\r
+ {\r
+ return consumer_->get_video_format_desc();\r
+ }\r
}; \r
\r
safe_ptr<core::frame_consumer> create_ffmpeg_consumer(const std::vector<std::wstring>& params)\r
\r
// DVVIDEO is in lower field. Make it upper field if needed.\r
if(codec_context_.codec_id == CODEC_ID_DVVIDEO && frame_factory_->get_video_format_desc().mode == core::video_mode::upper)\r
- write->get_image_transform().set_fill_translation(0.0f, 1.0/static_cast<double>(height_));\r
+ write->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(height_));\r
\r
return write;\r
}\r
class flash_renderer\r
{ \r
const std::wstring filename_;\r
- const core::video_format_desc format_desc_;\r
+ core::video_format_desc format_desc_;\r
\r
const std::shared_ptr<core::frame_factory> frame_factory_;\r
\r
\r
safe_ptr<core::basic_frame> render_frame(bool has_underflow)\r
{\r
+ if(format_desc_ != frame_factory_->get_video_format_desc())\r
+ {\r
+ format_desc_ = frame_factory_->get_video_format_desc();\r
+ ax_->SetFormat(format_desc_);\r
+ }\r
+\r
float frame_time = 1.0f/ax_->GetFPS();\r
\r
graph_->update_value("tick-time", static_cast<float>(tick_timer_.elapsed()/frame_time)*0.5f);\r
fast_memclr(bmp_.data(), format_desc_.size);\r
ax_->DrawControl(bmp_);\r
\r
- auto frame = frame_factory_->create_frame(this);\r
+ auto frame = frame_factory_->create_frame(this, format_desc_.width, format_desc_.height);\r
fast_memcpy(frame->image_data().begin(), bmp_.data(), format_desc_.size);\r
frame->commit();\r
head_ = frame;\r
size_t oal_consumer::buffer_depth() const{return impl_->buffer_depth();}\r
void oal_consumer::initialize(const core::video_format_desc& format_desc){impl_.reset(new implementation(format_desc));}\r
std::wstring oal_consumer::print() const { return impl_->print(); }\r
+const core::video_format_desc& oal_consumer::get_video_format_desc() const{return impl_->format_desc_;}\r
\r
safe_ptr<core::frame_consumer> create_oal_consumer(const std::vector<std::wstring>& params)\r
{\r
virtual void send(const safe_ptr<const core::read_frame>&);\r
virtual size_t buffer_depth() const;\r
virtual std::wstring print() const;\r
+ virtual const core::video_format_desc& get_video_format_desc() const;\r
private:\r
struct implementation;\r
std::shared_ptr<implementation> impl_;\r
CASPAR_LOG(info) << print() << " Sucessfully Initialized.";\r
}\r
\r
+ const core::video_format_desc& get_video_format_desc() const\r
+ {\r
+ return format_desc_;\r
+ }\r
+\r
void calculate_aspect()\r
{\r
if(windowed_)\r
{\r
return false;\r
}\r
+\r
+ virtual const core::video_format_desc& get_video_format_desc() const\r
+ {\r
+ return consumer_->get_video_format_desc();\r
+ }\r
}; \r
\r
safe_ptr<core::frame_consumer> create_ogl_consumer(const std::vector<std::wstring>& params)\r
while (window_.GetEvent(Event)){}\r
window_.Display();\r
\r
- auto frame = frame_factory_->create_frame(this);\r
+ auto frame = frame_factory_->create_frame(this, format_desc_.width, format_desc_.height);\r
::BitBlt(mem_, 0, 0, format_desc_.width, format_desc_.height, screen_, 0, 0, SRCCOPY); \r
std::copy_n(bmp_data_, format_desc_.size, frame->image_data().begin());\r
\r