-/*\r
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>\r
-*\r
-* This file is part of CasparCG (www.casparcg.com).\r
-*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
-*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-*\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
-*\r
-* Author: Robert Nagy, ronag89@gmail.com\r
-*/\r
-\r
-#include "../stdafx.h"\r
-\r
-#include "decklink_producer.h"\r
-\r
-#include "../interop/DeckLinkAPI_h.h"\r
-#include "../util/util.h"\r
-\r
-#include "../../ffmpeg/producer/filter/filter.h"\r
-#include "../../ffmpeg/producer/util/util.h"\r
-#include "../../ffmpeg/producer/muxer/frame_muxer.h"\r
-#include "../../ffmpeg/producer/muxer/display_mode.h"\r
-\r
-#include <common/executor.h>\r
-#include <common/diagnostics/graph.h>\r
-#include <common/except.h>\r
-#include <common/log.h>\r
-#include <common/param.h>\r
-\r
-#include <core/frame/frame.h>\r
-#include <core/frame/draw_frame.h>\r
-#include <core/frame/frame_transform.h>\r
-#include <core/frame/frame_factory.h>\r
-#include <core/monitor/monitor.h>\r
-\r
-#include <tbb/concurrent_queue.h>\r
-\r
-#include <boost/algorithm/string.hpp>\r
-#include <boost/foreach.hpp>\r
-#include <boost/property_tree/ptree.hpp>\r
-#include <boost/timer.hpp>\r
-\r
-#if defined(_MSC_VER)\r
-#pragma warning (push)\r
-#pragma warning (disable : 4244)\r
-#endif\r
-extern "C" \r
-{\r
- #define __STDC_CONSTANT_MACROS\r
- #define __STDC_LIMIT_MACROS\r
- #include <libavcodec/avcodec.h>\r
-}\r
-#if defined(_MSC_VER)\r
-#pragma warning (pop)\r
-#endif\r
-\r
-#pragma warning(push)\r
-#pragma warning(disable : 4996)\r
-\r
- #include <atlbase.h>\r
-\r
- #include <atlcom.h>\r
- #include <atlhost.h>\r
-\r
-#pragma warning(push)\r
-\r
-#include <functional>\r
-\r
-namespace caspar { namespace decklink {\r
- \r
-class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback\r
-{ \r
- monitor::basic_subject event_subject_;\r
- spl::shared_ptr<diagnostics::graph> graph_;\r
- boost::timer tick_timer_;\r
-\r
- CComPtr<IDeckLink> decklink_;\r
- CComQIPtr<IDeckLinkInput> input_;\r
- CComQIPtr<IDeckLinkAttributes > attributes_;\r
- \r
- const std::wstring model_name_;\r
- const int device_index_;\r
- const std::wstring filter_;\r
- \r
- std::vector<int> audio_cadence_;\r
- boost::circular_buffer<size_t> sync_buffer_;\r
- ffmpeg::frame_muxer muxer_;\r
- \r
- tbb::atomic<int> flags_;\r
- spl::shared_ptr<core::frame_factory> frame_factory_;\r
- core::video_format_desc in_format_desc_;\r
- core::video_format_desc out_format_desc_;\r
-\r
- tbb::concurrent_bounded_queue<core::draw_frame> frame_buffer_;\r
-\r
- std::exception_ptr exception_; \r
-\r
-public:\r
- decklink_producer(const core::video_format_desc& in_format_desc, \r
- int device_index, \r
- const spl::shared_ptr<core::frame_factory>& frame_factory, \r
- const core::video_format_desc& out_format_desc, \r
- const std::wstring& filter)\r
- : decklink_(get_device(device_index))\r
- , input_(decklink_)\r
- , attributes_(decklink_)\r
- , model_name_(get_model_name(decklink_))\r
- , device_index_(device_index)\r
- , filter_(filter)\r
- , in_format_desc_(in_format_desc)\r
- , out_format_desc_(out_format_desc)\r
- , muxer_(in_format_desc.fps, frame_factory, out_format_desc, filter)\r
- , audio_cadence_(out_format_desc.audio_cadence)\r
- , sync_buffer_(out_format_desc.audio_cadence.size())\r
- , frame_factory_(frame_factory)\r
- { \r
- flags_ = 0;\r
- frame_buffer_.set_capacity(2);\r
- \r
- graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f)); \r
- graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));\r
- graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
- graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));\r
- graph_->set_color("output-buffer", diagnostics::color(0.0f, 1.0f, 0.0f));\r
- graph_->set_text(print());\r
- diagnostics::register_graph(graph_);\r
- \r
- auto display_mode = get_display_mode(input_, in_format_desc.format, bmdFormat8BitYUV, bmdVideoInputFlagDefault);\r
- \r
- // NOTE: bmdFormat8BitARGB is currently not supported by any decklink card. (2011-05-08)\r
- if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0))) \r
- BOOST_THROW_EXCEPTION(caspar_exception() \r
- << msg_info(print() + L" Could not enable video input.")\r
- << boost::errinfo_api_function("EnableVideoInput"));\r
-\r
- if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, static_cast<int>(in_format_desc.audio_channels)))) \r
- BOOST_THROW_EXCEPTION(caspar_exception() \r
- << msg_info(print() + L" Could not enable audio input.")\r
- << boost::errinfo_api_function("EnableAudioInput"));\r
- \r
- if (FAILED(input_->SetCallback(this)) != S_OK)\r
- BOOST_THROW_EXCEPTION(caspar_exception() \r
- << msg_info(print() + L" Failed to set input callback.")\r
- << boost::errinfo_api_function("SetCallback"));\r
- \r
- if(FAILED(input_->StartStreams()))\r
- BOOST_THROW_EXCEPTION(caspar_exception() \r
- << msg_info(print() + L" Failed to start input stream.")\r
- << boost::errinfo_api_function("StartStreams"));\r
-\r
- CASPAR_LOG(info) << print() << L" Initialized";\r
- }\r
-\r
- ~decklink_producer()\r
- {\r
- if(input_ != nullptr) \r
- {\r
- input_->StopStreams();\r
- input_->DisableVideoInput();\r
- }\r
- }\r
-\r
- virtual HRESULT STDMETHODCALLTYPE QueryInterface (REFIID, LPVOID*) {return E_NOINTERFACE;}\r
- virtual ULONG STDMETHODCALLTYPE AddRef () {return 1;}\r
- virtual ULONG STDMETHODCALLTYPE Release () {return 1;}\r
- \r
- virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents /*notificationEvents*/, IDeckLinkDisplayMode* newDisplayMode, BMDDetectedVideoInputFormatFlags /*detectedSignalFlags*/)\r
- {\r
- return S_OK;\r
- }\r
-\r
- virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio)\r
- { \r
- if(!video)\r
- return S_OK;\r
-\r
- try\r
- {\r
- graph_->set_value("tick-time", tick_timer_.elapsed()*out_format_desc_.fps*0.5);\r
- tick_timer_.restart();\r
-\r
- boost::timer frame_timer; \r
-\r
- // PUSH\r
-\r
- void* bytes = nullptr;\r
- if(FAILED(video->GetBytes(&bytes)) || !bytes)\r
- return S_OK;\r
- \r
- spl::shared_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); \r
- avcodec_get_frame_defaults(av_frame.get());\r
- \r
- av_frame->data[0] = reinterpret_cast<uint8_t*>(bytes);\r
- av_frame->linesize[0] = video->GetRowBytes(); \r
- av_frame->format = PIX_FMT_UYVY422;\r
- av_frame->width = video->GetWidth();\r
- av_frame->height = video->GetHeight();\r
- av_frame->interlaced_frame = in_format_desc_.field_mode != core::field_mode::progressive;\r
- av_frame->top_field_first = in_format_desc_.field_mode == core::field_mode::upper ? 1 : 0;\r
- \r
- event_subject_ << monitor::event("file/name") % model_name_\r
- << monitor::event("file/path") % device_index_\r
- << monitor::event("file/video/width") % video->GetWidth()\r
- << monitor::event("file/video/height") % video->GetHeight()\r
- << monitor::event("file/video/field") % u8(!av_frame->interlaced_frame ? "progressive" : (av_frame->top_field_first ? "upper" : "lower"))\r
- << monitor::event("file/audio/sample-rate") % 48000\r
- << monitor::event("file/audio/channels") % 2\r
- << monitor::event("file/audio/format") % u8(av_get_sample_fmt_name(AV_SAMPLE_FMT_S32))\r
- << monitor::event("file/fps") % in_format_desc_.fps;\r
-\r
- std::shared_ptr<core::audio_buffer> audio_buffer;\r
-\r
- // It is assumed that audio is always equal or ahead of video.\r
- if(audio && SUCCEEDED(audio->GetBytes(&bytes)) && bytes)\r
- {\r
- auto sample_frame_count = audio->GetSampleFrameCount();\r
- auto audio_data = reinterpret_cast<int32_t*>(bytes);\r
- audio_buffer = std::make_shared<core::audio_buffer>(audio_data, audio_data + sample_frame_count*out_format_desc_.audio_channels);\r
- }\r
- else \r
- audio_buffer = std::make_shared<core::audio_buffer>(audio_cadence_.front(), 0);\r
- \r
- // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)\r
- // This cadence fills the audio mixer most optimally.\r
-\r
- sync_buffer_.push_back(audio_buffer->size()); \r
- if(!boost::range::equal(sync_buffer_, audio_cadence_))\r
- {\r
- CASPAR_LOG(trace) << print() << L" Syncing audio.";\r
- return S_OK;\r
- }\r
-\r
- muxer_.push(audio_buffer);\r
- muxer_.push(av_frame, flags_); \r
- \r
- boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);\r
- \r
- // POLL\r
-\r
- auto frame = core::draw_frame::late();\r
- if(muxer_.try_pop(frame))\r
- {\r
- if(!frame_buffer_.try_push(frame))\r
- {\r
- auto dummy = core::draw_frame::empty();\r
- frame_buffer_.try_pop(dummy);\r
- frame_buffer_.try_push(frame);\r
- \r
- graph_->set_tag("dropped-frame");\r
- }\r
- }\r
- \r
- graph_->set_value("frame-time", frame_timer.elapsed()*out_format_desc_.fps*0.5); \r
- event_subject_ << monitor::event("profiler/time") % frame_timer.elapsed() % out_format_desc_.fps;\r
-\r
- graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity())); \r
- event_subject_ << monitor::event("buffer") % frame_buffer_.size() % frame_buffer_.capacity();\r
- }\r
- catch(...)\r
- {\r
- exception_ = std::current_exception();\r
- return E_FAIL;\r
- }\r
-\r
- return S_OK;\r
- }\r
- \r
- core::draw_frame get_frame(int flags)\r
- {\r
- if(exception_ != nullptr)\r
- std::rethrow_exception(exception_);\r
-\r
- flags_ = flags;\r
-\r
- core::draw_frame frame = core::draw_frame::late();\r
- if(!frame_buffer_.try_pop(frame))\r
- graph_->set_tag("late-frame");\r
- graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity())); \r
- return frame;\r
- }\r
- \r
- std::wstring print() const\r
- {\r
- return model_name_ + L" [" + boost::lexical_cast<std::wstring>(device_index_) + L"|" + in_format_desc_.name + L"]";\r
- }\r
-\r
- void subscribe(const monitor::observable::observer_ptr& o)\r
- {\r
- event_subject_.subscribe(o);\r
- }\r
-\r
- void unsubscribe(const monitor::observable::observer_ptr& o)\r
- {\r
- event_subject_.unsubscribe(o);\r
- }\r
-};\r
- \r
-class decklink_producer_proxy : public core::frame_producer\r
-{ \r
- std::unique_ptr<decklink_producer> producer_;\r
- const uint32_t length_;\r
- core::draw_frame last_frame_;\r
- executor executor_;\r
-public:\r
- explicit decklink_producer_proxy(const core::video_format_desc& in_format_desc,\r
- const spl::shared_ptr<core::frame_factory>& frame_factory, \r
- const core::video_format_desc& out_format_desc, \r
- int device_index,\r
- const std::wstring& filter_str, uint32_t length)\r
- : executor_(L"decklink_producer[" + boost::lexical_cast<std::wstring>(device_index) + L"]")\r
- , length_(length)\r
- , last_frame_(core::draw_frame::empty())\r
- {\r
- executor_.invoke([=]\r
- {\r
- CoInitialize(nullptr);\r
- producer_.reset(new decklink_producer(in_format_desc, device_index, frame_factory, out_format_desc, filter_str));\r
- });\r
- }\r
-\r
- ~decklink_producer_proxy()\r
- { \r
- executor_.invoke([=]\r
- {\r
- producer_.reset();\r
- CoUninitialize();\r
- });\r
- }\r
-\r
- virtual void subscribe(const monitor::observable::observer_ptr& o) override\r
- {\r
- producer_->subscribe(o);\r
- }\r
-\r
- virtual void unsubscribe(const monitor::observable::observer_ptr& o) override\r
- {\r
- producer_->unsubscribe(o);\r
- }\r
- \r
- // frame_producer\r
- \r
- virtual core::draw_frame receive(int flags) override\r
- {\r
- auto frame = producer_->get_frame(flags);\r
-\r
- if(frame != core::draw_frame::late())\r
- last_frame_ = frame;\r
-\r
- return frame;\r
- }\r
-\r
- virtual core::draw_frame last_frame() const override\r
- {\r
- return core::draw_frame::still(last_frame_);\r
- }\r
- \r
- virtual uint32_t nb_frames() const override\r
- {\r
- return length_;\r
- }\r
- \r
- virtual std::wstring print() const override\r
- {\r
- return producer_->print();\r
- }\r
- \r
- virtual std::wstring name() const override\r
- {\r
- return L"decklink";\r
- }\r
-\r
- virtual boost::property_tree::wptree info() const override\r
- {\r
- boost::property_tree::wptree info;\r
- info.add(L"type", L"decklink");\r
- return info;\r
- }\r
-};\r
-\r
-spl::shared_ptr<core::frame_producer> create_producer(const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& out_format_desc, const std::vector<std::wstring>& params)\r
-{\r
- if(params.empty() || !boost::iequals(params[0], "decklink"))\r
- return core::frame_producer::empty();\r
-\r
- auto device_index = get_param(L"DEVICE", params, -1);\r
- if(device_index == -1)\r
- device_index = boost::lexical_cast<int>(params.at(1));\r
- \r
- auto filter_str = get_param(L"FILTER", params); \r
- auto length = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max()); \r
- auto in_format_desc = core::video_format_desc(get_param(L"FORMAT", params, L"INVALID"));\r
- \r
- boost::replace_all(filter_str, L"DEINTERLACE", L"YADIF=0:-1");\r
- boost::replace_all(filter_str, L"DEINTERLACE_BOB", L"YADIF=1:-1");\r
- \r
- if(in_format_desc.format == core::video_format::invalid)\r
- in_format_desc = out_format_desc;\r
- \r
- return spl::make_shared<decklink_producer_proxy>(in_format_desc, frame_factory, out_format_desc, device_index, filter_str, length);\r
-}\r
-\r
-}}
\ No newline at end of file
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Robert Nagy, ronag89@gmail.com
+*/
+
+#include "../StdAfx.h"
+
+#include "decklink_producer.h"
+
+#include "../util/util.h"
+
+#include "../../ffmpeg/producer/filter/filter.h"
+#include "../../ffmpeg/producer/util/util.h"
+#include "../../ffmpeg/producer/muxer/frame_muxer.h"
+#include "../../ffmpeg/producer/muxer/display_mode.h"
+
+#include <common/executor.h>
+#include <common/diagnostics/graph.h>
+#include <common/except.h>
+#include <common/log.h>
+#include <common/param.h>
+#include <common/timer.h>
+
+#include <core/frame/audio_channel_layout.h>
+#include <core/frame/frame.h>
+#include <core/frame/draw_frame.h>
+#include <core/frame/frame_transform.h>
+#include <core/frame/frame_factory.h>
+#include <core/producer/frame_producer.h>
+#include <core/producer/framerate/framerate_producer.h>
+#include <core/monitor/monitor.h>
+#include <core/diagnostics/call_context.h>
+#include <core/mixer/audio/audio_mixer.h>
+#include <core/help/help_repository.h>
+#include <core/help/help_sink.h>
+
+#include <tbb/concurrent_queue.h>
+
+#include <boost/algorithm/string.hpp>
+#include <boost/property_tree/ptree.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
+#if defined(_MSC_VER)
+#pragma warning (push)
+#pragma warning (disable : 4244)
+#endif
+extern "C"
+{
+ #define __STDC_CONSTANT_MACROS
+ #define __STDC_LIMIT_MACROS
+ #include <libavcodec/avcodec.h>
+}
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+
+#include "../decklink_api.h"
+
+#include <functional>
+
+namespace caspar { namespace decklink {
+core::audio_channel_layout get_adjusted_channel_layout(core::audio_channel_layout layout)
+{
+ if (layout.num_channels <= 2)
+ layout.num_channels = 2;
+ else if (layout.num_channels <= 8)
+ layout.num_channels = 8;
+ else
+ layout.num_channels = 16;
+
+ return layout;
+}
+
+template <typename T>
+std::wstring to_string(const T& cadence)
+{
+ return boost::join(cadence | boost::adaptors::transformed([](size_t i) { return boost::lexical_cast<std::wstring>(i); }), L", ");
+}
+
+class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback
+{
+ const int device_index_;
+ core::monitor::subject monitor_subject_;
+ spl::shared_ptr<diagnostics::graph> graph_;
+ caspar::timer tick_timer_;
+
+ com_ptr<IDeckLink> decklink_ = get_device(device_index_);
+ com_iface_ptr<IDeckLinkInput> input_ = iface_cast<IDeckLinkInput>(decklink_);
+ com_iface_ptr<IDeckLinkAttributes> attributes_ = iface_cast<IDeckLinkAttributes>(decklink_);
+
+ const std::wstring model_name_ = get_model_name(decklink_);
+ const std::wstring filter_;
+
+ core::video_format_desc in_format_desc_;
+ core::video_format_desc out_format_desc_;
+ std::vector<int> audio_cadence_ = in_format_desc_.audio_cadence;
+ boost::circular_buffer<size_t> sync_buffer_ { audio_cadence_.size() };
+ spl::shared_ptr<core::frame_factory> frame_factory_;
+ core::audio_channel_layout channel_layout_;
+ ffmpeg::frame_muxer muxer_ {
+ in_format_desc_.framerate,
+ { ffmpeg::create_input_pad(in_format_desc_, channel_layout_.num_channels) },
+ frame_factory_,
+ out_format_desc_,
+ channel_layout_,
+ filter_,
+ ffmpeg::filter::is_deinterlacing(filter_)
+ };
+
+ core::constraints constraints_ { in_format_desc_.width, in_format_desc_.height };
+
+ tbb::concurrent_bounded_queue<core::draw_frame> frame_buffer_;
+ core::draw_frame last_frame_ = core::draw_frame::empty();
+
+ std::exception_ptr exception_;
+
+public:
+ decklink_producer(
+ const core::video_format_desc& in_format_desc,
+ int device_index,
+ const spl::shared_ptr<core::frame_factory>& frame_factory,
+ const core::video_format_desc& out_format_desc,
+ const core::audio_channel_layout& channel_layout,
+ const std::wstring& filter)
+ : device_index_(device_index)
+ , filter_(filter)
+ , in_format_desc_(in_format_desc)
+ , out_format_desc_(out_format_desc)
+ , frame_factory_(frame_factory)
+ , channel_layout_(get_adjusted_channel_layout(channel_layout))
+ {
+ frame_buffer_.set_capacity(4);
+
+ graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
+ graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
+ graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
+ graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
+ graph_->set_color("output-buffer", diagnostics::color(0.0f, 1.0f, 0.0f));
+ graph_->set_text(print());
+ diagnostics::register_graph(graph_);
+
+ bool will_attempt_dma;
+ auto display_mode = get_display_mode(input_, in_format_desc.format, bmdFormat8BitYUV, bmdVideoInputFlagDefault, will_attempt_dma);
+
+ // NOTE: bmdFormat8BitARGB is currently not supported by any decklink card. (2011-05-08)
+ if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0)))
+ CASPAR_THROW_EXCEPTION(caspar_exception()
+ << msg_info(print() + L" Could not enable video input.")
+ << boost::errinfo_api_function("EnableVideoInput"));
+
+ if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, static_cast<int>(channel_layout_.num_channels))))
+ CASPAR_THROW_EXCEPTION(caspar_exception()
+ << msg_info(print() + L" Could not enable audio input.")
+ << boost::errinfo_api_function("EnableAudioInput"));
+
+		// SetCallback() returns an HRESULT; FAILED() already reduces it to a bool,
+		// so comparing that bool against S_OK was redundant and misleading.
+		if (FAILED(input_->SetCallback(this)))
+			CASPAR_THROW_EXCEPTION(caspar_exception()
+					<< msg_info(print() + L" Failed to set input callback.")
+					<< boost::errinfo_api_function("SetCallback"));
+
+ if(FAILED(input_->StartStreams()))
+ CASPAR_THROW_EXCEPTION(caspar_exception()
+ << msg_info(print() + L" Failed to start input stream.")
+ << boost::errinfo_api_function("StartStreams"));
+
+ // Wait for first frame until returning or give up after 2 seconds.
+ caspar::timer timeout_timer;
+
+ while (frame_buffer_.size() < 1 && timeout_timer.elapsed() < 2.0)
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
+
+ CASPAR_LOG(info) << print() << L" Initialized";
+ }
+
+ ~decklink_producer()
+ {
+ if(input_ != nullptr)
+ {
+ input_->StopStreams();
+ input_->DisableVideoInput();
+ }
+ }
+
+ core::constraints& pixel_constraints()
+ {
+ return constraints_;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface (REFIID, LPVOID*) {return E_NOINTERFACE;}
+ virtual ULONG STDMETHODCALLTYPE AddRef () {return 1;}
+ virtual ULONG STDMETHODCALLTYPE Release () {return 1;}
+
+	// No-op: input format auto-detection is not enabled when the stream is opened
+	// (EnableVideoInput is called with flags 0), so this callback never carries work.
+	// All parameters are intentionally unused; comment them out to avoid warnings,
+	// consistent with the other unused parameters in this signature.
+	virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents /*notificationEvents*/, IDeckLinkDisplayMode* /*newDisplayMode*/, BMDDetectedVideoInputFormatFlags /*detectedSignalFlags*/)
+	{
+		return S_OK;
+	}
+
+ virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio)
+ {
+ ensure_gpf_handler_installed_for_thread("decklink-VideoInputFrameArrived");
+ if(!video)
+ return S_OK;
+
+ try
+ {
+ graph_->set_value("tick-time", tick_timer_.elapsed()*out_format_desc_.fps*0.5);
+ tick_timer_.restart();
+
+ caspar::timer frame_timer;
+
+ // Video
+
+ void* video_bytes = nullptr;
+ if(FAILED(video->GetBytes(&video_bytes)) || !video_bytes)
+ return S_OK;
+
+ auto video_frame = ffmpeg::create_frame();
+
+ video_frame->data[0] = reinterpret_cast<uint8_t*>(video_bytes);
+ video_frame->linesize[0] = video->GetRowBytes();
+ video_frame->format = AVPixelFormat::AV_PIX_FMT_UYVY422;
+ video_frame->width = video->GetWidth();
+ video_frame->height = video->GetHeight();
+ video_frame->interlaced_frame = in_format_desc_.field_mode != core::field_mode::progressive;
+ video_frame->top_field_first = in_format_desc_.field_mode == core::field_mode::upper ? 1 : 0;
+ video_frame->key_frame = 1;
+
+			// Report the actual (padded) channel count opened via EnableAudioInput
+			// rather than a hard-coded 2 — channel_layout_ may be 2, 8 or 16 channels.
+			monitor_subject_
+					<< core::monitor::message("/file/name")					% model_name_
+					<< core::monitor::message("/file/path")					% device_index_
+					<< core::monitor::message("/file/video/width")			% video->GetWidth()
+					<< core::monitor::message("/file/video/height")			% video->GetHeight()
+					<< core::monitor::message("/file/video/field")			% u8(!video_frame->interlaced_frame ? "progressive" : (video_frame->top_field_first ? "upper" : "lower"))
+					<< core::monitor::message("/file/audio/sample-rate")	% 48000
+					<< core::monitor::message("/file/audio/channels")		% channel_layout_.num_channels
+					<< core::monitor::message("/file/audio/format")			% u8(av_get_sample_fmt_name(AV_SAMPLE_FMT_S32))
+					<< core::monitor::message("/file/fps")					% in_format_desc_.fps;
+
+ // Audio
+
+ std::shared_ptr<core::mutable_audio_buffer> audio_buffer;
+ void* audio_bytes = nullptr;
+
+ // It is assumed that audio is always equal or ahead of video.
+ if (audio && SUCCEEDED(audio->GetBytes(&audio_bytes)) && audio_bytes)
+ {
+ auto sample_frame_count = audio->GetSampleFrameCount();
+ auto audio_data = reinterpret_cast<int32_t*>(audio_bytes);
+
+ audio_buffer = std::make_shared<core::mutable_audio_buffer>(
+ audio_data,
+ audio_data + sample_frame_count * channel_layout_.num_channels);
+ }
+ else
+ audio_buffer = std::make_shared<core::mutable_audio_buffer>(audio_cadence_.front() * channel_layout_.num_channels, 0);
+
+ // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
+ // This cadence fills the audio mixer most optimally.
+
+ sync_buffer_.push_back(audio_buffer->size() / channel_layout_.num_channels);
+ if(!boost::range::equal(sync_buffer_, audio_cadence_))
+ {
+ CASPAR_LOG(trace) << print() << L" Syncing audio. Expected cadence: " << to_string(audio_cadence_) << L" Got cadence: " << to_string(sync_buffer_);
+ return S_OK;
+ }
+ boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
+
+ // PUSH
+
+ muxer_.push({ audio_buffer });
+ muxer_.push(static_cast<std::shared_ptr<AVFrame>>(video_frame));
+
+ // POLL
+
+ for (auto frame = muxer_.poll(); frame != core::draw_frame::empty(); frame = muxer_.poll())
+ {
+ if (!frame_buffer_.try_push(frame))
+ {
+ auto dummy = core::draw_frame::empty();
+ frame_buffer_.try_pop(dummy);
+
+ frame_buffer_.try_push(frame);
+
+ graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
+ }
+ }
+
+ graph_->set_value("frame-time", frame_timer.elapsed()*out_format_desc_.fps*0.5);
+ monitor_subject_ << core::monitor::message("/profiler/time") % frame_timer.elapsed() % out_format_desc_.fps;
+
+ graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));
+ monitor_subject_ << core::monitor::message("/buffer") % frame_buffer_.size() % frame_buffer_.capacity();
+ }
+ catch(...)
+ {
+ exception_ = std::current_exception();
+ return E_FAIL;
+ }
+
+ return S_OK;
+ }
+
+ core::draw_frame get_frame()
+ {
+ if(exception_ != nullptr)
+ std::rethrow_exception(exception_);
+
+ core::draw_frame frame = last_frame_;
+
+ if (!frame_buffer_.try_pop(frame))
+ graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
+ else
+ last_frame_ = frame;
+
+ graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size()) / static_cast<float>(frame_buffer_.capacity()));
+
+ return frame;
+ }
+
+ std::wstring print() const
+ {
+ return model_name_ + L" [" + boost::lexical_cast<std::wstring>(device_index_) + L"|" + in_format_desc_.name + L"]";
+ }
+
+ boost::rational<int> get_out_framerate() const
+ {
+ return muxer_.out_framerate();
+ }
+
+ core::monitor::subject& monitor_output()
+ {
+ return monitor_subject_;
+ }
+};
+
+class decklink_producer_proxy : public core::frame_producer_base
+{
+ std::unique_ptr<decklink_producer> producer_;
+ const uint32_t length_;
+ executor executor_;
+public:
+ explicit decklink_producer_proxy(
+ const core::video_format_desc& in_format_desc,
+ const spl::shared_ptr<core::frame_factory>& frame_factory,
+ const core::video_format_desc& out_format_desc,
+ const core::audio_channel_layout& channel_layout,
+ int device_index,
+ const std::wstring& filter_str,
+ uint32_t length)
+ : executor_(L"decklink_producer[" + boost::lexical_cast<std::wstring>(device_index) + L"]")
+ , length_(length)
+ {
+ auto ctx = core::diagnostics::call_context::for_thread();
+ executor_.invoke([=]
+ {
+ core::diagnostics::call_context::for_thread() = ctx;
+ com_initialize();
+ producer_.reset(new decklink_producer(in_format_desc, device_index, frame_factory, out_format_desc, channel_layout, filter_str));
+ });
+ }
+
+ ~decklink_producer_proxy()
+ {
+ executor_.invoke([=]
+ {
+ producer_.reset();
+ com_uninitialize();
+ });
+ }
+
+ core::monitor::subject& monitor_output()
+ {
+ return producer_->monitor_output();
+ }
+
+ // frame_producer
+
+ core::draw_frame receive_impl() override
+ {
+ return producer_->get_frame();
+ }
+
+ core::constraints& pixel_constraints() override
+ {
+ return producer_->pixel_constraints();
+ }
+
+ uint32_t nb_frames() const override
+ {
+ return length_;
+ }
+
+ std::wstring print() const override
+ {
+ return producer_->print();
+ }
+
+ std::wstring name() const override
+ {
+ return L"decklink";
+ }
+
+ boost::property_tree::wptree info() const override
+ {
+ boost::property_tree::wptree info;
+ info.add(L"type", L"decklink");
+ return info;
+ }
+
+ boost::rational<int> get_out_framerate() const
+ {
+ return producer_->get_out_framerate();
+ }
+};
+
+void describe_producer(core::help_sink& sink, const core::help_repository& repo)
+{
+ sink.short_description(L"Allows video sources to be input from BlackMagic Design cards.");
+ sink.syntax(L"DECKLINK [device:int],DEVICE [device:int] {FILTER [filter:string]} {LENGTH [length:int]} {FORMAT [format:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
+ sink.para()->text(L"Allows video sources to be input from BlackMagic Design cards. Parameters:");
+ sink.definitions()
+ ->item(L"device", L"The decklink device to stream the input from. See the Blackmagic control panel for the order of devices in your system.")
+ ->item(L"filter", L"If specified, sets an FFmpeg video filter to use.")
+ ->item(L"length", L"Optionally specify a limit on how many frames to produce.")
+ ->item(L"format", L"Specifies what video format to expect on the incoming SDI/HDMI signal. If not specified the video format of the channel is assumed.")
+ ->item(L"channel_layout", L"Specifies what audio channel layout to expect on the incoming SDI/HDMI signal. If not specified, stereo is assumed.");
+ sink.para()->text(L"Examples:");
+ sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2", L"Play using decklink device 2 expecting the video signal to have the same video format as the channel.");
+ sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 FORMAT PAL FILTER yadif=1:-1", L"Play using decklink device 2 expecting the video signal to be in PAL and deinterlace it.");
+ sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 LENGTH 1000", L"Play using decklink device 2 but only produce 1000 frames.");
+ sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 CHANNEL_LAYOUT smpte", L"Play using decklink device 2 and expect smpte surround sound.");
+}
+
+spl::shared_ptr<core::frame_producer> create_producer(const core::frame_producer_dependencies& dependencies, const std::vector<std::wstring>& params)
+{
+ if(params.empty() || !boost::iequals(params.at(0), "decklink"))
+ return core::frame_producer::empty();
+
+ auto device_index = get_param(L"DEVICE", params, -1);
+ if(device_index == -1)
+ device_index = boost::lexical_cast<int>(params.at(1));
+
+ auto filter_str = get_param(L"FILTER", params);
+ auto length = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());
+ auto in_format_desc = core::video_format_desc(get_param(L"FORMAT", params, L"INVALID"));
+
+ if(in_format_desc.format == core::video_format::invalid)
+ in_format_desc = dependencies.format_desc;
+
+ auto channel_layout_spec = get_param(L"CHANNEL_LAYOUT", params);
+ auto channel_layout = *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo");
+
+ if (!channel_layout_spec.empty())
+ {
+ auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(channel_layout_spec);
+
+ if (!found_layout)
+ CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Channel layout not found."));
+
+ channel_layout = *found_layout;
+ }
+
+ boost::ireplace_all(filter_str, L"DEINTERLACE_BOB", L"YADIF=1:-1");
+ boost::ireplace_all(filter_str, L"DEINTERLACE_LQ", L"SEPARATEFIELDS");
+ boost::ireplace_all(filter_str, L"DEINTERLACE", L"YADIF=0:-1");
+
+ auto producer = spl::make_shared<decklink_producer_proxy>(
+ in_format_desc,
+ dependencies.frame_factory,
+ dependencies.format_desc,
+ channel_layout,
+ device_index,
+ filter_str,
+ length);
+
+ auto get_source_framerate = [=] { return producer->get_out_framerate(); };
+ auto target_framerate = dependencies.format_desc.framerate;
+
+ return core::create_destroy_proxy(core::create_framerate_producer(
+ producer,
+ get_source_framerate,
+ target_framerate,
+ dependencies.format_desc.field_mode,
+ dependencies.format_desc.audio_cadence));
+}
+}}