* Author: Robert Nagy, ronag89@gmail.com
*/
-#include "../stdafx.h"
+#include "../StdAfx.h"
#include "decklink_producer.h"
-#include "../interop/DeckLinkAPI_h.h"
#include "../util/util.h"
#include "../../ffmpeg/producer/filter/filter.h"
#include <common/except.h>
#include <common/log.h>
#include <common/param.h>
+#include <common/timer.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/frame/frame.h>
#include <core/frame/draw_frame.h>
#include <core/frame/frame_transform.h>
#include <core/frame/frame_factory.h>
+#include <core/producer/frame_producer.h>
#include <core/monitor/monitor.h>
+#include <core/diagnostics/call_context.h>
#include <core/mixer/audio/audio_mixer.h>
+#include <core/help/help_repository.h>
+#include <core/help/help_sink.h>
#include <tbb/concurrent_queue.h>
#include <boost/algorithm/string.hpp>
-#include <boost/foreach.hpp>
#include <boost/property_tree/ptree.hpp>
-#include <boost/timer.hpp>
+#include <boost/range/adaptor/transformed.hpp>
#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (pop)
#endif
-#pragma warning(push)
-#pragma warning(disable : 4996)
-
- #include <atlbase.h>
-
- #include <atlcom.h>
- #include <atlhost.h>
-
-#pragma warning(push)
+#include "../decklink_api.h"
#include <functional>
namespace caspar { namespace decklink {
+
+// Rounds a layout's channel count up to the nearest count the DeckLink audio
+// input is configured with (2, 8 or 16); the adjusted count is what gets
+// passed to IDeckLinkInput::EnableAudioInput. All other layout fields are
+// returned untouched. NOTE(review): presumably 2/8/16 are the only counts the
+// DeckLink hardware accepts — confirm against the DeckLink SDK documentation.
+core::audio_channel_layout get_adjusted_channel_layout(core::audio_channel_layout layout)
+{
+	if (layout.num_channels <= 2)
+		layout.num_channels = 2;
+	else if (layout.num_channels <= 8)
+		layout.num_channels = 8;
+	else
+		layout.num_channels = 16;
+
+	return layout;
+}
+
+// Renders an integer sequence (e.g. an audio cadence or the sync buffer) as a
+// comma-separated wide string for log output.
+template <typename T>
+std::wstring to_string(const T& cadence)
+{
+	return boost::join(cadence | boost::adaptors::transformed([](size_t i) { return boost::lexical_cast<std::wstring>(i); }), L", ");
+}
class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback
{
- monitor::basic_subject event_subject_;
+ const int device_index_;
+ core::monitor::subject monitor_subject_;
spl::shared_ptr<diagnostics::graph> graph_;
- boost::timer tick_timer_;
+ caspar::timer tick_timer_;
- CComPtr<IDeckLink> decklink_;
- CComQIPtr<IDeckLinkInput> input_;
- CComQIPtr<IDeckLinkAttributes > attributes_;
+ com_ptr<IDeckLink> decklink_ = get_device(device_index_);
+ com_iface_ptr<IDeckLinkInput> input_ = iface_cast<IDeckLinkInput>(decklink_);
+ com_iface_ptr<IDeckLinkAttributes> attributes_ = iface_cast<IDeckLinkAttributes>(decklink_);
- const std::wstring model_name_;
- const int device_index_;
+ const std::wstring model_name_ = get_model_name(decklink_);
const std::wstring filter_;
- std::vector<int> audio_cadence_;
- boost::circular_buffer<size_t> sync_buffer_;
- ffmpeg::frame_muxer muxer_;
-
- spl::shared_ptr<core::frame_factory> frame_factory_;
core::video_format_desc in_format_desc_;
core::video_format_desc out_format_desc_;
+ std::vector<int> audio_cadence_ = out_format_desc_.audio_cadence;
+ boost::circular_buffer<size_t> sync_buffer_ { audio_cadence_.size() };
+ spl::shared_ptr<core::frame_factory> frame_factory_;
+ core::audio_channel_layout channel_layout_;
+ ffmpeg::frame_muxer muxer_ { in_format_desc_.fps, frame_factory_, out_format_desc_, channel_layout_, filter_ };
+
+ core::constraints constraints_ { in_format_desc_.width, in_format_desc_.height };
tbb::concurrent_bounded_queue<core::draw_frame> frame_buffer_;
- std::exception_ptr exception_;
+ std::exception_ptr exception_;
public:
- decklink_producer(const core::video_format_desc& in_format_desc,
- int device_index,
- const spl::shared_ptr<core::frame_factory>& frame_factory,
- const core::video_format_desc& out_format_desc,
- const std::wstring& filter)
- : decklink_(get_device(device_index))
- , input_(decklink_)
- , attributes_(decklink_)
- , model_name_(get_model_name(decklink_))
- , device_index_(device_index)
+ decklink_producer(
+ const core::video_format_desc& in_format_desc,
+ int device_index,
+ const spl::shared_ptr<core::frame_factory>& frame_factory,
+ const core::video_format_desc& out_format_desc,
+ const core::audio_channel_layout& channel_layout,
+ const std::wstring& filter)
+ : device_index_(device_index)
, filter_(filter)
, in_format_desc_(in_format_desc)
, out_format_desc_(out_format_desc)
- , muxer_(in_format_desc.fps, frame_factory, out_format_desc, filter)
- , audio_cadence_(out_format_desc.audio_cadence)
- , sync_buffer_(out_format_desc.audio_cadence.size())
, frame_factory_(frame_factory)
- {
+ , channel_layout_(get_adjusted_channel_layout(channel_layout))
+ {
frame_buffer_.set_capacity(2);
- graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
+ graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
graph_->set_text(print());
diagnostics::register_graph(graph_);
- auto display_mode = get_display_mode(input_, in_format_desc.format, bmdFormat8BitYUV, bmdVideoInputFlagDefault);
+ bool will_attempt_dma;
+ auto display_mode = get_display_mode(input_, in_format_desc.format, bmdFormat8BitYUV, bmdVideoInputFlagDefault, will_attempt_dma);
// NOTE: bmdFormat8BitARGB is currently not supported by any decklink card. (2011-05-08)
if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0)))
<< msg_info(print() + L" Could not enable video input.")
<< boost::errinfo_api_function("EnableVideoInput"));
- if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, static_cast<int>(in_format_desc.audio_channels))))
- CASPAR_THROW_EXCEPTION(caspar_exception()
+ if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, static_cast<int>(channel_layout_.num_channels))))
+ CASPAR_THROW_EXCEPTION(caspar_exception()
<< msg_info(print() + L" Could not enable audio input.")
<< boost::errinfo_api_function("EnableAudioInput"));
}
}
+ core::constraints& pixel_constraints()
+ {
+ return constraints_;
+ }
+
virtual HRESULT STDMETHODCALLTYPE QueryInterface (REFIID, LPVOID*) {return E_NOINTERFACE;}
virtual ULONG STDMETHODCALLTYPE AddRef () {return 1;}
virtual ULONG STDMETHODCALLTYPE Release () {return 1;}
graph_->set_value("tick-time", tick_timer_.elapsed()*out_format_desc_.fps*0.5);
tick_timer_.restart();
- boost::timer frame_timer;
+ caspar::timer frame_timer;
// Video
video_frame->interlaced_frame = in_format_desc_.field_mode != core::field_mode::progressive;
video_frame->top_field_first = in_format_desc_.field_mode == core::field_mode::upper ? 1 : 0;
-		event_subject_  << monitor::event("file/name")				% model_name_
-						<< monitor::event("file/path")				% device_index_
-						<< monitor::event("file/video/width")		% video->GetWidth()
-						<< monitor::event("file/video/height")		% video->GetHeight()
-						<< monitor::event("file/video/field")		% u8(!video_frame->interlaced_frame ? "progressive" : (video_frame->top_field_first ? "upper" : "lower"))
-						<< monitor::event("file/audio/sample-rate")	% 48000
-						<< monitor::event("file/audio/channels")	% 2
-						<< monitor::event("file/audio/format")		% u8(av_get_sample_fmt_name(AV_SAMPLE_FMT_S32))
-						<< monitor::event("file/fps")				% in_format_desc_.fps;
+		// Publish source metadata on the OSC monitor. The sample rate is fixed
+		// at 48 kHz (EnableAudioInput is called with bmdAudioSampleRate48kHz);
+		// the channel count reflects the adjusted input layout rather than a
+		// hard-coded stereo value, matching what EnableAudioInput was given.
+		monitor_subject_
+				<< core::monitor::message("/file/name")					% model_name_
+				<< core::monitor::message("/file/path")					% device_index_
+				<< core::monitor::message("/file/video/width")			% video->GetWidth()
+				<< core::monitor::message("/file/video/height")			% video->GetHeight()
+				<< core::monitor::message("/file/video/field")			% u8(!video_frame->interlaced_frame ? "progressive" : (video_frame->top_field_first ? "upper" : "lower"))
+				<< core::monitor::message("/file/audio/sample-rate")	% 48000
+				<< core::monitor::message("/file/audio/channels")		% channel_layout_.num_channels
+				<< core::monitor::message("/file/audio/format")			% u8(av_get_sample_fmt_name(AV_SAMPLE_FMT_S32))
+				<< core::monitor::message("/file/fps")					% in_format_desc_.fps;
	// Audio
-	std::shared_ptr<core::audio_buffer>	audio_buffer;
-
-	void* audio_bytes = nullptr;
-	if(FAILED(audio->GetBytes(&audio_bytes)) || !audio_bytes)
-		return S_OK;
-
	auto audio_frame = ffmpeg::create_frame();
+	audio_frame->format = AV_SAMPLE_FMT_S32;
+	core::mutable_audio_buffer	audio_buf;
-	audio_frame->data[0]		= reinterpret_cast<uint8_t*>(audio_bytes);
-	audio_frame->linesize[0]	= audio->GetSampleFrameCount()*out_format_desc_.audio_channels*sizeof(int32_t);
-	audio_frame->nb_samples		= audio->GetSampleFrameCount();
-	audio_frame->format			= AV_SAMPLE_FMT_S32;
+	if (audio)
+	{
+		// Wrap the card's audio packet directly; no copy is made here.
+		void* audio_bytes = nullptr;
+		if (FAILED(audio->GetBytes(&audio_bytes)) || !audio_bytes)
+			return S_OK;
+
+
+		audio_frame->data[0]		= reinterpret_cast<uint8_t*>(audio_bytes);
+		audio_frame->linesize[0]	= audio->GetSampleFrameCount() * channel_layout_.num_channels * sizeof(int32_t);
+		audio_frame->nb_samples		= audio->GetSampleFrameCount();
+	}
+	else
+	{
+		// No audio packet accompanied this video frame: synthesize one cadence
+		// step of silence so the muxer still receives matched video/audio.
+		// NOTE(review): audio_frame borrows audio_buf's stack-local storage;
+		// this assumes muxer_.push_audio() copies/consumes the samples before
+		// the handler returns — confirm against frame_muxer.
+		audio_buf.resize(audio_cadence_.front() * channel_layout_.num_channels, 0);
+		audio_frame->data[0]		= reinterpret_cast<uint8_t*>(audio_buf.data());
+		audio_frame->linesize[0]	= audio_cadence_.front() * channel_layout_.num_channels * sizeof(int32_t);
+		audio_frame->nb_samples		= audio_cadence_.front();
+	}
// Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
// This cadence fills the audio mixer most optimally.
- sync_buffer_.push_back(audio->GetSampleFrameCount()*out_format_desc_.audio_channels);
+ sync_buffer_.push_back(audio_frame->nb_samples);
if(!boost::range::equal(sync_buffer_, audio_cadence_))
{
- CASPAR_LOG(trace) << print() << L" Syncing audio.";
+ CASPAR_LOG(trace) << print() << L" Syncing audio. Expected cadence: " << to_string(audio_cadence_) << L" Got cadence: " << to_string(sync_buffer_);
return S_OK;
}
boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
// PUSH
- muxer_.push_video(video_frame);
- muxer_.push_audio(audio_frame);
+ muxer_.push_video(video_frame);
+ muxer_.push_audio(audio_frame);
// POLL
frame_buffer_.try_pop(dummy);
frame_buffer_.try_push(frame);
- graph_->set_tag("dropped-frame");
+ graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
}
}
graph_->set_value("frame-time", frame_timer.elapsed()*out_format_desc_.fps*0.5);
- event_subject_ << monitor::event("profiler/time") % frame_timer.elapsed() % out_format_desc_.fps;
+ monitor_subject_ << core::monitor::message("/profiler/time") % frame_timer.elapsed() % out_format_desc_.fps;
graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));
- event_subject_ << monitor::event("buffer") % frame_buffer_.size() % frame_buffer_.capacity();
+ monitor_subject_ << core::monitor::message("/buffer") % frame_buffer_.size() % frame_buffer_.capacity();
}
catch(...)
{
core::draw_frame frame = core::draw_frame::late();
if(!frame_buffer_.try_pop(frame))
- graph_->set_tag("late-frame");
+ graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));
return frame;
}
return model_name_ + L" [" + boost::lexical_cast<std::wstring>(device_index_) + L"|" + in_format_desc_.name + L"]";
}
- void subscribe(const monitor::observable::observer_ptr& o)
+ core::monitor::subject& monitor_output()
{
- event_subject_.subscribe(o);
- }
-
- void unsubscribe(const monitor::observable::observer_ptr& o)
- {
- event_subject_.unsubscribe(o);
+ return monitor_subject_;
}
};
const uint32_t length_;
executor executor_;
public:
- explicit decklink_producer_proxy(const core::video_format_desc& in_format_desc,
- const spl::shared_ptr<core::frame_factory>& frame_factory,
- const core::video_format_desc& out_format_desc,
- int device_index,
- const std::wstring& filter_str, uint32_t length)
+ explicit decklink_producer_proxy(
+ const core::video_format_desc& in_format_desc,
+ const spl::shared_ptr<core::frame_factory>& frame_factory,
+ const core::video_format_desc& out_format_desc,
+ const core::audio_channel_layout& channel_layout,
+ int device_index,
+ const std::wstring& filter_str,
+ uint32_t length)
: executor_(L"decklink_producer[" + boost::lexical_cast<std::wstring>(device_index) + L"]")
, length_(length)
{
+ auto ctx = core::diagnostics::call_context::for_thread();
executor_.invoke([=]
{
- CoInitialize(nullptr);
- producer_.reset(new decklink_producer(in_format_desc, device_index, frame_factory, out_format_desc, filter_str));
+ core::diagnostics::call_context::for_thread() = ctx;
+ com_initialize();
+ producer_.reset(new decklink_producer(in_format_desc, device_index, frame_factory, out_format_desc, channel_layout, filter_str));
});
}
executor_.invoke([=]
{
producer_.reset();
- CoUninitialize();
+ com_uninitialize();
});
}
- void subscribe(const monitor::observable::observer_ptr& o) override
- {
- producer_->subscribe(o);
- }
-
- void unsubscribe(const monitor::observable::observer_ptr& o) override
+ core::monitor::subject& monitor_output()
{
- producer_->unsubscribe(o);
+ return producer_->monitor_output();
}
// frame_producer
{
return producer_->get_frame();
}
+
+ core::constraints& pixel_constraints() override
+ {
+ return producer_->pixel_constraints();
+ }
uint32_t nb_frames() const override
{
}
};
-spl::shared_ptr<core::frame_producer> create_producer(const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& out_format_desc, const std::vector<std::wstring>& params)
+// Registers AMCP help for the DECKLINK producer: short description, syntax
+// line, per-parameter documentation and usage examples.
+void describe_producer(core::help_sink& sink, const core::help_repository& repo)
+{
+	sink.short_description(L"Allows video sources to be input from BlackMagic Design cards.");
+	sink.syntax(L"DECKLINK [device:int],DEVICE [device:int] {FILTER [filter:string]} {LENGTH [length:int]} {FORMAT [format:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
+	sink.para()->text(L"Allows video sources to be input from BlackMagic Design cards. Parameters:");
+	sink.definitions()
+		->item(L"device", L"The decklink device to stream the input from. See the Blackmagic control panel for the order of devices in your system.")
+		->item(L"filter", L"If specified, sets an FFmpeg video filter to use.")
+		->item(L"length", L"Optionally specify a limit on how many frames to produce.")
+		->item(L"format", L"Specifies what video format to expect on the incoming SDI/HDMI signal. If not specified the video format of the channel is assumed.")
+		->item(L"channel_layout", L"Specifies what audio channel layout to expect on the incoming SDI/HDMI signal. If not specified, stereo is assumed.");
+	sink.para()->text(L"Examples:");
+	sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2", L"Play using decklink device 2 expecting the video signal to have the same video format as the channel.");
+	sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 FORMAT PAL FILTER yadif=1:-1", L"Play using decklink device 2 expecting the video signal to be in PAL and deinterlace it.");
+	sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 LENGTH 1000", L"Play using decklink device 2 but only produce 1000 frames.");
+	sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 CHANNEL_LAYOUT smpte", L"Play using decklink device 2 and expect smpte surround sound.");
+}
+
+// AMCP factory entry point: parses DECKLINK parameters and constructs the
+// threaded producer proxy. Returns empty() when the command is not DECKLINK.
+spl::shared_ptr<core::frame_producer> create_producer(const core::frame_producer_dependencies& dependencies, const std::vector<std::wstring>& params)
{
-	if(params.empty() || !boost::iequals(params[0], "decklink"))
+	if(params.empty() || !boost::iequals(params.at(0), "decklink"))
		return core::frame_producer::empty();
	auto device_index	= get_param(L"DEVICE", params, -1);
+	// FORMAT describes the incoming signal; fall back to the channel's format
+	// when absent or unrecognized.
	auto in_format_desc = core::video_format_desc(get_param(L"FORMAT", params, L"INVALID"));
	if(in_format_desc.format == core::video_format::invalid)
-		in_format_desc = out_format_desc;
+		in_format_desc = dependencies.format_desc;
+
+	// CHANNEL_LAYOUT defaults to stereo; naming an unknown layout is a user
+	// error rather than a silent fallback.
+	auto channel_layout_spec	= get_param(L"CHANNEL_LAYOUT", params);
+	auto channel_layout			= *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo");
+
+	if (!channel_layout_spec.empty())
+	{
+		auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(channel_layout_spec);
+
+		if (!found_layout)
+			CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Channel layout not found."));
+
+		channel_layout = *found_layout;
+	}
-	return create_destroy_proxy(spl::make_shared<decklink_producer_proxy>(in_format_desc, frame_factory, out_format_desc, device_index, filter_str, length));
+	// NOTE(review): filter_str and length are parsed in this function's
+	// surrounding context (not visible in this hunk) — confirm in full file.
+	return create_destroy_proxy(spl::make_shared<decklink_producer_proxy>(
+			in_format_desc,
+			dependencies.frame_factory,
+			dependencies.format_desc,
+			channel_layout,
+			device_index,
+			filter_str,
+			length));
}
-}}
\ No newline at end of file
+}}