#include "../interop/DeckLinkAPI_h.h"\r
#include "../util/util.h"\r
\r
+#include "../../ffmpeg/producer/filter/filter.h"\r
+#include "../../ffmpeg/producer/util.h"\r
+#include "../../ffmpeg/producer/frame_muxer.h"\r
+\r
+#include <common/log/log.h>\r
#include <common/diagnostics/graph.h>\r
#include <common/concurrency/com_context.h>\r
#include <common/exception/exceptions.h>\r
#include <common/memory/memclr.h>\r
-#include <common/utility/timer.h>\r
\r
+#include <core/mixer/write_frame.h>\r
+#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/frame_factory.h>\r
-#include <core/producer/frame/write_frame.h>\r
\r
#include <tbb/concurrent_queue.h>\r
-#include <tbb/atomic.h>\r
\r
#include <boost/algorithm/string.hpp>\r
+#include <boost/foreach.hpp>\r
#include <boost/timer.hpp>\r
\r
+#if defined(_MSC_VER)\r
+#pragma warning (push)\r
+#pragma warning (disable : 4244)\r
+#endif\r
+extern "C" \r
+{\r
+ #define __STDC_CONSTANT_MACROS\r
+ #define __STDC_LIMIT_MACROS\r
+ #include <libavcodec/avcodec.h>\r
+}\r
+#if defined(_MSC_VER)\r
+#pragma warning (pop)\r
+#endif\r
+\r
#pragma warning(push)\r
#pragma warning(disable : 4996)\r
\r
\r
#include <functional>\r
\r
-namespace caspar { \r
-\r
-class decklink_producer : public IDeckLinkInputCallback\r
+namespace caspar { namespace decklink {\r
+	\r
+// Captures video/audio from a Blackmagic DeckLink card. The driver invokes\r
+// VideoInputFrameArrived on its own thread; frames are filtered, muxed and\r
+// parked in frame_buffer_ for the consumer thread.\r
+class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback\r
{	\r
-	CComPtr<IDeckLink>					decklink_;\r
-	CComQIPtr<IDeckLinkInput>			input_;\r
-	\r
-	const std::wstring					model_name_;\r
-	const core::video_format_desc		format_desc_;\r
-	const size_t						device_index_;\r
-\r
-	std::shared_ptr<diagnostics::graph>	graph_;\r
-	boost::timer						perf_timer_;\r
+	CComPtr<IDeckLink>					decklink_;\r
+	CComQIPtr<IDeckLinkInput>			input_;\r
\r
-	std::vector<short>					audio_data_;\r
+	const std::wstring					model_name_;\r
+	const core::video_format_desc		format_desc_;\r
+	const size_t						device_index_;\r
\r
-	std::shared_ptr<core::frame_factory> frame_factory_;\r
+	std::shared_ptr<diagnostics::graph>	graph_;\r
+	boost::timer						tick_timer_;	// time between driver callbacks\r
+	boost::timer						frame_timer_;	// time spent inside one callback\r
+	\r
+	safe_ptr<core::frame_factory>		frame_factory_;\r
\r
-	tbb::concurrent_bounded_queue<safe_ptr<core::basic_frame>>	frame_buffer_;\r
-	safe_ptr<core::basic_frame>			tail_;\r
+	tbb::concurrent_bounded_queue<safe_ptr<core::basic_frame>>	frame_buffer_;	// bounded hand-off queue (capacity 2) drained by get_frame()\r
\r
-	std::exception_ptr exception_;\r
+	std::exception_ptr					exception_;	// error captured on the callback thread, rethrown in get_frame()\r
+	ffmpeg::filter						filter_;	// optional libavfilter graph (e.g. deinterlacing)\r
+	\r
+	ffmpeg::frame_muxer					muxer_;		// pairs filtered video with audio into basic_frames\r
public:\r
-	decklink_producer(const core::video_format_desc& format_desc, size_t device_index, const std::shared_ptr<core::frame_factory>& frame_factory)\r
+	// Opens DeckLink device `device_index`, enables 8-bit YUV video capture and\r
+	// 32-bit integer audio capture, registers this object as the input callback\r
+	// and starts streaming. `filter` is an ffmpeg filter string applied to every\r
+	// captured frame. Throws caspar_exception when the card rejects any step.\r
+	decklink_producer(const core::video_format_desc& format_desc, size_t device_index, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter)\r
		: decklink_(get_device(device_index))\r
		, input_(decklink_)\r
		, model_name_(get_model_name(decklink_))\r
		, format_desc_(format_desc)\r
		, device_index_(device_index)\r
		, frame_factory_(frame_factory)\r
-		, tail_(core::basic_frame::empty())\r
+		, filter_(filter)\r
+		// Double-rate filters (e.g. bob deinterlace) emit two output frames per\r
+		// input frame, so the muxer must run at twice the format frame rate.\r
+		, muxer_(ffmpeg::double_rate(filter) ? format_desc.fps * 2.0 : format_desc.fps, frame_factory)\r
	{\r
		frame_buffer_.set_capacity(2);\r
\r
		graph_ = diagnostics::create_graph(boost::bind(&decklink_producer::print, this));\r
		graph_->add_guide("tick-time", 0.5);\r
-		graph_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));\r
+		graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));	\r
		graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));\r
+		graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
		graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));\r
		graph_->set_color("output-buffer", diagnostics::color(0.0f, 1.0f, 0.0f));\r
\r
-		auto display_mode = get_display_mode(input_, format_desc_.format);\r
-		if(!display_mode) \r
-			BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(narrow(print()) + " Card does not support requested videoformat."));\r
+		auto display_mode = get_display_mode(input_, format_desc_.format, bmdFormat8BitYUV, bmdVideoInputFlagDefault);\r
\r
-		// NOTE: For some reason the code below fails even for PAL.\r
-		//BMDDisplayModeSupport displayModeSupport;\r
-		//if(FAILED(input_->DoesSupportVideoMode(display_mode->GetDisplayMode(), bmdFormat8BitBGRA, bmdVideoOutputFlagDefault, &displayModeSupport, nullptr)) || displayModeSupport == bmdDisplayModeNotSupported)\r
-		//	BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(narrow(print()) + " Card does not support requested videoformat."));\r
-		//else if(displayModeSupport == bmdDisplayModeSupportedWithConversion)\r
-		//	CASPAR_LOG(warning) << print() << " Display mode is supported with conversion.";\r
-\r
		// NOTE: bmdFormat8BitARGB is currently not supported by any decklink card. (2011-05-08)\r
-		if(FAILED(input_->EnableVideoInput(display_mode->GetDisplayMode(), bmdFormat8BitYUV, 0))) \r
-			BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(narrow(print()) + " Could not enable video input."));\r
-\r
-		if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, 2))) \r
-			BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(narrow(print()) + " Could not enable audio input."));\r
+		if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0))) \r
+			BOOST_THROW_EXCEPTION(caspar_exception() \r
+									<< msg_info(narrow(print()) + " Could not enable video input.")\r
+									<< boost::errinfo_api_function("EnableVideoInput"));\r
+\r
+		// 48 kHz / 32-bit samples; channel count comes from the format descriptor.\r
+		if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, format_desc_.audio_channels))) \r
+			BOOST_THROW_EXCEPTION(caspar_exception() \r
+									<< msg_info(narrow(print()) + " Could not enable audio input.")\r
+									<< boost::errinfo_api_function("EnableAudioInput"));\r
\r
+		// NOTE(review): FAILED(hr) != S_OK is redundant — FAILED(hr) alone is the\r
+		// intended test (it already yields nonzero exactly on failure).\r
		if (FAILED(input_->SetCallback(this)) != S_OK)\r
-			BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(narrow(print()) + " Failed to set input callback."));\r
+			BOOST_THROW_EXCEPTION(caspar_exception() \r
+									<< msg_info(narrow(print()) + " Failed to set input callback.")\r
+									<< boost::errinfo_api_function("SetCallback"));\r
\r
		if(FAILED(input_->StartStreams()))\r
-			BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(narrow(print()) + " Failed to start input stream."));\r
+			BOOST_THROW_EXCEPTION(caspar_exception() \r
+									<< msg_info(narrow(print()) + " Failed to start input stream.")\r
+									<< boost::errinfo_api_function("StartStreams"));\r
\r
-		CASPAR_LOG(info) << print() << " successfully initialized decklink for " << format_desc_.name;\r
+		CASPAR_LOG(info) << print() << L" Successfully Initialized.";\r
	}\r
\r
~decklink_producer()\r
\r
+	// DeckLink driver callback (runs on the driver's thread). Wraps the raw\r
+	// UYVY capture buffer in an AVFrame, pushes it through filter_ and muxer_\r
+	// together with the audio packet, and queues resulting frames for\r
+	// get_frame(). Errors are stored for rethrow on the consumer thread.\r
	virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio)\r
	{	\r
+		// Audio-only callbacks carry no video frame; nothing to do.\r
+		if(!video)\r
+			return S_OK;\r
+\r
		try\r
		{\r
-			graph_->update_value("tick-time", static_cast<float>(perf_timer_.elapsed()/format_desc_.interval*0.5));\r
-			perf_timer_.restart();\r
-			\r
-			core::pixel_format_desc desc;\r
-			desc.pix_fmt = core::pixel_format::ycbcr;\r
-			desc.planes.push_back(core::pixel_format_desc::plane(format_desc_.width, format_desc_.height, 1));\r
-			desc.planes.push_back(core::pixel_format_desc::plane(format_desc_.width/2, format_desc_.height, 1));\r
-			desc.planes.push_back(core::pixel_format_desc::plane(format_desc_.width/2, format_desc_.height, 1));	\r
-			auto frame = frame_factory_->create_frame(this, desc);\r
-			\r
-			void* bytes = nullptr;\r
-			video->GetBytes(&bytes);\r
-			unsigned char* data = reinterpret_cast<unsigned char*>(bytes);\r
-			const size_t frame_size = (format_desc_.width * 16 / 8) * format_desc_.height;\r
-\r
-			// Convert to planar YUV422\r
-			unsigned char* y  = frame->image_data(0).begin();\r
-			unsigned char* cb = frame->image_data(1).begin();\r
-			unsigned char* cr = frame->image_data(2).begin();\r
-			\r
-			tbb::parallel_for(tbb::blocked_range<size_t>(0, frame_size/4), [&](const tbb::blocked_range<size_t>& r)\r
-			{\r
-				for(auto n = r.begin(); n != r.end(); ++n)\r
-				{\r
-					cb[n]	  = data[n*4+0];\r
-					y [n*2+0] = data[n*4+1];\r
-					cr[n]	  = data[n*4+2];\r
-					y [n*2+1] = data[n*4+3];\r
-				}\r
-			});\r
+			graph_->update_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);\r
+			tick_timer_.restart();\r
\r
+			frame_timer_.restart();\r
+\r
+			// Skip the frame silently if the card gives us no pixel data.\r
+			void* bytes = nullptr;\r
+			if(FAILED(video->GetBytes(&bytes)) || !bytes)\r
+				return S_OK;\r
+			\r
+			// Wrap the DeckLink-owned buffer without copying; av_free releases\r
+			// only the AVFrame struct, never the card's pixel data.\r
+			safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);	\r
+			avcodec_get_frame_defaults(av_frame.get());\r
+						\r
+			av_frame->data[0]			= reinterpret_cast<uint8_t*>(bytes);\r
+			av_frame->linesize[0]		= video->GetRowBytes();			\r
+			av_frame->format			= PIX_FMT_UYVY422;\r
+			av_frame->width				= video->GetWidth();\r
+			av_frame->height			= video->GetHeight();\r
+			av_frame->interlaced_frame	= format_desc_.field_mode != core::field_mode::progressive;\r
+			av_frame->top_field_first	= format_desc_.field_mode == core::field_mode::upper ? 1 : 0;\r
+					\r
+			// A filter may emit 0..n frames per input (e.g. bob deinterlace → 2).\r
+			BOOST_FOREACH(auto& av_frame2, filter_.execute(av_frame))\r
+				muxer_.push(av_frame2);	\r
+					\r
			// It is assumed that audio is always equal or ahead of video.\r
			if(audio && SUCCEEDED(audio->GetBytes(&bytes)))\r
			{\r
-				const size_t audio_samples = static_cast<size_t>(48000.0 / format_desc_.fps);\r
-				const size_t audio_nchannels = 2;\r
-\r
				auto sample_frame_count = audio->GetSampleFrameCount();\r
-				auto audio_data = reinterpret_cast<short*>(bytes);\r
-				audio_data_.insert(audio_data_.end(), audio_data, audio_data + sample_frame_count*2);\r
-\r
-				if(audio_data_.size() > audio_samples*audio_nchannels)\r
-				{\r
-					frame->audio_data() = std::vector<short>(audio_data_.begin(), audio_data_.begin() + audio_samples*audio_nchannels);\r
-					audio_data_.erase(audio_data_.begin(), audio_data_.begin() + audio_samples*audio_nchannels);\r
-				}\r
+				// 32-bit integer interleaved samples, matching EnableAudioInput.\r
+				auto audio_data = reinterpret_cast<int32_t*>(bytes);\r
+				muxer_.push(std::make_shared<core::audio_buffer>(audio_data, audio_data + sample_frame_count*format_desc_.audio_channels));\r
+			}\r
+			else\r
+				// No audio packet: push one frame's worth of zeroed samples.\r
+				muxer_.push(std::make_shared<core::audio_buffer>(frame_factory_->get_video_format_desc().audio_samples_per_frame, 0));\r
+			\r
+			muxer_.commit();\r
+\r
+			while(!muxer_.empty())\r
+			{\r
+				// Non-blocking push: if the consumer is behind, drop rather\r
+				// than stall the driver thread.\r
+				if(!frame_buffer_.try_push(muxer_.pop()))\r
+					graph_->add_tag("dropped-frame");\r
			}\r
\r
-			if(!frame_buffer_.try_push(frame))\r
-				graph_->add_tag("dropped-frame");\r
+			graph_->update_value("frame-time", frame_timer_.elapsed()*format_desc_.fps*0.5);\r
+\r
			graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));	\r
		}\r
		catch(...)\r
		if(exception_ != nullptr)\r
			std::rethrow_exception(exception_);\r
\r
-		if(!frame_buffer_.try_pop(tail_))\r
+		// Non-blocking pop: when the capture thread hasn't produced anything\r
+		// yet, hand back the late() placeholder and record it on the graph.\r
+		safe_ptr<core::basic_frame> frame = core::basic_frame::late();\r
+		if(!frame_buffer_.try_pop(frame))\r
			graph_->add_tag("late-frame");\r
		graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));	\r
-		return tail_;\r
+		return frame;\r
	}\r
\r
std::wstring print() const\r
\r
+// core::frame_producer adapter that hosts decklink_producer inside a COM\r
+// apartment (com_context) and caches the last delivered frame.\r
class decklink_producer_proxy : public core::frame_producer\r
{	\r
-	com_context<decklink_producer> context_;\r
+	safe_ptr<core::basic_frame>		last_frame_;\r
+	com_context<decklink_producer>	context_;\r
+	const int64_t					length_;	// frame count reported by nb_frames()\r
public:\r
\r
-	explicit decklink_producer_proxy(const safe_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, size_t device_index)\r
+	explicit decklink_producer_proxy(const safe_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, size_t device_index, const std::wstring& filter_str, int64_t length)\r
		: context_(L"decklink_producer[" + boost::lexical_cast<std::wstring>(device_index) + L"]")\r
+		// NOTE(review): init-list order differs from declaration order\r
+		// (last_frame_ is declared before context_) — harmless here since the\r
+		// initializers are independent, but it triggers -Wreorder.\r
+		, last_frame_(core::basic_frame::empty())\r
+		, length_(length)\r
+	{\r
+		context_.reset([&]{return new decklink_producer(format_desc, device_index, frame_factory, filter_str);}); \r
+	}\r
+\r
+	~decklink_producer_proxy()\r
	{\r
-		context_.reset([&]{return new decklink_producer(format_desc, device_index, frame_factory);}); \r
+		// Capture print() before the COM context (and the producer) is torn down.\r
+		auto str = print();\r
+		context_.reset();\r
+		CASPAR_LOG(info) << str << L" Successfully Uninitialized.";	\r
	}\r
\r
-	virtual safe_ptr<core::basic_frame> receive()\r
+	// Returns the next captured frame; remembers every non-late frame so\r
+	// last_frame() can repeat it without audio.\r
+	virtual safe_ptr<core::basic_frame> receive(int)\r
	{\r
-		return context_->get_frame();\r
+		auto frame = context_->get_frame();\r
+		if(frame != core::basic_frame::late())\r
+			last_frame_ = frame;\r
+		return frame;\r
+	}\r
+\r
+	virtual safe_ptr<core::basic_frame> last_frame() const\r
+	{\r
+		return disable_audio(last_frame_);\r
+	}\r
+	\r
+	virtual int64_t nb_frames() const \r
+	{\r
+		return length_;\r
	}\r
\r
	std::wstring print() const\r
}\r
};\r
\r
-safe_ptr<core::frame_producer> create_decklink_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::vector<std::wstring>& params)\r
+// Factory entry point: builds a decklink producer from AMCP-style params,\r
+// e.g. DECKLINK DEVICE 1 FILTER DEINTERLACE_BOB FORMAT PAL LENGTH 100.\r
+// Returns frame_producer::empty() when params don't address this module.\r
+safe_ptr<core::frame_producer> create_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::vector<std::wstring>& params)\r
{\r
	if(params.empty() || !boost::iequals(params[0], "decklink"))\r
		return core::frame_producer::empty();\r
\r
-	size_t device_index = 1;\r
-	if(params.size() > 1)\r
-		device_index = lexical_cast_or_default(params[1], 1);\r
+	auto device_index	= core::get_param(L"DEVICE", params, 1);\r
+	auto filter_str		= core::get_param<std::wstring>(L"FILTER", params, L""); 	\r
+	auto length			= core::get_param(L"LENGTH", params, std::numeric_limits<int64_t>::max()); 	\r
+	\r
+	// Expand the longer alias first: "DEINTERLACE" is a prefix of\r
+	// "DEINTERLACE_BOB", so replacing it first would mangle the BOB alias\r
+	// into "YADIF=0:-1_BOB" and it could never match afterwards.\r
+	boost::replace_all(filter_str, L"DEINTERLACE_BOB", L"YADIF=1:-1");\r
+	boost::replace_all(filter_str, L"DEINTERLACE", L"YADIF=0:-1");\r
\r
-	core::video_format_desc format_desc = core::video_format_desc::get(L"PAL");\r
-	if(params.size() > 2)\r
-	{\r
-		auto desc = core::video_format_desc::get(params[2]);\r
-		if(desc.format != core::video_format::invalid)\r
-			format_desc = desc;\r
-	}\r
+	// FORMAT is optional; an unknown/missing value yields the invalid format.\r
+	auto format_desc	= core::video_format_desc::get(core::get_param<std::wstring>(L"FORMAT", params, L"INVALID"));\r
\r
-	return make_safe<decklink_producer_proxy>(frame_factory, format_desc, device_index);\r
+	// Fall back to the channel's current video format.\r
+	if(format_desc.format == core::video_format::invalid)\r
+		format_desc = frame_factory->get_video_format_desc();\r
+			\r
+	return make_safe<decklink_producer_proxy>(frame_factory, format_desc, device_index, filter_str, length);\r
}\r
\r
-}
\ No newline at end of file
+}}
\ No newline at end of file