/*
* Copyright (c) 2010 Sveriges Television AB <info@casparcg.com>
*
* This file is part of CasparCG.
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include "../stdafx.h"

#include "decklink_producer.h"

#include "../interop/DeckLinkAPI_h.h"
#include "../util/util.h"

#include "../../ffmpeg/producer/filter/filter.h"
#include "../../ffmpeg/producer/util.h"

#include <common/diagnostics/graph.h>
#include <common/concurrency/com_context.h>
#include <common/exception/exceptions.h>
#include <common/memory/memclr.h>

#include <core/producer/frame/frame_factory.h>
#include <core/mixer/write_frame.h>
#include <core/producer/frame/audio_transform.h>

#include <tbb/concurrent_queue.h>
#include <tbb/atomic.h>

#include <boost/algorithm/string.hpp>
#include <boost/timer.hpp>

#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
	#define __STDC_CONSTANT_MACROS
	#define __STDC_LIMIT_MACROS
	#include <libavcodec/avcodec.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

#pragma warning(push)
#pragma warning(disable : 4996)

#include <atlbase.h>
#include <atlhost.h>

#pragma warning(pop)

#include <algorithm>
#include <functional>
\r
76 std::unique_ptr<filter> filter_;
\r
77 safe_ptr<core::frame_factory> frame_factory_;
\r
80 frame_filter(const std::string& filter_str, const safe_ptr<core::frame_factory>& frame_factory)
\r
81 : filter_(filter_str.empty() ? nullptr : new filter(filter_str))
\r
82 , frame_factory_(frame_factory)
\r
86 std::vector<safe_ptr<core::basic_frame>> execute(const safe_ptr<core::write_frame>& input_frame)
\r
88 std::vector<safe_ptr<core::basic_frame>> result;
\r
92 input_frame->commit();
\r
93 result.push_back(input_frame);
\r
97 auto desc = input_frame->get_pixel_format_desc();
\r
99 auto av_frame = as_av_frame(input_frame);
\r
101 filter_->push(av_frame);
\r
102 auto buffer = filter_->poll();
\r
104 if(buffer.size() == 2)
\r
106 auto frame1 = make_write_frame(this, buffer[0], frame_factory_);
\r
107 auto frame2 = make_write_frame(this, buffer[1], frame_factory_);
\r
108 frame1->audio_data() = std::move(input_frame->audio_data());
\r
110 if(frame_factory_->get_video_format_desc().mode == core::video_mode::progressive)
\r
112 frame2->audio_data().insert(frame2->audio_data().begin(), frame1->audio_data().begin() + frame1->audio_data().size()/2, frame1->audio_data().end());
\r
113 frame1->audio_data().erase(frame1->audio_data().begin() + frame1->audio_data().size()/2, frame1->audio_data().end());
\r
114 result.push_back(frame1);
\r
115 result.push_back(frame2);
\r
119 frame2->get_audio_transform().set_has_audio(false);
\r
120 result.push_back(core::basic_frame::interlace(frame1, frame2, frame_factory_->get_video_format_desc().mode));
\r
123 else if(buffer.size() > 0)
\r
125 auto frame1 = make_write_frame(this, buffer[0], frame_factory_);
\r
126 frame1->audio_data() = std::move(input_frame->audio_data());
\r
127 result.push_back(frame1);
\r
136 class decklink_producer : public IDeckLinkInputCallback
\r
138 CComPtr<IDeckLink> decklink_;
\r
139 CComQIPtr<IDeckLinkInput> input_;
\r
141 const std::wstring model_name_;
\r
142 const core::video_format_desc format_desc_;
\r
143 const size_t device_index_;
\r
145 std::shared_ptr<diagnostics::graph> graph_;
\r
146 boost::timer tick_timer_;
\r
147 boost::timer frame_timer_;
\r
149 std::vector<short> audio_data_;
\r
151 safe_ptr<core::frame_factory> frame_factory_;
\r
153 tbb::concurrent_bounded_queue<safe_ptr<core::basic_frame>> frame_buffer_;
\r
154 safe_ptr<core::basic_frame> tail_;
\r
156 std::exception_ptr exception_;
\r
157 frame_filter filter_;
\r
160 decklink_producer(const core::video_format_desc& format_desc, size_t device_index, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter_str)
\r
161 : decklink_(get_device(device_index))
\r
162 , input_(decklink_)
\r
163 , model_name_(get_model_name(decklink_))
\r
164 , format_desc_(format_desc)
\r
165 , device_index_(device_index)
\r
166 , frame_factory_(frame_factory)
\r
167 , tail_(core::basic_frame::empty())
\r
168 , filter_(narrow(filter_str), frame_factory_)
\r
170 frame_buffer_.set_capacity(2);
\r
172 graph_ = diagnostics::create_graph(boost::bind(&decklink_producer::print, this));
\r
173 graph_->add_guide("tick-time", 0.5);
\r
174 graph_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));
\r
175 graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
\r
176 graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
\r
177 graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
\r
178 graph_->set_color("output-buffer", diagnostics::color(0.0f, 1.0f, 0.0f));
\r
180 auto display_mode = get_display_mode(input_, format_desc_.format, bmdFormat8BitYUV, bmdVideoInputFlagDefault);
\r
182 // NOTE: bmdFormat8BitARGB is currently not supported by any decklink card. (2011-05-08)
\r
183 if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0)))
\r
184 BOOST_THROW_EXCEPTION(caspar_exception()
\r
185 << msg_info(narrow(print()) + " Could not enable video input.")
\r
186 << boost::errinfo_api_function("EnableVideoInput"));
\r
188 if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, 2)))
\r
189 BOOST_THROW_EXCEPTION(caspar_exception()
\r
190 << msg_info(narrow(print()) + " Could not enable audio input.")
\r
191 << boost::errinfo_api_function("EnableAudioInput"));
\r
193 if (FAILED(input_->SetCallback(this)) != S_OK)
\r
194 BOOST_THROW_EXCEPTION(caspar_exception()
\r
195 << msg_info(narrow(print()) + " Failed to set input callback.")
\r
196 << boost::errinfo_api_function("SetCallback"));
\r
198 if(FAILED(input_->StartStreams()))
\r
199 BOOST_THROW_EXCEPTION(caspar_exception()
\r
200 << msg_info(narrow(print()) + " Failed to start input stream.")
\r
201 << boost::errinfo_api_function("StartStreams"));
\r
203 CASPAR_LOG(info) << print() << L" Successfully Initialized.";
\r
206 ~decklink_producer()
\r
208 if(input_ != nullptr)
\r
210 input_->StopStreams();
\r
211 input_->DisableVideoInput();
\r
215 virtual HRESULT STDMETHODCALLTYPE QueryInterface (REFIID, LPVOID*) {return E_NOINTERFACE;}
\r
216 virtual ULONG STDMETHODCALLTYPE AddRef () {return 1;}
\r
217 virtual ULONG STDMETHODCALLTYPE Release () {return 1;}
\r
219 virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents /*notificationEvents*/, IDeckLinkDisplayMode* newDisplayMode, BMDDetectedVideoInputFormatFlags /*detectedSignalFlags*/)
\r
224 virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio)
\r
231 auto result = core::basic_frame::empty();
\r
233 graph_->update_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);
\r
234 tick_timer_.restart();
\r
236 frame_timer_.restart();
\r
238 core::pixel_format_desc desc;
\r
239 desc.pix_fmt = core::pixel_format::ycbcr;
\r
240 desc.planes.push_back(core::pixel_format_desc::plane(video->GetWidth(), video->GetHeight(), 1));
\r
241 desc.planes.push_back(core::pixel_format_desc::plane(video->GetWidth()/2, video->GetHeight(), 1));
\r
242 desc.planes.push_back(core::pixel_format_desc::plane(video->GetWidth()/2, video->GetHeight(), 1));
\r
243 auto frame = frame_factory_->create_frame(this, desc);
\r
245 void* bytes = nullptr;
\r
246 if(FAILED(video->GetBytes(&bytes)) || !bytes)
\r
249 unsigned char* data = reinterpret_cast<unsigned char*>(bytes);
\r
250 const size_t frame_size = (format_desc_.width * 16 / 8) * format_desc_.height;
\r
252 // Convert to planar YUV422
\r
253 unsigned char* y = frame->image_data(0).begin();
\r
254 unsigned char* cb = frame->image_data(1).begin();
\r
255 unsigned char* cr = frame->image_data(2).begin();
\r
257 tbb::parallel_for(tbb::blocked_range<size_t>(0, frame_size/4), [&](const tbb::blocked_range<size_t>& r)
\r
259 for(auto n = r.begin(); n != r.end(); ++n)
\r
261 cb[n] = data[n*4+0];
\r
262 y [n*2+0] = data[n*4+1];
\r
263 cr[n] = data[n*4+2];
\r
264 y [n*2+1] = data[n*4+3];
\r
267 frame->set_type(format_desc_.mode);
\r
269 // It is assumed that audio is always equal or ahead of video.
\r
270 if(audio && SUCCEEDED(audio->GetBytes(&bytes)))
\r
272 const size_t audio_samples = static_cast<size_t>(48000.0 / format_desc_.fps);
\r
273 const size_t audio_nchannels = 2;
\r
275 auto sample_frame_count = audio->GetSampleFrameCount();
\r
276 auto audio_data = reinterpret_cast<short*>(bytes);
\r
277 audio_data_.insert(audio_data_.end(), audio_data, audio_data + sample_frame_count*2);
\r
279 if(audio_data_.size() > audio_samples*audio_nchannels)
\r
281 frame->audio_data() = std::vector<short>(audio_data_.begin(), audio_data_.begin() + audio_samples*audio_nchannels);
\r
282 audio_data_.erase(audio_data_.begin(), audio_data_.begin() + audio_samples*audio_nchannels);
\r
286 auto frames = filter_.execute(frame);
\r
288 for(size_t n = 0; n < frames.size(); ++n)
\r
290 if(!frame_buffer_.try_push(frames[n]))
\r
291 graph_->add_tag("dropped-frame");
\r
294 graph_->update_value("frame-time", frame_timer_.elapsed()*format_desc_.fps*0.5);
\r
296 graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));
\r
300 exception_ = std::current_exception();
\r
307 safe_ptr<core::basic_frame> get_frame()
\r
309 if(exception_ != nullptr)
\r
310 std::rethrow_exception(exception_);
\r
312 if(!frame_buffer_.try_pop(tail_))
\r
313 graph_->add_tag("late-frame");
\r
314 graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));
\r
318 std::wstring print() const
\r
320 return model_name_ + L" [" + boost::lexical_cast<std::wstring>(device_index_) + L"]";
\r
324 class decklink_producer_proxy : public core::frame_producer
\r
326 com_context<decklink_producer> context_;
\r
329 explicit decklink_producer_proxy(const safe_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, size_t device_index, const std::wstring& filter_str = L"")
\r
330 : context_(L"decklink_producer[" + boost::lexical_cast<std::wstring>(device_index) + L"]")
\r
332 context_.reset([&]{return new decklink_producer(format_desc, device_index, frame_factory, filter_str);});
\r
335 virtual safe_ptr<core::basic_frame> receive()
\r
337 return context_->get_frame();
\r
340 std::wstring print() const
\r
342 return context_->print();
\r
346 safe_ptr<core::frame_producer> create_decklink_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::vector<std::wstring>& params)
\r
348 if(params.empty() || !boost::iequals(params[0], "decklink"))
\r
349 return core::frame_producer::empty();
\r
351 size_t device_index = 1;
\r
352 if(params.size() > 1)
\r
353 device_index = lexical_cast_or_default(params[1], 1);
\r
355 core::video_format_desc format_desc = core::video_format_desc::get(L"PAL");
\r
356 if(params.size() > 2)
\r
358 auto desc = core::video_format_desc::get(params[2]);
\r
359 if(desc.format != core::video_format::invalid)
\r
360 format_desc = desc;
\r
363 std::wstring filter_str = L"";
\r
365 auto filter_it = std::find(params.begin(), params.end(), L"FILTER");
\r
366 if(filter_it != params.end())
\r
368 if(++filter_it != params.end())
\r
369 filter_str = *filter_it;
\r
372 return make_safe<decklink_producer_proxy>(frame_factory, format_desc, device_index, filter_str);
\r