audio_data_.push_back(std::vector<int16_t>()); // One frame delay\r
}\r
\r
- void begin(const core::basic_frame& frame)\r
+ void begin(core::basic_frame& frame)\r
{\r
transform_stack_.push(transform_stack_.top()*frame.get_audio_transform());\r
}\r
};\r
\r
audio_mixer::audio_mixer() : impl_(new implementation()){}\r
-void audio_mixer::begin(const core::basic_frame& frame){impl_->begin(frame);}\r
+void audio_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}\r
void audio_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
void audio_mixer::end(){impl_->end();}\r
std::vector<int16_t> audio_mixer::mix(){return impl_->mix();}\r
public:\r
audio_mixer();\r
\r
- virtual void begin(const core::basic_frame& frame);\r
+ virtual void begin(core::basic_frame& frame);\r
virtual void visit(core::write_frame& frame);\r
virtual void end();\r
\r
});\r
}\r
\r
- void begin(const core::basic_frame& frame)\r
+ void begin(core::basic_frame& frame)\r
{\r
transform_stack_.push(transform_stack_.top()*frame.get_image_transform());\r
}\r
};\r
\r
image_mixer::image_mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
-void image_mixer::begin(const core::basic_frame& frame){impl_->begin(frame);}\r
+void image_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}\r
void image_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
void image_mixer::end(){impl_->end();}\r
safe_ptr<host_buffer> image_mixer::render(){return impl_->render();}\r
public:\r
image_mixer(video_channel_context& context);\r
\r
- virtual void begin(const core::basic_frame& frame);\r
+ virtual void begin(core::basic_frame& frame);\r
virtual void visit(core::write_frame& frame);\r
virtual void end();\r
\r
\r
struct write_frame::implementation : boost::noncopyable\r
{ \r
- ogl_device& ogl_;\r
- std::vector<safe_ptr<host_buffer>> buffers_;\r
- std::vector<safe_ptr<device_buffer>> textures_;\r
- std::vector<int16_t> audio_data_;\r
- const core::pixel_format_desc desc_;\r
- int tag_;\r
- bool is_interlaced_;\r
+ ogl_device& ogl_;\r
+ std::vector<std::shared_ptr<host_buffer>> buffers_;\r
+ std::vector<safe_ptr<device_buffer>> textures_;\r
+ std::vector<int16_t> audio_data_;\r
+ const core::pixel_format_desc desc_;\r
+ int tag_;\r
+ core::video_mode::type mode_;\r
\r
implementation(ogl_device& ogl, int tag, const core::pixel_format_desc& desc) \r
: ogl_(ogl)\r
, desc_(desc)\r
, tag_(tag)\r
- , is_interlaced_(false)\r
+ , mode_(core::video_mode::progressive)\r
{\r
ogl_.invoke([&]\r
{\r
if(plane_index >= buffers_.size())\r
return;\r
\r
- auto texture = textures_[plane_index];\r
auto buffer = std::move(buffers_[plane_index]); // Release buffer once done.\r
\r
+ if(!buffer)\r
+ return;\r
+\r
+ auto texture = textures_[plane_index];\r
+\r
ogl_.begin_invoke([=]\r
{\r
texture->read(*buffer);\r
const std::vector<safe_ptr<device_buffer>>& write_frame::get_textures() const{return impl_->textures_;}\r
void write_frame::commit(size_t plane_index){impl_->commit(plane_index);}\r
void write_frame::commit(){impl_->commit();}\r
-void write_frame::set_is_interlaced(bool value){impl_->is_interlaced_ = true;}\r
-bool write_frame::get_is_interlaced() const{return impl_->is_interlaced_;}\r
+void write_frame::set_type(const video_mode::type& mode){impl_->mode_ = mode;}\r
+core::video_mode::type write_frame::get_type() const{return impl_->mode_;}\r
\r
}}
\ No newline at end of file
#include <common/memory/safe_ptr.h>\r
\r
#include <core/producer/frame/basic_frame.h>\r
+#include <core/video_format.h>\r
\r
#include <boost/noncopyable.hpp>\r
#include <boost/range/iterator_range.hpp>\r
void commit(uint32_t plane_index);\r
void commit();\r
\r
- void set_is_interlaced(bool value);\r
- bool get_is_interlaced() const;\r
+ void set_type(const core::video_mode::type& mode);\r
+ core::video_mode::type get_type() const;\r
\r
virtual void accept(core::frame_visitor& visitor);\r
\r
implementation(const safe_ptr<basic_frame>& frame) \r
{ frames_.push_back(frame);}\r
\r
- void accept(const basic_frame& self, frame_visitor& visitor)\r
+ void accept(basic_frame& self, frame_visitor& visitor)\r
{\r
visitor.begin(self);\r
BOOST_FOREACH(auto frame, frames_)\r
\r
struct frame_visitor\r
{\r
- virtual void begin(const basic_frame& frame) = 0;\r
+ virtual void begin(basic_frame& frame) = 0;\r
virtual void end() = 0;\r
virtual void visit(write_frame& frame) = 0;\r
};\r
\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/mixer/write_frame.h>\r
+#include <core/producer/frame/audio_transform.h>\r
\r
#include <tbb/concurrent_queue.h>\r
#include <tbb/atomic.h>\r
+#include <tbb/task_group.h>\r
\r
#include <boost/algorithm/string.hpp>\r
#include <boost/timer.hpp>\r
#include <functional>\r
\r
namespace caspar { \r
+\r
+class frame_filter\r
+{\r
+ std::unique_ptr<filter> filter_;\r
+ safe_ptr<core::frame_factory> frame_factory_;\r
+ std::deque<std::vector<int16_t>> audio_buffer_;\r
+ tbb::task_group task_group_;\r
+\r
+ std::vector<safe_ptr<AVFrame>> buffer_;\r
+\r
+public:\r
+ frame_filter(const std::string& filter_str, const safe_ptr<core::frame_factory>& frame_factory) \r
+ : filter_(filter_str.empty() ? nullptr : new filter(filter_str))\r
+ , frame_factory_(frame_factory)\r
+ {\r
+ }\r
+\r
+ bool execute(const safe_ptr<core::write_frame>& input_frame, safe_ptr<core::basic_frame>& output_frame)\r
+ { \r
+ if(!filter_)\r
+ {\r
+ input_frame->commit();\r
+ output_frame = input_frame;\r
+ return true;\r
+ }\r
+ \r
+ auto desc = input_frame->get_pixel_format_desc();\r
+\r
+ auto av_frame = as_av_frame(input_frame);\r
+\r
+ audio_buffer_.push_back(std::move(input_frame->audio_data()));\r
+ \r
+ task_group_.wait();\r
+\r
+ filter_->push(av_frame); \r
+ \r
+ bool result = try_pop(output_frame);\r
+\r
+ task_group_.run([this]\r
+ {\r
+ buffer_ = filter_->poll();\r
+ });\r
+\r
+ return result; \r
+ }\r
+\r
+private: \r
+\r
+ bool try_pop(safe_ptr<core::basic_frame>& output)\r
+ {\r
+ if(buffer_.empty())\r
+ return false;\r
+\r
+ auto audio_data = std::move(audio_buffer_.front());\r
+ audio_buffer_.pop_back();\r
+\r
+ if(buffer_.size() == 2)\r
+ {\r
+ auto frame1 = make_write_frame(this, buffer_[0], frame_factory_);\r
+ auto frame2 = make_write_frame(this, buffer_[1], frame_factory_);\r
+ frame1->audio_data() = std::move(audio_data);\r
+ frame2->get_audio_transform().set_has_audio(false);\r
+ output = core::basic_frame::interlace(frame1, frame2, frame_factory_->get_video_format_desc().mode);\r
+ }\r
+ else if(buffer_.size() > 0)\r
+ {\r
+ auto frame1 = make_write_frame(this, buffer_[0], frame_factory_);\r
+ frame1->audio_data() = std::move(audio_data);\r
+ output = frame1;\r
+ }\r
+ buffer_.clear();\r
+\r
+ return true;\r
+ }\r
+};\r
\r
class decklink_producer : public IDeckLinkInputCallback\r
{ \r
\r
std::vector<short> audio_data_;\r
\r
- std::shared_ptr<core::frame_factory> frame_factory_;\r
+ safe_ptr<core::frame_factory> frame_factory_;\r
\r
tbb::concurrent_bounded_queue<safe_ptr<core::basic_frame>> frame_buffer_;\r
safe_ptr<core::basic_frame> tail_;\r
\r
std::exception_ptr exception_;\r
- std::shared_ptr<filter> filter_;\r
+ frame_filter filter_;\r
\r
public:\r
- decklink_producer(const core::video_format_desc& format_desc, size_t device_index, const std::shared_ptr<core::frame_factory>& frame_factory, const std::wstring& filter_str)\r
+ decklink_producer(const core::video_format_desc& format_desc, size_t device_index, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter_str)\r
: decklink_(get_device(device_index))\r
, input_(decklink_)\r
, model_name_(get_model_name(decklink_))\r
, device_index_(device_index)\r
, frame_factory_(frame_factory)\r
, tail_(core::basic_frame::empty())\r
- , filter_(filter_str.empty() ? nullptr : new filter(narrow(filter_str)))\r
+ , filter_(narrow(filter_str), frame_factory_)\r
{\r
frame_buffer_.set_capacity(2);\r
\r
\r
virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio)\r
{ \r
- if(!video)\r
+ if(!video || video->GetWidth() != static_cast<int>(format_desc_.width) || video->GetHeight() != static_cast<int>(format_desc_.height))\r
return S_OK;\r
\r
try\r
y [n*2+1] = data[n*4+3];\r
}\r
});\r
+ frame->set_type(format_desc_.mode);\r
\r
// It is assumed that audio is always equal or ahead of video.\r
if(audio && SUCCEEDED(audio->GetBytes(&bytes)))\r
audio_data_.erase(audio_data_.begin(), audio_data_.begin() + audio_samples*audio_nchannels);\r
}\r
}\r
-\r
- if(filter_)\r
- {\r
- auto desc = frame->get_pixel_format_desc();\r
-\r
- safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); \r
- avcodec_get_frame_defaults(av_frame.get());\r
-\r
- for(size_t n = 0; n < desc.planes.size(); ++n)\r
- { \r
- av_frame->data[n] = frame->image_data(n).begin();\r
- av_frame->linesize[n] = desc.planes[n].width;\r
- }\r
-\r
- av_frame->format = get_ffmpeg_pixel_format(desc);\r
- av_frame->width = desc.planes[0].width;\r
- av_frame->height = desc.planes[0].height;\r
-\r
- if(format_desc_.mode != core::video_mode::progressive)\r
- {\r
- av_frame->interlaced_frame = 1;\r
- av_frame->top_field_first = format_desc_.mode == core::video_mode::upper ? 1 : 0;\r
- }\r
-\r
- filter_->push(av_frame);\r
- auto frames = filter_->poll();\r
-\r
- if(frames.size() == 2)\r
- {\r
- auto frame1 = make_write_frame(this, frames[0], make_safe(frame_factory_));\r
- auto frame2 = make_write_frame(this, frames[1], make_safe(frame_factory_));\r
- frame1->audio_data() = std::move(frame->audio_data());\r
- result = core::basic_frame::interlace(frame1, frame2, frame_factory_->get_video_format_desc().mode);\r
- }\r
- else if(frames.size() > 0)\r
- {\r
- auto frame1 = make_write_frame(this, frames[0], make_safe(frame_factory_));\r
- frame1->audio_data() = std::move(frame->audio_data());\r
- result = frame1;\r
- }\r
- }\r
- else\r
- {\r
- frame->commit();\r
- frame->set_is_interlaced(format_desc_.mode != core::video_mode::progressive);\r
- result = frame;\r
- }\r
-\r
+ \r
+ filter_.execute(frame, result); \r
+ \r
if(!frame_buffer_.try_push(result))\r
graph_->add_tag("dropped-frame");\r
\r
\r
namespace caspar { \r
\r
-namespace internal {\r
-\r
static BMDDisplayMode get_decklink_video_format(core::video_format::type fmt) \r
{\r
switch(fmt)\r
}\r
}\r
\r
+// Maps a DeckLink BMDDisplayMode onto the corresponding caspar video format
+// enum, or video_format::invalid when the mode is not one we support.
+static core::video_format::type get_caspar_video_format(BMDDisplayMode fmt) 
+{
+	switch(fmt)
+	{
+	case bmdModePAL:			return core::video_format::pal;		
+	case bmdModeNTSC:			return core::video_format::ntsc;		
+	case bmdModeHD720p50:		return core::video_format::x720p5000;	
+	case bmdModeHD720p5994:		return core::video_format::x720p5994;	
+	case bmdModeHD720p60:		return core::video_format::x720p6000;	
+	case bmdModeHD1080p2398:	return core::video_format::x1080p2397;	
+	case bmdModeHD1080p24:		return core::video_format::x1080p2400;	
+	case bmdModeHD1080i50:		return core::video_format::x1080i5000;	
+	case bmdModeHD1080i5994:	return core::video_format::x1080i5994;	
+	case bmdModeHD1080i6000:	return core::video_format::x1080i6000;	
+	case bmdModeHD1080p25:		return core::video_format::x1080p2500;	
+	case bmdModeHD1080p2997:	return core::video_format::x1080p2997;	
+	case bmdModeHD1080p30:		return core::video_format::x1080p3000;	
+	default:					return core::video_format::invalid;	
+	}
+}
+\r
template<typename T, typename F>\r
BMDDisplayMode get_display_mode(const T& device, BMDDisplayMode format, BMDPixelFormat pix_fmt, F flag)\r
{\r
return mode->GetDisplayMode();\r
}\r
\r
-} \r
-\r
// Convenience overload: translates the caspar format enum to a BMD display
// mode and forwards to the BMDDisplayMode overload above (now that the
// internal namespace wrapper has been removed).
template<typename T, typename F>
static BMDDisplayMode get_display_mode(const T& device, core::video_format::type fmt, BMDPixelFormat pix_fmt, F flag)
{	
-	return internal::get_display_mode(device, internal::get_decklink_video_format(fmt), pix_fmt, flag);
+	return get_display_mode(device, get_decklink_video_format(fmt), pix_fmt, flag);
}
\r
template<typename T>\r
return PIX_FMT_NONE;\r
}\r
\r
+// Wraps a write_frame's image planes in an AVFrame WITHOUT copying pixel
+// data: the AVFrame's data pointers alias the write_frame's buffers, so the
+// write_frame must outlive every use of the returned AVFrame.
+static safe_ptr<AVFrame> as_av_frame(const safe_ptr<core::write_frame>& frame)
+{
+	auto desc = frame->get_pixel_format_desc();
+	safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);	// av_free releases the struct; plane data is owned by `frame`.
+	avcodec_get_frame_defaults(av_frame.get());
+
+	for(size_t n = 0; n < desc.planes.size(); ++n)
+	{	
+		av_frame->data[n]		= frame->image_data(n).begin();
+		// NOTE(review): linesize is bytes-per-row; using plane width assumes
+		// 1 byte per pixel per plane — confirm for packed/multi-byte formats.
+		av_frame->linesize[n]	= desc.planes[n].width;
+	}
+
+	av_frame->format = get_ffmpeg_pixel_format(desc);
+	av_frame->width  = desc.planes[0].width;
+	av_frame->height = desc.planes[0].height;
+
+	// Propagate field order so downstream filters (e.g. yadif) deinterlace correctly.
+	if(frame->get_type() != core::video_mode::progressive)
+	{
+		av_frame->interlaced_frame = 1;
+		av_frame->top_field_first = frame->get_type() == core::video_mode::upper ? 1 : 0;
+	}
+
+	return av_frame;
+}
+\r
static core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)\r
{\r
// Get linesizes\r
auto desc = get_pixel_format_desc(pix_fmt, width, height);\r
\r
auto write = frame_factory->create_frame(tag, desc.pix_fmt != core::pixel_format::invalid ? desc : get_pixel_format_desc(PIX_FMT_BGRA, width, height));\r
- write->set_is_interlaced(decoded_frame->interlaced_frame != 0);\r
+ if(decoded_frame->interlaced_frame)\r
+ write->set_type(decoded_frame->top_field_first ? core::video_mode::upper : core::video_mode::lower);\r
+ else\r
+ write->set_type(core::video_mode::progressive);\r
\r
if(desc.pix_fmt == core::pixel_format::invalid)\r
{\r