\r
stream_sink->locked_backend()->set_formatter(\r
boost::log::formatters::wstream\r
- << L"[" << boost::log::formatters::attr<boost::log::attributes::current_thread_id::held_type >(traits_t::thread_id_attr_name())\r
- << L"] [" << boost::log::formatters::attr<severity_level >(boost::log::sources::aux::severity_attribute_name<wchar_t>::get())\r
- << L"] " << boost::log::formatters::message<wchar_t>()\r
+ //<< L"[" << boost::log::formatters::date_time<std::tm>(L"TimeStamp") << L"] "\r
+ << L"[" << boost::log::formatters::attr<boost::log::attributes::current_thread_id::held_type >(L"ThreadID") << L"] "\r
+ << L"[" << boost::log::formatters::attr<severity_level>(boost::log::sources::aux::severity_attribute_name<wchar_t>::get()) << L"] "\r
+ << boost::log::formatters::message<wchar_t>()\r
);\r
\r
boost::log::wcore::get()->add_sink(stream_sink);\r
\r
file_sink->locked_backend()->set_formatter(\r
boost::log::formatters::wstream\r
- << boost::log::formatters::attr<unsigned int>(traits_t::line_id_attr_name())\r
- << L" [" << boost::log::formatters::date_time< posix_time::ptime >(traits_t::time_stamp_attr_name())\r
- << L"] [" << boost::log::formatters::attr<boost::log::attributes::current_thread_id::held_type >(traits_t::thread_id_attr_name())\r
- << L"] [" << boost::log::formatters::attr<severity_level>(boost::log::sources::aux::severity_attribute_name<wchar_t>::get())\r
- << L"] " << boost::log::formatters::message<wchar_t>()\r
+ //<< L"[" << boost::log::formatters::date_time<std::tm>(L"TimeStamp") << L"] "\r
+ << L"[" << boost::log::formatters::attr<boost::log::attributes::current_thread_id::held_type >(L"ThreadID") << L"] "\r
+ << L"[" << boost::log::formatters::attr<severity_level>(boost::log::sources::aux::severity_attribute_name<wchar_t>::get()) << L"] "\r
+ << boost::log::formatters::message<wchar_t>()\r
);\r
\r
file_sink->set_filter(boost::log::filters::attr<severity_level>(boost::log::sources::aux::severity_attribute_name<wchar_t>::get()) >= info);\r
default_latency\r
};\r
\r
+// Selects which pixels are scheduled to the DeckLink output:
+// the full fill+key pair, the fill (RGB) channels only, or the
+// key (alpha, rendered as grayscale) only.
+enum output_pixels
+{
+	fill_and_key,
+	fill_only,
+	key_only
+};
+\r
struct configuration
{
	size_t device_index;
	bool embedded_audio;
	key keyer;
	latency latency;
+	output_pixels output;	// which signal(s) to schedule; defaults to fill_and_key

	configuration()
		: device_index(1)
		, embedded_audio(false)
		, keyer(default_key)
-		, latency(default_latency){}
+		, latency(default_latency)
+		, output(fill_and_key){}
};
\r
class decklink_frame_adapter : public IDeckLinkVideoFrame\r
STDMETHOD(GetAncillaryData(IDeckLinkVideoFrameAncillary** ancillary)) {return S_FALSE;}\r
};\r
\r
+// Builds a BGRA DeckLink frame whose R, G and B channels all carry the
+// source frame's alpha channel (the "key" signal) and whose own alpha is
+// fully opaque. Throws caspar_exception on DeckLink API failure. If the
+// source image size does not match the format, an all-zero (black) frame
+// is produced instead.
+std::shared_ptr<IDeckLinkVideoFrame> make_alpha_only_frame(const CComQIPtr<IDeckLinkOutput>& decklink, const safe_ptr<const core::read_frame>& frame, const core::video_format_desc& format_desc)
+{
+	IDeckLinkMutableVideoFrame* raw = nullptr;
+
+	// Row pitch for 8-bit BGRA is size/height (4 bytes per pixel).
+	if(FAILED(decklink->CreateVideoFrame(format_desc.width, format_desc.height, format_desc.size/format_desc.height, bmdFormat8BitBGRA, bmdFrameFlagDefault, &raw)))
+		BOOST_THROW_EXCEPTION(caspar_exception());
+
+	// Take ownership immediately so the +1 reference from CreateVideoFrame
+	// is released even if a later call throws (fixes a leak on GetBytes failure).
+	std::shared_ptr<IDeckLinkVideoFrame> result(raw, [](IDeckLinkVideoFrame* p) {p->Release();});
+
+	void* bytes = nullptr;
+	if(FAILED(raw->GetBytes(&bytes)))
+		BOOST_THROW_EXCEPTION(caspar_exception());
+	
+	unsigned char* data = reinterpret_cast<unsigned char*>(bytes);
+	const auto& src = frame->image_data(); // hoisted out of the per-pixel loop
+
+	if(static_cast<size_t>(src.size()) == format_desc.size)
+	{
+		tbb::parallel_for(tbb::blocked_range<int>(0, src.size()/4), [&](const tbb::blocked_range<int>& r)
+		{
+			for(int n = r.begin(); n != r.end(); ++n)
+			{
+				data[n*4+0] = src[n*4+3];
+				data[n*4+1] = src[n*4+3];
+				data[n*4+2] = src[n*4+3];
+				data[n*4+3] = 255;
+			}
+		});
+	}
+	else
+		memset(data, 0, format_desc.size); // size mismatch: emit black
+
+	return result;
+}
+\r
+// Builds a BGRA DeckLink frame containing only the source frame's fill
+// (B, G, R channels) with alpha forced to fully opaque, discarding the
+// key. Throws caspar_exception on DeckLink API failure. If the source
+// image size does not match the format, an all-zero (black) frame is
+// produced instead.
+std::shared_ptr<IDeckLinkVideoFrame> make_fill_only_frame(const CComQIPtr<IDeckLinkOutput>& decklink, const safe_ptr<const core::read_frame>& frame, const core::video_format_desc& format_desc)
+{
+	IDeckLinkMutableVideoFrame* raw = nullptr;
+
+	// Row pitch for 8-bit BGRA is size/height (4 bytes per pixel).
+	if(FAILED(decklink->CreateVideoFrame(format_desc.width, format_desc.height, format_desc.size/format_desc.height, bmdFormat8BitBGRA, bmdFrameFlagDefault, &raw)))
+		BOOST_THROW_EXCEPTION(caspar_exception());
+
+	// Take ownership immediately so the +1 reference from CreateVideoFrame
+	// is released even if a later call throws (fixes a leak on GetBytes failure).
+	std::shared_ptr<IDeckLinkVideoFrame> result(raw, [](IDeckLinkVideoFrame* p) {p->Release();});
+
+	void* bytes = nullptr;
+	if(FAILED(raw->GetBytes(&bytes)))
+		BOOST_THROW_EXCEPTION(caspar_exception());
+	
+	unsigned char* data = reinterpret_cast<unsigned char*>(bytes);
+	const auto& src = frame->image_data(); // hoisted out of the per-pixel loop
+
+	if(static_cast<size_t>(src.size()) == format_desc.size)
+	{
+		tbb::parallel_for(tbb::blocked_range<int>(0, src.size()/4), [&](const tbb::blocked_range<int>& r)
+		{
+			for(int n = r.begin(); n != r.end(); ++n)
+			{
+				data[n*4+0] = src[n*4+0];
+				data[n*4+1] = src[n*4+1];
+				data[n*4+2] = src[n*4+2];
+				data[n*4+3] = 255;
+			}
+		});
+	}
+	else
+		memset(data, 0, format_desc.size); // size mismatch: emit black
+
+	return result;
+}
+\r
struct decklink_consumer : public IDeckLinkVideoOutputCallback, public IDeckLinkAudioOutputCallback, boost::noncopyable\r
{ \r
const configuration config_;\r
unsigned long frames_scheduled_;\r
unsigned long audio_scheduled_;\r
\r
- std::list<decklink_frame_adapter> frame_container_; // Must be std::list in order to guarantee that pointers are always valid.\r
+ std::list<std::shared_ptr<IDeckLinkVideoFrame>> frame_container_; // Must be std::list in order to guarantee that pointers are always valid.\r
boost::circular_buffer<std::vector<short>> audio_container_;\r
\r
tbb::concurrent_bounded_queue<std::shared_ptr<const core::read_frame>> video_frame_buffer_;\r
else if(result == bmdOutputFrameFlushed)\r
graph_->add_tag("flushed-frame");\r
\r
- frame_container_.erase(std::find_if(frame_container_.begin(), frame_container_.end(), [&](const decklink_frame_adapter& frame)\r
+ frame_container_.erase(std::find_if(frame_container_.begin(), frame_container_.end(), [&](const std::shared_ptr<IDeckLinkVideoFrame> frame)\r
{\r
- return &frame == completed_frame;\r
+ return frame.get() == completed_frame;\r
}));\r
\r
std::shared_ptr<const core::read_frame> frame; \r
\r
void schedule_next_video(const safe_ptr<const core::read_frame>& frame)\r
{\r
- frame_container_.push_back(decklink_frame_adapter(frame, format_desc_));\r
- if(FAILED(output_->ScheduleVideoFrame(&frame_container_.back(), (frames_scheduled_++) * format_desc_.duration, format_desc_.duration, format_desc_.time_scale)))\r
+ std::shared_ptr<IDeckLinkVideoFrame> deck_frame;\r
+ if(config_.output == key_only)\r
+ deck_frame = make_alpha_only_frame(output_, frame, format_desc_);\r
+ else if(config_.output == fill_only)\r
+ deck_frame = make_fill_only_frame(output_, frame, format_desc_);\r
+ else \r
+ deck_frame = std::make_shared<decklink_frame_adapter>(frame, format_desc_);\r
+\r
+ frame_container_.push_back(deck_frame);\r
+ if(FAILED(output_->ScheduleVideoFrame(frame_container_.back().get(), (frames_scheduled_++) * format_desc_.duration, format_desc_.duration, format_desc_.time_scale)))\r
CASPAR_LOG(error) << print() << L" Failed to schedule video.";\r
\r
graph_->update_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);\r
else if(latency_str == "low")\r
config.latency = low_latency;\r
\r
+ auto output_str = ptree.get("output", "fill_and_key");\r
+ if(output_str == "fill_only")\r
+ config.output = fill_only;\r
+ else if(output_str == "key_only")\r
+ config.output = key_only;\r
+\r
config.device_index = ptree.get("device", 0);\r
config.embedded_audio = ptree.get("embedded-audio", false);\r
\r