\r
explicit executor(const std::wstring& name) : name_(narrow(name)) // noexcept\r
{\r
- thread_ = boost::thread([this]{run();});\r
is_running_ = true;\r
+ thread_ = boost::thread([this]{run();});\r
}\r
\r
virtual ~executor() // noexcept\r
{\r
if(p == high_priority_class)\r
SetThreadPriority(GetCurrentThread(), HIGH_PRIORITY_CLASS);\r
- if(p == above_normal_priority_class)\r
+ else if(p == above_normal_priority_class)\r
SetThreadPriority(GetCurrentThread(), ABOVE_NORMAL_PRIORITY_CLASS);\r
else if(p == normal_priority_class)\r
SetThreadPriority(GetCurrentThread(), NORMAL_PRIORITY_CLASS);\r
: p_(other.p_)\r
{\r
}\r
-\r
- template<typename U> \r
- safe_ptr(U&& v, typename std::enable_if<std::is_convertible<U*, T*>::value, void>::type* = 0)\r
- : p_(std::make_shared<U>(std::forward<U>(v))) \r
- {\r
- }\r
-\r
- template<typename U, typename D> \r
- safe_ptr(U&& v, D d, typename std::enable_if<std::is_convertible<U*, T*>::value, void>::type* = 0)\r
- : p_(new U(std::forward<U>(v)), d) \r
- {\r
- }\r
-\r
+ \r
template<typename U> \r
explicit safe_ptr(const std::shared_ptr<U>& p, typename std::enable_if<std::is_convertible<U*, T*>::value, void*>::type = 0) \r
: p_(p)\r
safe_ptr<T>(std::move(other)).swap(*this);\r
return *this;\r
}\r
-\r
- template <typename U>\r
- typename std::enable_if<std::is_convertible<U*, T*>::value, safe_ptr&>::type\r
- operator=(U&& v)\r
- {\r
- safe_ptr(std::forward<U>(v)).swap(*this);\r
- return *this;\r
- }\r
-\r
+ \r
T& operator*() const \r
{ \r
return *p_.get();\r
, image_mixer_(channel_.ogl(), channel_.get_format_desc())\r
, buffer_size_(env::properties().get("configuration.producers.buffer-depth", 1))\r
{ \r
- CASPAR_LOG(info) << print() << L" Successfully initialized . Buffer-depth: " << buffer_size_; \r
+ CASPAR_LOG(info) << print() << L" Successfully initialized."; \r
}\r
\r
safe_ptr<read_frame> execute(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
write_frame::write_frame(ogl_device& ogl, const void* tag, const core::pixel_format_desc& desc) \r
: impl_(new implementation(ogl, tag, desc)){}\r
write_frame::write_frame(const write_frame& other) : impl_(new implementation(*other.impl_)){}\r
-write_frame::write_frame(write_frame&& other) : impl_(std::move(*other.impl_)){}\r
+write_frame::write_frame(write_frame&& other) : impl_(std::move(other.impl_)){}\r
write_frame& write_frame::operator=(const write_frame& other)\r
{\r
basic_frame temp(other);\r
std::vector<safe_ptr<basic_frame>> frames;\r
frames.push_back(my_frame1);\r
frames.push_back(my_frame2);\r
- return basic_frame(std::move(frames));\r
+ return make_safe<basic_frame>(std::move(frames));\r
}\r
\r
safe_ptr<basic_frame> basic_frame::combine(const safe_ptr<basic_frame>& frame1, const safe_ptr<basic_frame>& frame2)\r
std::vector<safe_ptr<basic_frame>> frames;\r
frames.push_back(frame1);\r
frames.push_back(frame2);\r
- return basic_frame(std::move(frames));\r
+ return make_safe<basic_frame>(std::move(frames));\r
}\r
\r
safe_ptr<basic_frame> basic_frame::fill_and_key(const safe_ptr<basic_frame>& fill, const safe_ptr<basic_frame>& key)\r
key->get_frame_transform().is_key = true;\r
frames.push_back(key);\r
frames.push_back(fill);\r
- return basic_frame(std::move(frames));\r
+ return make_safe<basic_frame>(std::move(frames));\r
}\r
\r
safe_ptr<basic_frame> disable_audio(const safe_ptr<basic_frame>& frame)\r
{\r
basic_frame frame2 = frame;\r
frame2.get_frame_transform().volume = 0.0;\r
- return std::move(frame2);\r
+ return make_safe<basic_frame>(std::move(frame2));\r
}\r
\r
}}
\ No newline at end of file
\r
class basic_frame\r
{\r
- basic_frame(std::vector<safe_ptr<basic_frame>>&& frames);\r
public:\r
basic_frame(); \r
basic_frame(const safe_ptr<basic_frame>& frame);\r
basic_frame(safe_ptr<basic_frame>&& frame);\r
basic_frame(const std::vector<safe_ptr<basic_frame>>& frames);\r
+ basic_frame(std::vector<safe_ptr<basic_frame>>&& frames);\r
basic_frame(const basic_frame& other);\r
basic_frame(basic_frame&& other);\r
\r
std::vector<const producer_factory_t> g_factories;\r
\r
class destroy_producer_proxy : public frame_producer\r
-{\r
- safe_ptr<frame_producer> producer_;\r
- executor& destroy_context_;\r
+{ \r
+ std::shared_ptr<frame_producer>* producer_;\r
public:\r
- destroy_producer_proxy(executor& destroy_context, const safe_ptr<frame_producer>& producer) \r
- : producer_(producer)\r
- , destroy_context_(destroy_context){}\r
+ destroy_producer_proxy(safe_ptr<frame_producer>&& producer) \r
+ : producer_(new std::shared_ptr<frame_producer>(std::move(producer)))\r
+ {\r
+ }\r
\r
~destroy_producer_proxy()\r
{ \r
- if(destroy_context_.size() > 4)\r
- CASPAR_LOG(error) << L" Potential destroyer deadlock.";\r
-\r
- // Hacks to bypass compiler bugs.\r
- auto mov_producer = make_move_on_copy<safe_ptr<frame_producer>>(std::move(producer_));\r
- auto empty_producer = frame_producer::empty();\r
- destroy_context_.begin_invoke([=]\r
- { \r
- //if(!mov_producer.value.unique())\r
- // CASPAR_LOG(debug) << mov_producer.value->print() << L" Not destroyed on safe asynchronous destruction thread.";\r
- //else\r
- // CASPAR_LOG(debug) << mov_producer.value->print() << L" Destroying on safe asynchronous destruction thread.";\r
- \r
- mov_producer.value = empty_producer;\r
- });\r
+ static auto destroyers = std::make_shared<tbb::concurrent_bounded_queue<std::shared_ptr<executor>>>();\r
+ static tbb::atomic<int> destroyer_count;\r
+\r
+ try\r
+ {\r
+ std::shared_ptr<executor> destroyer;\r
+ if(!destroyers->try_pop(destroyer))\r
+ {\r
+ destroyer.reset(new executor(L"destroyer"));\r
+ destroyer->set_priority_class(below_normal_priority_class);\r
+ if(++destroyer_count > 16)\r
+ CASPAR_LOG(warning) << L"Potential destroyer dead-lock detected.";\r
+ CASPAR_LOG(trace) << "Created destroyer: " << destroyer_count;\r
+ }\r
+ \r
+ auto producer = producer_;\r
+ auto pool = destroyers;\r
+ destroyer->begin_invoke([=]\r
+ {\r
+ try\r
+ {\r
+ if(!producer->unique())\r
+ CASPAR_LOG(trace) << (*producer)->print() << L" Not destroyed on safe asynchronous destruction thread: " << producer->use_count();\r
+ else\r
+ CASPAR_LOG(trace) << (*producer)->print() << L" Destroying on safe asynchronous destruction thread.";\r
+ }\r
+ catch(...){}\r
+\r
+ delete producer;\r
+ pool->push(destroyer);\r
+ }); \r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ try\r
+ {\r
+ delete producer_;\r
+ }\r
+ catch(...){}\r
+ }\r
}\r
\r
- virtual safe_ptr<basic_frame> receive(int hints) {return producer_->receive(hints);}\r
- virtual safe_ptr<basic_frame> last_frame() const {return producer_->last_frame();}\r
- virtual std::wstring print() const {return producer_->print();}\r
- virtual void param(const std::wstring& str) {producer_->param(str);}\r
- virtual safe_ptr<frame_producer> get_following_producer() const {return producer_->get_following_producer();}\r
- virtual void set_leading_producer(const safe_ptr<frame_producer>& producer) {producer_->set_leading_producer(producer);}\r
- virtual int64_t nb_frames() const {return producer_->nb_frames();}\r
+ virtual safe_ptr<basic_frame> receive(int hints) {return (*producer_)->receive(hints);}\r
+ virtual safe_ptr<basic_frame> last_frame() const {return (*producer_)->last_frame();}\r
+ virtual std::wstring print() const {return (*producer_)->print();}\r
+ virtual void param(const std::wstring& str) {(*producer_)->param(str);}\r
+ virtual safe_ptr<frame_producer> get_following_producer() const {return (*producer_)->get_following_producer();}\r
+ virtual void set_leading_producer(const safe_ptr<frame_producer>& producer) {(*producer_)->set_leading_producer(producer);}\r
+ virtual int64_t nb_frames() const {return (*producer_)->nb_frames();}\r
};\r
\r
-safe_ptr<core::frame_producer> create_destroy_producer_proxy(executor& destroy_context, const safe_ptr<frame_producer>& producer)\r
+safe_ptr<core::frame_producer> create_destroy_proxy(safe_ptr<core::frame_producer>&& producer)\r
{\r
- return make_safe<destroy_producer_proxy>(destroy_context, producer);\r
+ return make_safe<destroy_producer_proxy>(std::move(producer));\r
}\r
\r
class last_frame_producer : public frame_producer\r
catch(...){}\r
\r
if(producer != frame_producer::empty() && key_producer != frame_producer::empty())\r
- producer = create_separated_producer(producer, key_producer);\r
+ return create_separated_producer(producer, key_producer);\r
\r
if(producer == frame_producer::empty())\r
{\r
typedef std::function<safe_ptr<core::frame_producer>(const safe_ptr<frame_factory>&, const std::vector<std::wstring>&)> producer_factory_t;\r
void register_producer_factory(const producer_factory_t& factory); // Not thread-safe.\r
safe_ptr<core::frame_producer> create_producer(const safe_ptr<frame_factory>&, const std::vector<std::wstring>& params);\r
-\r
-safe_ptr<core::frame_producer> create_destroy_producer_proxy(executor& destroy_context, const safe_ptr<frame_producer>& producer);\r
+safe_ptr<core::frame_producer> create_destroy_proxy(safe_ptr<core::frame_producer>&& producer);\r
\r
template<typename T>\r
typename std::decay<T>::type get_param(const std::wstring& name, const std::vector<std::wstring>& params, T fail_value)\r
{\r
channel_.execution().invoke([&]\r
{\r
- layers_[index].load(create_destroy_producer_proxy(channel_.destruction(), producer), preview, auto_play_delta);\r
+ layers_[index].load(producer, preview, auto_play_delta);\r
}, high_priority);\r
}\r
\r
// Stop context before destroying devices.\r
context_.execution().stop();\r
context_.execution().join();\r
- context_.destruction().stop();\r
- context_.destruction().join();\r
}\r
\r
void tick()\r
const int index_;\r
video_format_desc format_desc_;\r
executor execution_;\r
- executor destruction_;\r
ogl_device& ogl_;\r
\r
implementation(int index, ogl_device& ogl, const video_format_desc& format_desc)\r
: index_(index)\r
, format_desc_(format_desc)\r
, execution_(print() + L"/execution")\r
- , destruction_(print() + L"/destruction")\r
, ogl_(ogl)\r
{\r
execution_.set_priority_class(above_normal_priority_class);\r
- destruction_.set_priority_class(below_normal_priority_class);\r
}\r
\r
std::wstring print() const\r
}\r
\r
executor& video_channel_context::execution() {return impl_->execution_;}\r
-executor& video_channel_context::destruction() {return impl_->destruction_;}\r
ogl_device& video_channel_context::ogl() { return impl_->ogl_;}\r
\r
std::wstring video_channel_context::print() const\r
video_format_desc get_format_desc();\r
void set_format_desc(const video_format_desc& format_desc);\r
executor& execution();\r
- executor& destruction();\r
ogl_device& ogl();\r
std::wstring print() const;\r
private:\r
return create_consumer(params);\r
});\r
}\r
- catch(...)\r
- {\r
- //CASPAR_LOG_CURRENT_EXCEPTION();\r
- CASPAR_LOG(info) << L"Bluefish not supported.";\r
- }\r
+ catch(...){}\r
}\r
\r
std::wstring get_version()\r
\r
void init()\r
{\r
+ struct co_init\r
+ {\r
+ co_init(){::CoInitialize(nullptr);}\r
+ ~co_init(){::CoUninitialize();}\r
+ } init;\r
+ \r
+ CComPtr<IDeckLinkIterator> pDecklinkIterator;\r
+ if(FAILED(pDecklinkIterator.CoCreateInstance(CLSID_CDeckLinkIterator))) \r
+ return;\r
+ \r
core::register_consumer_factory([](const std::vector<std::wstring>& params){return create_consumer(params);});\r
core::register_producer_factory(create_producer);\r
}\r
std::wstring get_version() \r
{\r
std::wstring version = L"Not found";\r
+ \r
+ struct co_init\r
+ {\r
+ co_init(){::CoInitialize(nullptr);}\r
+ ~co_init(){::CoUninitialize();}\r
+ } init;\r
\r
- ::CoInitialize(nullptr);\r
try\r
{\r
CComPtr<IDeckLinkIterator> pDecklinkIterator;\r
version = get_version(pDecklinkIterator);\r
}\r
catch(...){}\r
- ::CoUninitialize();\r
\r
return version;\r
}\r
std::vector<std::wstring> get_device_list()\r
{\r
std::vector<std::wstring> devices;\r
+ \r
+ struct co_init\r
+ {\r
+ co_init(){::CoInitialize(nullptr);}\r
+ ~co_init(){::CoUninitialize();}\r
+ } init;\r
\r
- ::CoInitialize(nullptr);\r
try\r
{\r
CComPtr<IDeckLinkIterator> pDecklinkIterator;\r
}\r
}\r
catch(...){}\r
- ::CoUninitialize();\r
\r
return devices;\r
}\r
\r
\r
/* File created by MIDL compiler version 7.00.0555 */\r
-/* at Thu Nov 10 16:28:32 2011\r
+/* at Fri Nov 11 23:52:26 2011\r
*/\r
/* Compiler settings for interop\DeckLinkAPI.idl:\r
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 7.00.0555 \r
\r
\r
/* File created by MIDL compiler version 7.00.0555 */\r
-/* at Thu Nov 10 16:28:32 2011\r
+/* at Fri Nov 11 23:52:26 2011\r
*/\r
/* Compiler settings for interop\DeckLinkAPI.idl:\r
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 7.00.0555 \r
#include "../util/util.h"\r
\r
#include "../../ffmpeg/producer/filter/filter.h"\r
-#include "../../ffmpeg/producer/util.h"\r
-#include "../../ffmpeg/producer/frame_muxer.h"\r
+#include "../../ffmpeg/producer/util/util.h"\r
+#include "../../ffmpeg/producer/muxer/frame_muxer.h"\r
\r
#include <common/log/log.h>\r
#include <common/diagnostics/graph.h>\r
tbb::concurrent_bounded_queue<safe_ptr<core::basic_frame>> frame_buffer_;\r
\r
std::exception_ptr exception_;\r
- ffmpeg::filter filter_;\r
\r
ffmpeg::frame_muxer muxer_;\r
\r
, format_desc_(format_desc)\r
, device_index_(device_index)\r
, frame_factory_(frame_factory)\r
- , filter_(filter)\r
- , muxer_(ffmpeg::double_rate(filter) ? format_desc.fps * 2.0 : format_desc.fps, frame_factory)\r
+ , muxer_(format_desc.fps, frame_factory, filter)\r
{\r
frame_buffer_.set_capacity(2);\r
\r
av_frame->interlaced_frame = format_desc_.field_mode != core::field_mode::progressive;\r
av_frame->top_field_first = format_desc_.field_mode == core::field_mode::upper ? 1 : 0;\r
\r
- BOOST_FOREACH(auto& av_frame2, filter_.execute(av_frame))\r
- muxer_.push(av_frame2); \r
+ muxer_.push(av_frame); \r
\r
// It is assumed that audio is always equal or ahead of video.\r
if(audio && SUCCEEDED(audio->GetBytes(&bytes)))\r
else\r
muxer_.push(std::make_shared<core::audio_buffer>(frame_factory_->get_video_format_desc().audio_samples_per_frame, 0));\r
\r
- muxer_.commit();\r
-\r
- while(!muxer_.empty())\r
+ for(auto frame = muxer_.poll(); frame; frame = muxer_.poll())\r
{\r
- if(!frame_buffer_.try_push(muxer_.pop()))\r
+ if(!frame_buffer_.try_push(make_safe_ptr(frame)))\r
graph_->add_tag("dropped-frame");\r
}\r
\r
if(format_desc.format == core::video_format::invalid)\r
format_desc = frame_factory->get_video_format_desc();\r
\r
- return make_safe<decklink_producer_proxy>(frame_factory, format_desc, device_index, filter_str, length);\r
+ return create_destroy_proxy(make_safe<decklink_producer_proxy>(frame_factory, format_desc, device_index, filter_str, length));\r
}\r
\r
}}
\ No newline at end of file
<PrecompiledHeader>Use</PrecompiledHeader>\r
<BrowseInformation>true</BrowseInformation>\r
<WarningLevel>Level4</WarningLevel>\r
- <DebugInformationFormat>EditAndContinue</DebugInformationFormat>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
<PreprocessorDefinitions>TBB_USE_DEBUG;TBB_USE_CAPTURED_EXCEPTION=0;TBB_USE_ASSERT=1;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<TreatWarningAsError>true</TreatWarningAsError>\r
<MultiProcessorCompilation>true</MultiProcessorCompilation>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="producer\format\flv.cpp">\r
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">Use</PrecompiledHeader>\r
+ <ClCompile Include="producer\input\input.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">Use</PrecompiledHeader>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Use</PrecompiledHeader>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="producer\frame_muxer.cpp">\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
+ <ClCompile Include="producer\muxer\frame_muxer.cpp">\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="producer\input.cpp">\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
+ <ClCompile Include="producer\util\flv.cpp">\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="producer\util.cpp">\r
+ <ClCompile Include="producer\util\util.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">Create</PrecompiledHeader>\r
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>\r
</ClCompile>\r
- <ClCompile Include="tbb_avcodec.cpp" />\r
</ItemGroup>\r
<ItemGroup>\r
<ClInclude Include="consumer\ffmpeg_consumer.h" />\r
<ClInclude Include="producer\ffmpeg_producer.h" />\r
<ClInclude Include="producer\filter\filter.h" />\r
<ClInclude Include="producer\filter\parallel_yadif.h" />\r
- <ClInclude Include="producer\format\flv.h" />\r
- <ClInclude Include="producer\frame_muxer.h" />\r
- <ClInclude Include="producer\input.h" />\r
- <ClInclude Include="producer\util.h" />\r
+ <ClInclude Include="producer\input\input.h" />\r
+ <ClInclude Include="producer\muxer\display_mode.h" />\r
+ <ClInclude Include="producer\muxer\frame_muxer.h" />\r
+ <ClInclude Include="producer\util\flv.h" />\r
+ <ClInclude Include="producer\util\util.h" />\r
<ClInclude Include="producer\video\video_decoder.h" />\r
<ClInclude Include="StdAfx.h" />\r
- <ClInclude Include="tbb_avcodec.h" />\r
</ItemGroup>\r
<ItemGroup>\r
<ProjectReference Include="..\..\common\common.vcxproj">\r
<Filter Include="source\producer\video">\r
<UniqueIdentifier>{4b0f3949-6dc5-4895-837f-4c3ef1759a90}</UniqueIdentifier>\r
</Filter>\r
- <Filter Include="source\producer\format">\r
- <UniqueIdentifier>{613cfb2a-5714-46dc-b5ad-6964dded5b02}</UniqueIdentifier>\r
+ <Filter Include="source\producer\util">\r
+ <UniqueIdentifier>{d6af0416-0c85-45f8-97a3-4d0560b18691}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="source\producer\input">\r
+ <UniqueIdentifier>{28be54fb-eb6d-4c56-a0fa-8286ae1032bf}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="source\producer\muxer">\r
+ <UniqueIdentifier>{26599786-a0d9-4cc3-b5a4-633e9c81563a}</UniqueIdentifier>\r
</Filter>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="producer\ffmpeg_producer.cpp">\r
<Filter>source\producer</Filter>\r
</ClCompile>\r
- <ClCompile Include="producer\input.cpp">\r
- <Filter>source\producer</Filter>\r
- </ClCompile>\r
<ClCompile Include="producer\video\video_decoder.cpp">\r
<Filter>source\producer\video</Filter>\r
</ClCompile>\r
<Filter>source\consumer</Filter>\r
</ClCompile>\r
<ClCompile Include="StdAfx.cpp" />\r
- <ClCompile Include="tbb_avcodec.cpp">\r
- <Filter>source</Filter>\r
- </ClCompile>\r
<ClCompile Include="ffmpeg.cpp">\r
<Filter>source</Filter>\r
</ClCompile>\r
<ClCompile Include="producer\filter\filter.cpp">\r
<Filter>source\producer\filter</Filter>\r
</ClCompile>\r
- <ClCompile Include="producer\frame_muxer.cpp">\r
- <Filter>source\producer</Filter>\r
- </ClCompile>\r
<ClCompile Include="producer\filter\parallel_yadif.cpp">\r
<Filter>source\producer\filter</Filter>\r
</ClCompile>\r
- <ClCompile Include="producer\util.cpp">\r
- <Filter>source\producer</Filter>\r
- </ClCompile>\r
- <ClCompile Include="producer\format\flv.cpp">\r
- <Filter>source\producer\format</Filter>\r
- </ClCompile>\r
<ClCompile Include="producer\audio\audio_resampler.cpp">\r
<Filter>source\producer\audio</Filter>\r
</ClCompile>\r
+ <ClCompile Include="producer\util\util.cpp">\r
+ <Filter>source\producer\util</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="producer\util\flv.cpp">\r
+ <Filter>source\producer\util</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="producer\input\input.cpp">\r
+ <Filter>source\producer\input</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="producer\muxer\frame_muxer.cpp">\r
+ <Filter>source\producer\muxer</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
<ItemGroup>\r
<ClInclude Include="producer\ffmpeg_producer.h">\r
<Filter>source\producer</Filter>\r
</ClInclude>\r
- <ClInclude Include="producer\input.h">\r
- <Filter>source\producer</Filter>\r
- </ClInclude>\r
<ClInclude Include="producer\video\video_decoder.h">\r
<Filter>source\producer\video</Filter>\r
</ClInclude>\r
<Filter>source\consumer</Filter>\r
</ClInclude>\r
<ClInclude Include="StdAfx.h" />\r
- <ClInclude Include="tbb_avcodec.h">\r
- <Filter>source</Filter>\r
- </ClInclude>\r
<ClInclude Include="ffmpeg_error.h">\r
<Filter>source</Filter>\r
</ClInclude>\r
<ClInclude Include="producer\filter\filter.h">\r
<Filter>source\producer\filter</Filter>\r
</ClInclude>\r
- <ClInclude Include="producer\util.h">\r
- <Filter>source\producer</Filter>\r
- </ClInclude>\r
- <ClInclude Include="producer\frame_muxer.h">\r
- <Filter>source\producer</Filter>\r
- </ClInclude>\r
<ClInclude Include="producer\filter\parallel_yadif.h">\r
<Filter>source\producer\filter</Filter>\r
</ClInclude>\r
- <ClInclude Include="producer\format\flv.h">\r
- <Filter>source\producer\format</Filter>\r
- </ClInclude>\r
<ClInclude Include="producer\audio\audio_resampler.h">\r
<Filter>source\producer\audio</Filter>\r
</ClInclude>\r
+ <ClInclude Include="producer\util\flv.h">\r
+ <Filter>source\producer\util</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="producer\util\util.h">\r
+ <Filter>source\producer\util</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="producer\input\input.h">\r
+ <Filter>source\producer\input</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="producer\muxer\frame_muxer.h">\r
+ <Filter>source\producer\muxer</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="producer\muxer\display_mode.h">\r
+ <Filter>source\producer\muxer</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
\r
#include "audio_resampler.h"\r
\r
+#include "../util/util.h"\r
#include "../../ffmpeg_error.h"\r
\r
#include <core/video_format.h>\r
\r
struct audio_decoder::implementation : boost::noncopyable\r
{ \r
- std::shared_ptr<AVCodecContext> codec_context_; \r
- const core::video_format_desc format_desc_;\r
int index_;\r
- std::unique_ptr<audio_resampler> resampler_;\r
+ const safe_ptr<AVCodecContext> codec_context_; \r
+ const core::video_format_desc format_desc_;\r
+\r
+ audio_resampler resampler_;\r
\r
std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> buffer1_;\r
\r
- std::queue<std::shared_ptr<AVPacket>> packets_;\r
+ std::queue<safe_ptr<AVPacket>> packets_;\r
\r
- int64_t nb_frames_;\r
+ const int64_t nb_frames_;\r
public:\r
explicit implementation(const safe_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) \r
: format_desc_(format_desc) \r
- , nb_frames_(0)\r
- { \r
- try\r
- {\r
- AVCodec* dec;\r
- index_ = THROW_ON_ERROR2(av_find_best_stream(context.get(), AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0), "[audio_decoder]");\r
-\r
- THROW_ON_ERROR2(avcodec_open(context->streams[index_]->codec, dec), "[audio_decoder]");\r
- \r
- codec_context_.reset(context->streams[index_]->codec, avcodec_close);\r
-\r
- buffer1_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);\r
-\r
- resampler_.reset(new audio_resampler(format_desc_.audio_channels, codec_context_->channels,\r
- format_desc_.audio_sample_rate, codec_context_->sample_rate,\r
- AV_SAMPLE_FMT_S32, codec_context_->sample_fmt)); \r
- }\r
- catch(...)\r
- {\r
- index_ = THROW_ON_ERROR2(av_find_best_stream(context.get(), AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0), "[audio_decoder]");\r
-\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- CASPAR_LOG(warning) << "[audio_decoder] Failed to open audio-stream. Running without audio."; \r
- }\r
+ , codec_context_(open_codec(*context, AVMEDIA_TYPE_AUDIO, index_))\r
+ , resampler_(format_desc.audio_channels, codec_context_->channels,\r
+ format_desc.audio_sample_rate, codec_context_->sample_rate,\r
+ AV_SAMPLE_FMT_S32, codec_context_->sample_fmt)\r
+ , buffer1_(AVCODEC_MAX_AUDIO_FRAME_SIZE*2)\r
+ , nb_frames_(context->streams[index_]->nb_frames)\r
+ { \r
+ CASPAR_LOG(debug) << "[audio_decoder] " << context->streams[index_]->codec->codec->long_name; \r
}\r
\r
void push(const std::shared_ptr<AVPacket>& packet)\r
{ \r
- if(packet && packet->stream_index != index_)\r
+ if(!packet)\r
return;\r
\r
- packets_.push(packet);\r
+ if(packet->stream_index == index_ || packet == flush_packet())\r
+ packets_.push(make_safe_ptr(packet));\r
} \r
\r
- std::vector<std::shared_ptr<core::audio_buffer>> poll()\r
+ std::shared_ptr<core::audio_buffer> poll()\r
{\r
- std::vector<std::shared_ptr<core::audio_buffer>> result;\r
-\r
if(packets_.empty())\r
- return result;\r
-\r
- if(!codec_context_)\r
- return empty_poll();\r
- \r
+ return nullptr;\r
+ \r
auto packet = packets_.front();\r
\r
- if(packet) \r
+ if(packet == flush_packet())\r
{\r
- result.push_back(decode(*packet));\r
- if(packet->size == 0) \r
- packets_.pop();\r
- }\r
- else \r
- { \r
- avcodec_flush_buffers(codec_context_.get());\r
- result.push_back(nullptr);\r
packets_.pop();\r
- } \r
+ avcodec_flush_buffers(codec_context_.get());\r
+ return flush_audio();\r
+ }\r
\r
- return result;\r
- }\r
+ auto audio = decode(*packet);\r
\r
- std::vector<std::shared_ptr<core::audio_buffer>> empty_poll()\r
- {\r
- auto packet = packets_.front();\r
- packets_.pop();\r
+ if(packet->size == 0) \r
+ packets_.pop();\r
\r
- if(!packet) \r
- return boost::assign::list_of(nullptr);\r
- \r
- return boost::assign::list_of(std::make_shared<core::audio_buffer>(format_desc_.audio_samples_per_frame, 0)); \r
+ return audio;\r
}\r
\r
std::shared_ptr<core::audio_buffer> decode(AVPacket& pkt)\r
\r
buffer1_.resize(written_bytes);\r
\r
- buffer1_ = resampler_->resample(std::move(buffer1_));\r
+ buffer1_ = resampler_.resample(std::move(buffer1_));\r
\r
const auto n_samples = buffer1_.size() / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);\r
const auto samples = reinterpret_cast<int32_t*>(buffer1_.data());\r
\r
bool ready() const\r
{\r
- return !packets_.empty();\r
+ return packets_.size() > 10;\r
}\r
};\r
\r
audio_decoder::audio_decoder(const safe_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) : impl_(new implementation(context, format_desc)){}\r
void audio_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
bool audio_decoder::ready() const{return impl_->ready();}\r
-std::vector<std::shared_ptr<core::audio_buffer>> audio_decoder::poll(){return impl_->poll();}\r
+std::shared_ptr<core::audio_buffer> audio_decoder::poll(){return impl_->poll();}\r
int64_t audio_decoder::nb_frames() const{return impl_->nb_frames_;}\r
\r
}}
\ No newline at end of file
\r
#include <boost/noncopyable.hpp>\r
\r
-#include <vector>\r
-\r
struct AVPacket;\r
struct AVFormatContext;\r
\r
-namespace caspar {\r
-\r
+namespace caspar { \r
+ \r
namespace core {\r
\r
struct video_format_desc;\r
}\r
\r
namespace ffmpeg {\r
-\r
+ \r
class audio_decoder : boost::noncopyable\r
{\r
public:\r
explicit audio_decoder(const safe_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc);\r
\r
- void push(const std::shared_ptr<AVPacket>& packet);\r
bool ready() const;\r
- std::vector<std::shared_ptr<core::audio_buffer>> poll();\r
+ void push(const std::shared_ptr<AVPacket>& packet);\r
+ std::shared_ptr<core::audio_buffer> poll();\r
\r
int64_t nb_frames() const;\r
\r
\r
#include "ffmpeg_producer.h"\r
\r
-#include "frame_muxer.h"\r
-#include "input.h"\r
-#include "util.h"\r
+#include "../ffmpeg_error.h"\r
+#include "muxer/frame_muxer.h"\r
+#include "input/input.h"\r
+#include "util/util.h"\r
#include "audio/audio_decoder.h"\r
#include "video/video_decoder.h"\r
\r
#include <core/producer/frame_producer.h>\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/producer/frame/basic_frame.h>\r
+#include <core/producer/frame/frame_transform.h>\r
\r
#include <boost/algorithm/string.hpp>\r
#include <boost/assign.hpp>\r
\r
#include <tbb/parallel_invoke.h>\r
\r
+#include <limits>\r
+#include <memory>\r
+#include <queue>\r
+\r
namespace caspar { namespace ffmpeg {\r
\r
struct ffmpeg_producer : public core::frame_producer\r
const core::video_format_desc format_desc_;\r
\r
input input_; \r
- video_decoder video_decoder_;\r
- audio_decoder audio_decoder_; \r
- double fps_;\r
- frame_muxer muxer_;\r
+ std::unique_ptr<video_decoder> video_decoder_;\r
+ std::unique_ptr<audio_decoder> audio_decoder_; \r
+ std::unique_ptr<frame_muxer> muxer_;\r
\r
const int start_;\r
const bool loop_;\r
const size_t length_;\r
\r
safe_ptr<core::basic_frame> last_frame_;\r
-\r
- const size_t width_;\r
- const size_t height_;\r
- bool is_progressive_;\r
+ \r
+ std::queue<safe_ptr<core::basic_frame>> frame_buffer_;\r
\r
public:\r
explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, const std::wstring& filter, bool loop, int start, size_t length) \r
, frame_factory_(frame_factory) \r
, format_desc_(frame_factory->get_video_format_desc())\r
, input_(graph_, filename_, loop, start, length)\r
- , video_decoder_(input_.context(), frame_factory, filter)\r
- , audio_decoder_(input_.context(), frame_factory->get_video_format_desc())\r
- , fps_(video_decoder_.fps())\r
- , muxer_(fps_, frame_factory)\r
, start_(start)\r
, loop_(loop)\r
, length_(length)\r
, last_frame_(core::basic_frame::empty())\r
- , width_(video_decoder_.width())\r
- , height_(video_decoder_.height())\r
- , is_progressive_(true)\r
{\r
graph_->add_guide("frame-time", 0.5);\r
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));\r
graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f)); \r
diagnostics::register_graph(graph_);\r
\r
- for(int n = 0; n < 3; ++n)\r
- frame_factory->create_frame(this, std::max<size_t>(2, video_decoder_.width()), std::max<size_t>(2, video_decoder_.height()));\r
+ try\r
+ {\r
+ video_decoder_.reset(new video_decoder(input_.context()));\r
+ }\r
+ catch(averror_stream_not_found&)\r
+ {\r
+ CASPAR_LOG(warning) << "No video-stream found. Running without video."; \r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ CASPAR_LOG(warning) << "Failed to open video-stream. Running without video."; \r
+ }\r
+\r
+ try\r
+ {\r
+ audio_decoder_.reset(new audio_decoder(input_.context(), frame_factory->get_video_format_desc()));\r
+ }\r
+ catch(averror_stream_not_found&)\r
+ {\r
+ CASPAR_LOG(warning) << "No audio-stream found. Running without audio."; \r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ CASPAR_LOG(warning) << "Failed to open audio-stream. Running without audio."; \r
+ } \r
+\r
+ muxer_.reset(new frame_muxer(video_decoder_ ? video_decoder_->fps() : frame_factory->get_video_format_desc().fps, frame_factory, filter));\r
}\r
- \r
+ \r
virtual safe_ptr<core::basic_frame> receive(int hints)\r
- {\r
- auto frame = core::basic_frame::late();\r
- \r
+ { \r
frame_timer_.restart();\r
\r
- for(int n = 0; n < 64 && muxer_.empty(); ++n)\r
- decode_frame(hints);\r
+ for(int n = 0; n < 32 && frame_buffer_.size() < 2; ++n)\r
+ try_decode_frame(hints);\r
\r
- graph_->update_value("frame-time", static_cast<float>(frame_timer_.elapsed()*format_desc_.fps*0.5));\r
+ graph_->update_value("frame-time", frame_timer_.elapsed()*format_desc_.fps*0.5);\r
+ \r
+ if(frame_buffer_.empty() && input_.eof())\r
+ return core::basic_frame::eof();\r
\r
- if(!muxer_.empty())\r
- frame = last_frame_ = muxer_.pop(); \r
- else\r
+ if(frame_buffer_.empty())\r
{\r
- if(input_.eof())\r
- return core::basic_frame::eof();\r
- else \r
- graph_->add_tag("underflow"); \r
+ graph_->add_tag("underflow"); \r
+ return core::basic_frame::late(); \r
}\r
+ \r
+ last_frame_ = frame_buffer_.front(); \r
+ frame_buffer_.pop();\r
\r
graph_->set_text(print());\r
- \r
- return frame;\r
+\r
+ return last_frame_;\r
}\r
\r
virtual safe_ptr<core::basic_frame> last_frame() const\r
{\r
return disable_audio(last_frame_);\r
}\r
-\r
- void push_packets()\r
+ \r
+ void try_decode_frame(int hints)\r
{\r
- for(int n = 0; n < 16 && ((!muxer_.video_ready() && !video_decoder_.ready()) || (!muxer_.audio_ready() && !audio_decoder_.ready())); ++n) \r
+ std::shared_ptr<AVPacket> pkt;\r
+\r
+ for(int n = 0; n < 32 && ((video_decoder_ && !video_decoder_->ready()) || (audio_decoder_ && !audio_decoder_->ready())) && input_.try_pop(pkt); ++n)\r
{\r
- std::shared_ptr<AVPacket> pkt;\r
- if(input_.try_pop(pkt))\r
- {\r
- video_decoder_.push(pkt);\r
- audio_decoder_.push(pkt);\r
- }\r
+ if(video_decoder_)\r
+ video_decoder_->push(pkt);\r
+ if(audio_decoder_)\r
+ audio_decoder_->push(pkt);\r
}\r
- }\r
-\r
- void decode_frame(int hints)\r
- {\r
- push_packets();\r
\r
+ std::shared_ptr<AVFrame> video;\r
+ std::shared_ptr<core::audio_buffer> audio;\r
+\r
tbb::parallel_invoke(\r
[&]\r
{\r
- if(muxer_.video_ready())\r
- return;\r
-\r
- auto video_frames = video_decoder_.poll();\r
- BOOST_FOREACH(auto& video, video_frames) \r
- {\r
- is_progressive_ = video ? video->interlaced_frame == 0 : is_progressive_;\r
- muxer_.push(video, hints); \r
- }\r
+ if(!muxer_->video_ready() && video_decoder_) \r
+ video = video_decoder_->poll(); \r
},\r
[&]\r
- {\r
- if(muxer_.audio_ready())\r
- return;\r
- \r
- auto audio_samples = audio_decoder_.poll();\r
- BOOST_FOREACH(auto& audio, audio_samples)\r
- muxer_.push(audio); \r
+ { \r
+ if(!muxer_->audio_ready() && audio_decoder_) \r
+ audio = audio_decoder_->poll(); \r
});\r
\r
- muxer_.commit();\r
+ muxer_->push(video, hints);\r
+ muxer_->push(audio);\r
+\r
+ if(!audio_decoder_)\r
+ {\r
+ if(video == flush_video())\r
+ muxer_->push(flush_audio());\r
+ else if(!muxer_->audio_ready())\r
+ muxer_->push(empty_audio());\r
+ }\r
+\r
+ if(!video_decoder_)\r
+ {\r
+ if(audio == flush_audio())\r
+ muxer_->push(flush_video(), 0);\r
+ else if(!muxer_->video_ready())\r
+ muxer_->push(empty_video(), 0);\r
+ }\r
+ \r
+ for(auto frame = muxer_->poll(); frame; frame = muxer_->poll())\r
+ frame_buffer_.push(make_safe_ptr(frame));\r
}\r
\r
virtual int64_t nb_frames() const \r
int64_t nb_frames = input_.nb_frames();\r
if(input_.nb_loops() < 1) // input still hasn't counted all frames\r
{\r
- int64_t video_nb_frames = video_decoder_.nb_frames();\r
- int64_t audio_nb_frames = audio_decoder_.nb_frames();\r
+ auto video_nb_frames = video_decoder_ ? video_decoder_->nb_frames() : std::numeric_limits<int64_t>::max();\r
+ auto audio_nb_frames = audio_decoder_ ? audio_decoder_->nb_frames() : std::numeric_limits<int64_t>::max();\r
\r
nb_frames = std::min(static_cast<int64_t>(length_), std::max(nb_frames, std::max(video_nb_frames, audio_nb_frames)));\r
}\r
\r
- nb_frames = muxer_.calc_nb_frames(nb_frames);\r
+ nb_frames = muxer_->calc_nb_frames(nb_frames);\r
\r
// TODO: Might need to scale nb_frames av frame_muxer transformations.\r
\r
\r
virtual std::wstring print() const\r
{\r
- return L"ffmpeg[" + boost::filesystem::wpath(filename_).filename() + L"|" \r
- + boost::lexical_cast<std::wstring>(width_) + L"x" + boost::lexical_cast<std::wstring>(height_)\r
- + (is_progressive_ ? L"p" : L"i") + boost::lexical_cast<std::wstring>(is_progressive_ ? fps_ : 2.0 * fps_)\r
- + L"]";\r
+ if(video_decoder_)\r
+ {\r
+ return L"ffmpeg[" + boost::filesystem::wpath(filename_).filename() + L"|" \r
+ + boost::lexical_cast<std::wstring>(video_decoder_->width()) + L"x" + boost::lexical_cast<std::wstring>(video_decoder_->height())\r
+ + (video_decoder_->is_progressive() ? L"p" : L"i") + boost::lexical_cast<std::wstring>(video_decoder_->is_progressive() ? video_decoder_->fps() : 2.0 * video_decoder_->fps())\r
+ + L"]";\r
+ }\r
+ \r
+ return L"ffmpeg[" + boost::filesystem::wpath(filename_).filename() + L"]";\r
}\r
};\r
\r
boost::replace_all(filter_str, L"DEINTERLACE", L"YADIF=0:-1");\r
boost::replace_all(filter_str, L"DEINTERLACE_BOB", L"YADIF=1:-1");\r
\r
- return make_safe<ffmpeg_producer>(frame_factory, path, filter_str, loop, start, length);\r
+ return create_destroy_proxy(make_safe<ffmpeg_producer>(frame_factory, path, filter_str, loop, start, length));\r
}\r
\r
}}
\ No newline at end of file
\r
#include "../../ffmpeg_error.h"\r
\r
+#include <common/exception/exceptions.h>\r
+\r
#include <boost/assign.hpp>\r
\r
#include <cstdio>\r
AVFilterContext* buffersrc_ctx_;\r
std::shared_ptr<void> parallel_yadif_ctx_;\r
std::vector<PixelFormat> pix_fmts_;\r
+ std::queue<std::shared_ptr<AVFrame>> bypass_;\r
\r
implementation(const std::wstring& filters, const std::vector<PixelFormat>& pix_fmts) \r
: filters_(narrow(filters))\r
std::transform(filters_.begin(), filters_.end(), filters_.begin(), ::tolower);\r
}\r
\r
- std::vector<safe_ptr<AVFrame>> execute(const std::shared_ptr<AVFrame>& frame)\r
- {\r
+ void push(const std::shared_ptr<AVFrame>& frame)\r
+ { \r
if(!frame)\r
- return std::vector<safe_ptr<AVFrame>>();\r
-\r
- if(filters_.empty())\r
- return boost::assign::list_of(frame);\r
+ return;\r
\r
- push(frame);\r
- return poll();\r
- }\r
+ if(frame->data[0] == nullptr || frame->width < 1)\r
+ BOOST_THROW_EXCEPTION(invalid_argument());\r
\r
- void push(const std::shared_ptr<AVFrame>& frame)\r
- { \r
- if(!graph_)\r
+ if(filters_.empty())\r
{\r
- graph_.reset(avfilter_graph_alloc(), [](AVFilterGraph* p){avfilter_graph_free(&p);});\r
+ bypass_.push(frame);\r
+ return;\r
+ }\r
+ \r
+ try\r
+ {\r
+ if(!graph_)\r
+ {\r
+ try\r
+ {\r
+ graph_.reset(avfilter_graph_alloc(), [](AVFilterGraph* p){avfilter_graph_free(&p);});\r
\r
- // Input\r
- std::stringstream args;\r
- args << frame->width << ":" << frame->height << ":" << frame->format << ":" << 0 << ":" << 0 << ":" << 0 << ":" << 0; // don't care about pts and aspect_ratio\r
- THROW_ON_ERROR2(avfilter_graph_create_filter(&buffersrc_ctx_, avfilter_get_by_name("buffer"), "src", args.str().c_str(), NULL, graph_.get()), "[filter]");\r
-\r
- // OPIX_FMT_BGRAutput\r
- AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();\r
- buffersink_params->pixel_fmts = pix_fmts_.data();\r
- THROW_ON_ERROR2(avfilter_graph_create_filter(&buffersink_ctx_, avfilter_get_by_name("buffersink"), "out", NULL, buffersink_params, graph_.get()), "[filter]");\r
+ // Input\r
+ std::stringstream args;\r
+ args << frame->width << ":" << frame->height << ":" << frame->format << ":" << 0 << ":" << 0 << ":" << 0 << ":" << 0; // don't care about pts and aspect_ratio\r
+ THROW_ON_ERROR2(avfilter_graph_create_filter(&buffersrc_ctx_, avfilter_get_by_name("buffer"), "src", args.str().c_str(), NULL, graph_.get()), "[filter]");\r
+\r
+			// Output (PIX_FMT_BGRA)\r
+ AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();\r
+ buffersink_params->pixel_fmts = pix_fmts_.data();\r
+ THROW_ON_ERROR2(avfilter_graph_create_filter(&buffersink_ctx_, avfilter_get_by_name("buffersink"), "out", NULL, buffersink_params, graph_.get()), "[filter]");\r
\r
- AVFilterInOut* outputs = avfilter_inout_alloc();\r
- AVFilterInOut* inputs = avfilter_inout_alloc();\r
+ AVFilterInOut* outputs = avfilter_inout_alloc();\r
+ AVFilterInOut* inputs = avfilter_inout_alloc();\r
\r
- outputs->name = av_strdup("in");\r
- outputs->filter_ctx = buffersrc_ctx_;\r
- outputs->pad_idx = 0;\r
- outputs->next = NULL;\r
-\r
- inputs->name = av_strdup("out");\r
- inputs->filter_ctx = buffersink_ctx_;\r
- inputs->pad_idx = 0;\r
- inputs->next = NULL;\r
+ outputs->name = av_strdup("in");\r
+ outputs->filter_ctx = buffersrc_ctx_;\r
+ outputs->pad_idx = 0;\r
+ outputs->next = NULL;\r
+\r
+ inputs->name = av_strdup("out");\r
+ inputs->filter_ctx = buffersink_ctx_;\r
+ inputs->pad_idx = 0;\r
+ inputs->next = NULL;\r
\r
- THROW_ON_ERROR2(avfilter_graph_parse(graph_.get(), filters_.c_str(), &inputs, &outputs, NULL), "[filter]");\r
+ THROW_ON_ERROR2(avfilter_graph_parse(graph_.get(), filters_.c_str(), &inputs, &outputs, NULL), "[filter]");\r
\r
- avfilter_inout_free(&inputs);\r
- avfilter_inout_free(&outputs);\r
-\r
- THROW_ON_ERROR2(avfilter_graph_config(graph_.get(), NULL), "[filter]"); \r
-\r
- for(size_t n = 0; n < graph_->filter_count; ++n)\r
- {\r
- auto filter_name = graph_->filters[n]->name;\r
- if(strstr(filter_name, "yadif") != 0)\r
- parallel_yadif_ctx_ = make_parallel_yadif(graph_->filters[n]);\r
+ avfilter_inout_free(&inputs);\r
+ avfilter_inout_free(&outputs);\r
+\r
+ THROW_ON_ERROR2(avfilter_graph_config(graph_.get(), NULL), "[filter]"); \r
+\r
+ for(size_t n = 0; n < graph_->filter_count; ++n)\r
+ {\r
+ auto filter_name = graph_->filters[n]->name;\r
+ if(strstr(filter_name, "yadif") != 0)\r
+ parallel_yadif_ctx_ = make_parallel_yadif(graph_->filters[n]);\r
+ }\r
+ }\r
+ catch(...)\r
+ {\r
+ graph_ = nullptr;\r
+ throw;\r
+ }\r
}\r
+ \r
+ THROW_ON_ERROR2(av_vsrc_buffer_add_frame(buffersrc_ctx_, frame.get(), 0), "[filter]");\r
+ }\r
+ catch(ffmpeg_error&)\r
+ {\r
+ throw;\r
+ }\r
+ catch(...)\r
+ {\r
+ BOOST_THROW_EXCEPTION(ffmpeg_error() << boost::errinfo_nested_exception(boost::current_exception()));\r
}\r
- \r
- THROW_ON_ERROR2(av_vsrc_buffer_add_frame(buffersrc_ctx_, frame.get(), 0), "[filter]");\r
}\r
\r
- std::vector<safe_ptr<AVFrame>> poll()\r
+ std::shared_ptr<AVFrame> poll()\r
{\r
- std::vector<safe_ptr<AVFrame>> result;\r
+ if(filters_.empty())\r
+ {\r
+ if(bypass_.empty())\r
+ return nullptr;\r
+ auto frame = bypass_.front();\r
+ bypass_.pop();\r
+ return frame;\r
+ }\r
\r
if(!graph_)\r
- return result;\r
+ return nullptr;\r
\r
- while (avfilter_poll_frame(buffersink_ctx_->inputs[0])) \r
+ try\r
{\r
- AVFilterBufferRef *picref;\r
- THROW_ON_ERROR2(av_buffersink_get_buffer_ref(buffersink_ctx_, &picref, 0), "[filter]");\r
+ if(avfilter_poll_frame(buffersink_ctx_->inputs[0])) \r
+ {\r
+ AVFilterBufferRef *picref;\r
+ THROW_ON_ERROR2(av_buffersink_get_buffer_ref(buffersink_ctx_, &picref, 0), "[filter]");\r
+\r
+ if (picref) \r
+ { \r
+ safe_ptr<AVFrame> frame(avcodec_alloc_frame(), [=](AVFrame* p)\r
+ {\r
+ av_free(p);\r
+ avfilter_unref_buffer(picref);\r
+ });\r
+\r
+ avcodec_get_frame_defaults(frame.get()); \r
+\r
+ memcpy(frame->data, picref->data, sizeof(frame->data));\r
+ memcpy(frame->linesize, picref->linesize, sizeof(frame->linesize));\r
+ frame->format = picref->format;\r
+ frame->width = picref->video->w;\r
+ frame->height = picref->video->h;\r
+ frame->pkt_pos = picref->pos;\r
+ frame->interlaced_frame = picref->video->interlaced;\r
+ frame->top_field_first = picref->video->top_field_first;\r
+ frame->key_frame = picref->video->key_frame;\r
+ frame->pict_type = picref->video->pict_type;\r
+ frame->sample_aspect_ratio = picref->video->sample_aspect_ratio;\r
+\r
+ return frame;\r
+ }\r
+ }\r
+ }\r
+ catch(ffmpeg_error&)\r
+ {\r
+ throw;\r
+ }\r
+ catch(...)\r
+ {\r
+ BOOST_THROW_EXCEPTION(ffmpeg_error() << boost::errinfo_nested_exception(boost::current_exception()));\r
+ }\r
\r
- if (picref) \r
- { \r
- safe_ptr<AVFrame> frame(avcodec_alloc_frame(), [=](AVFrame* p)\r
- {\r
- av_free(p);\r
- avfilter_unref_buffer(picref);\r
- });\r
-\r
- avcodec_get_frame_defaults(frame.get()); \r
-\r
- memcpy(frame->data, picref->data, sizeof(frame->data));\r
- memcpy(frame->linesize, picref->linesize, sizeof(frame->linesize));\r
- frame->format = picref->format;\r
- frame->width = picref->video->w;\r
- frame->height = picref->video->h;\r
- frame->pkt_pos = picref->pos;\r
- frame->interlaced_frame = picref->video->interlaced;\r
- frame->top_field_first = picref->video->top_field_first;\r
- frame->key_frame = picref->video->key_frame;\r
- frame->pict_type = picref->video->pict_type;\r
- frame->sample_aspect_ratio = picref->video->sample_aspect_ratio;\r
-\r
- result.push_back(frame);\r
- }\r
- }\r
-\r
- return result;\r
+ return nullptr;\r
}\r
};\r
\r
filter::filter(const std::wstring& filters, const std::vector<PixelFormat>& pix_fmts) : impl_(new implementation(filters, pix_fmts)){}\r
filter::filter(filter&& other) : impl_(std::move(other.impl_)){}\r
filter& filter::operator=(filter&& other){impl_ = std::move(other.impl_); return *this;}\r
-std::vector<safe_ptr<AVFrame>> filter::execute(const std::shared_ptr<AVFrame>& frame) {return impl_->execute(frame);}\r
+void filter::push(const std::shared_ptr<AVFrame>& frame){impl_->push(frame);}\r
+std::shared_ptr<AVFrame> filter::poll(){return impl_->poll();}\r
+std::string filter::filter_str() const{return impl_->filters_;}\r
+std::vector<safe_ptr<AVFrame>> filter::poll_all()\r
+{ \r
+ std::vector<safe_ptr<AVFrame>> frames;\r
+ for(auto frame = poll(); frame; frame = poll())\r
+ frames.push_back(make_safe_ptr(frame));\r
+ return frames;\r
+}\r
\r
}}
\ No newline at end of file
#include <common/memory/safe_ptr.h>\r
\r
#include <boost/noncopyable.hpp>\r
+#include <boost/algorithm/string/case_conv.hpp>\r
\r
#include <string>\r
#include <vector>\r
\r
namespace caspar { namespace ffmpeg {\r
\r
-static bool double_rate(const std::wstring& filters)\r
+static bool is_double_rate(const std::wstring& filters)\r
{\r
- if(filters.find(L"YADIF=1") != std::string::npos)\r
+ if(boost::to_upper_copy(filters).find(L"YADIF=1") != std::string::npos)\r
return true;\r
\r
- if(filters.find(L"YADIF=3") != std::string::npos)\r
+ if(boost::to_upper_copy(filters).find(L"YADIF=3") != std::string::npos)\r
return true;\r
\r
return false;\r
}\r
\r
+static bool is_deinterlacing(const std::wstring& filters)\r
+{\r
+ if(boost::to_upper_copy(filters).find(L"YADIF") != std::string::npos)\r
+ return true; \r
+ return false;\r
+}\r
+\r
+static std::wstring append_filter(const std::wstring& filters, const std::wstring& filter)\r
+{\r
+ return filters + (filters.empty() ? L"" : L",") + filter;\r
+}\r
+\r
class filter : boost::noncopyable\r
{\r
public:\r
filter(filter&& other);\r
filter& operator=(filter&& other);\r
\r
- std::vector<safe_ptr<AVFrame>> execute(const std::shared_ptr<AVFrame>& frame);\r
+ void push(const std::shared_ptr<AVFrame>& frame);\r
+ std::shared_ptr<AVFrame> poll();\r
+ std::vector<safe_ptr<AVFrame>> poll_all();\r
\r
+ std::string filter_str() const;\r
+ \r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
+++ /dev/null
-#include "../StdAfx.h"\r
-\r
-#include "frame_muxer.h"\r
-\r
-#include "filter/filter.h"\r
-\r
-#include "util.h"\r
-\r
-#include <core/producer/frame_producer.h>\r
-#include <core/producer/frame/basic_frame.h>\r
-#include <core/producer/frame/frame_transform.h>\r
-#include <core/producer/frame/pixel_format.h>\r
-#include <core/producer/frame/frame_factory.h>\r
-#include <core/mixer/write_frame.h>\r
-\r
-#include <common/env.h>\r
-#include <common/exception/exceptions.h>\r
-#include <common/log/log.h>\r
-\r
-#if defined(_MSC_VER)\r
-#pragma warning (push)\r
-#pragma warning (disable : 4244)\r
-#endif\r
-extern "C" \r
-{\r
- #define __STDC_CONSTANT_MACROS\r
- #define __STDC_LIMIT_MACROS\r
- #include <libavcodec/avcodec.h>\r
- #include <libavformat/avformat.h>\r
-}\r
-#if defined(_MSC_VER)\r
-#pragma warning (pop)\r
-#endif\r
-\r
-#include <boost/foreach.hpp>\r
-#include <boost/range/algorithm_ext/push_back.hpp>\r
-\r
-#include <deque>\r
-#include <queue>\r
-#include <vector>\r
-\r
-using namespace caspar::core;\r
-\r
-namespace caspar { namespace ffmpeg {\r
-\r
-struct display_mode\r
-{\r
- enum type\r
- {\r
- simple,\r
- duplicate,\r
- half,\r
- interlace,\r
- deinterlace_bob,\r
- deinterlace_bob_reinterlace,\r
- deinterlace,\r
- count,\r
- invalid\r
- };\r
-\r
- static std::wstring print(display_mode::type value)\r
- {\r
- switch(value)\r
- {\r
- case simple:\r
- return L"simple";\r
- case duplicate:\r
- return L"duplicate";\r
- case half:\r
- return L"half";\r
- case interlace:\r
- return L"interlace";\r
- case deinterlace_bob:\r
- return L"deinterlace_bob";\r
- case deinterlace_bob_reinterlace:\r
- return L"deinterlace_bob_reinterlace";\r
- case deinterlace:\r
- return L"deinterlace";\r
- default:\r
- return L"invalid";\r
- }\r
- }\r
-};\r
-\r
-display_mode::type get_display_mode(const core::field_mode::type in_mode, double in_fps, const core::field_mode::type out_mode, double out_fps)\r
-{ \r
- static const auto epsilon = 2.0;\r
-\r
- if(in_fps < 20.0 || in_fps > 80.0)\r
- {\r
- //if(out_mode != core::field_mode::progressive && in_mode == core::field_mode::progressive)\r
- // return display_mode::interlace;\r
- \r
- if(out_mode == core::field_mode::progressive && in_mode != core::field_mode::progressive)\r
- {\r
- if(in_fps < 35.0)\r
- return display_mode::deinterlace;\r
- else\r
- return display_mode::deinterlace_bob;\r
- }\r
- }\r
-\r
- if(std::abs(in_fps - out_fps) < epsilon)\r
- {\r
- if(in_mode != core::field_mode::progressive && out_mode == core::field_mode::progressive)\r
- return display_mode::deinterlace;\r
- //else if(in_mode == core::field_mode::progressive && out_mode != core::field_mode::progressive)\r
- // simple(); // interlace_duplicate();\r
- else\r
- return display_mode::simple;\r
- }\r
- else if(std::abs(in_fps/2.0 - out_fps) < epsilon)\r
- {\r
- if(in_mode != core::field_mode::progressive)\r
- return display_mode::invalid;\r
-\r
- if(out_mode != core::field_mode::progressive)\r
- return display_mode::interlace;\r
- else\r
- return display_mode::half;\r
- }\r
- else if(std::abs(in_fps - out_fps/2.0) < epsilon)\r
- {\r
- if(out_mode != core::field_mode::progressive)\r
- return display_mode::invalid;\r
-\r
- if(in_mode != core::field_mode::progressive)\r
- return display_mode::deinterlace_bob;\r
- else\r
- return display_mode::duplicate;\r
- }\r
-\r
- return display_mode::invalid;\r
-}\r
-\r
-struct frame_muxer::implementation : boost::noncopyable\r
-{ \r
- std::deque<std::queue<safe_ptr<write_frame>>> video_streams_;\r
- std::deque<core::audio_buffer> audio_streams_;\r
- std::deque<safe_ptr<basic_frame>> frame_buffer_;\r
- display_mode::type display_mode_;\r
- const double in_fps_;\r
- const video_format_desc format_desc_;\r
- bool auto_transcode_;\r
-\r
- size_t audio_sample_count_;\r
- size_t video_frame_count_;\r
- \r
- size_t processed_audio_sample_count_;\r
- size_t processed_video_frame_count_;\r
-\r
- filter filter_;\r
- safe_ptr<core::frame_factory> frame_factory_;\r
- \r
- implementation(double in_fps, const safe_ptr<core::frame_factory>& frame_factory)\r
- : video_streams_(1)\r
- , audio_streams_(1)\r
- , display_mode_(display_mode::invalid)\r
- , in_fps_(in_fps)\r
- , format_desc_(frame_factory->get_video_format_desc())\r
- , auto_transcode_(env::properties().get("configuration.producers.auto-transcode", false))\r
- , audio_sample_count_(0)\r
- , video_frame_count_(0)\r
- , frame_factory_(frame_factory)\r
- {\r
- }\r
-\r
- void push(const std::shared_ptr<AVFrame>& video_frame, int hints)\r
- { \r
- if(!video_frame)\r
- { \r
- CASPAR_LOG(debug) << L"video-frame-count: " << static_cast<float>(video_frame_count_);\r
- video_frame_count_ = 0;\r
- video_streams_.push_back(std::queue<safe_ptr<write_frame>>());\r
- return;\r
- }\r
-\r
- if(video_frame->data[0] == nullptr)\r
- {\r
- video_streams_.back().push(make_safe<core::write_frame>(this));\r
- ++video_frame_count_;\r
- display_mode_ = display_mode::simple;\r
- return;\r
- }\r
-\r
- if(display_mode_ == display_mode::invalid)\r
- {\r
- if(auto_transcode_)\r
- {\r
- auto in_mode = get_mode(*video_frame);\r
- display_mode_ = get_display_mode(in_mode, in_fps_, format_desc_.field_mode, format_desc_.fps);\r
- \r
- if(display_mode_ == display_mode::simple && in_mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && video_frame->height != static_cast<int>(format_desc_.height))\r
- display_mode_ = display_mode::deinterlace_bob_reinterlace; // The frame will most likely be scaled, we need to deinterlace->reinterlace \r
- \r
- if(display_mode_ == display_mode::deinterlace)\r
- filter_ = filter(L"YADIF=0:-1");\r
- else if(display_mode_ == display_mode::deinterlace_bob || display_mode_ == display_mode::deinterlace_bob_reinterlace)\r
- filter_ = filter(L"YADIF=1:-1");\r
- }\r
- else\r
- display_mode_ = display_mode::simple;\r
-\r
- if(display_mode_ == display_mode::invalid)\r
- {\r
- CASPAR_LOG(warning) << L"[frame_muxer] Failed to detect display-mode.";\r
- display_mode_ = display_mode::simple;\r
- }\r
-\r
- CASPAR_LOG(info) << "[frame_muxer] " << display_mode::print(display_mode_);\r
- }\r
-\r
- \r
- if(hints & core::frame_producer::ALPHA_HINT)\r
- video_frame->format = make_alpha_format(video_frame->format);\r
- \r
- auto format = video_frame->format;\r
- if(video_frame->format == CASPAR_PIX_FMT_LUMA) // CASPAR_PIX_FMT_LUMA is not valid for filter, change it to GRAY8\r
- video_frame->format = PIX_FMT_GRAY8;\r
-\r
- BOOST_FOREACH(auto& av_frame, filter_.execute(video_frame))\r
- {\r
- av_frame->format = format;\r
-\r
- auto frame = make_write_frame(this, av_frame, frame_factory_, hints);\r
-\r
- // Fix field-order if needed\r
- if(frame->get_type() == core::field_mode::lower && format_desc_.field_mode == core::field_mode::upper)\r
- frame->get_frame_transform().fill_translation[1] += 1.0/static_cast<double>(format_desc_.height);\r
- else if(frame->get_type() == core::field_mode::upper && format_desc_.field_mode == core::field_mode::lower)\r
- frame->get_frame_transform().fill_translation[1] -= 1.0/static_cast<double>(format_desc_.height);\r
-\r
- video_streams_.back().push(frame);\r
- ++video_frame_count_;\r
- }\r
-\r
- if(video_streams_.back().size() > 8)\r
- BOOST_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("video-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));\r
- }\r
-\r
- void push(const std::shared_ptr<core::audio_buffer>& audio_samples)\r
- {\r
- if(!audio_samples) \r
- {\r
- CASPAR_LOG(debug) << L"audio-chunk-count: " << audio_sample_count_/format_desc_.audio_samples_per_frame;\r
- audio_streams_.push_back(core::audio_buffer());\r
- audio_sample_count_ = 0;\r
- return;\r
- }\r
-\r
- audio_sample_count_ += audio_samples->size();\r
-\r
- boost::range::push_back(audio_streams_.back(), *audio_samples);\r
-\r
- if(audio_streams_.back().size() > 8*format_desc_.audio_samples_per_frame)\r
- BOOST_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("audio-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));\r
- }\r
-\r
- safe_ptr<basic_frame> pop()\r
- { \r
- auto frame = frame_buffer_.front();\r
- frame_buffer_.pop_front(); \r
- return frame;\r
- }\r
-\r
- size_t size() const\r
- {\r
- return frame_buffer_.size();\r
- }\r
-\r
- safe_ptr<core::write_frame> pop_video()\r
- {\r
- auto frame = video_streams_.front().front();\r
- video_streams_.front().pop();\r
- \r
- return frame;\r
- }\r
-\r
- core::audio_buffer pop_audio()\r
- {\r
- CASPAR_VERIFY(audio_streams_.front().size() >= format_desc_.audio_samples_per_frame);\r
-\r
- auto begin = audio_streams_.front().begin();\r
- auto end = begin + format_desc_.audio_samples_per_frame;\r
-\r
- auto samples = core::audio_buffer(begin, end);\r
- audio_streams_.front().erase(begin, end);\r
-\r
- return samples;\r
- }\r
- \r
- bool video_ready() const\r
- { \r
- return video_streams_.size() > 1 || (video_streams_.size() >= audio_streams_.size() && video_ready2());\r
- }\r
- \r
- bool audio_ready() const\r
- {\r
- return audio_streams_.size() > 1 || (audio_streams_.size() >= video_streams_.size() && audio_ready2());\r
- }\r
-\r
- bool video_ready2() const\r
- { \r
- switch(display_mode_)\r
- {\r
- case display_mode::deinterlace_bob_reinterlace: \r
- case display_mode::interlace: \r
- return video_streams_.front().size() >= 2;\r
- default: \r
- return !video_streams_.front().empty();\r
- }\r
- }\r
- \r
- bool audio_ready2() const\r
- {\r
- switch(display_mode_)\r
- {\r
- case display_mode::duplicate: \r
- return audio_streams_.front().size()/2 >= format_desc_.audio_samples_per_frame;\r
- default: \r
- return audio_streams_.front().size() >= format_desc_.audio_samples_per_frame;\r
- }\r
- }\r
- \r
- void commit()\r
- {\r
- if(video_streams_.size() > 1 && audio_streams_.size() > 1 && (!video_ready2() || !audio_ready2()))\r
- {\r
- if(!video_streams_.front().empty() || !audio_streams_.front().empty())\r
- CASPAR_LOG(debug) << "Truncating: " << video_streams_.front().size() << L" video-frames, " << audio_streams_.front().size() << L" audio-samples.";\r
-\r
- video_streams_.pop_front();\r
- audio_streams_.pop_front();\r
- }\r
-\r
- if(!video_ready2() || !audio_ready2())\r
- return;\r
- \r
- switch(display_mode_)\r
- {\r
- case display_mode::simple: return simple(frame_buffer_);\r
- case display_mode::duplicate: return duplicate(frame_buffer_);\r
- case display_mode::half: return half(frame_buffer_);\r
- case display_mode::interlace: return interlace(frame_buffer_);\r
- case display_mode::deinterlace_bob: return simple(frame_buffer_);\r
- case display_mode::deinterlace_bob_reinterlace: return interlace(frame_buffer_);\r
- case display_mode::deinterlace: return simple(frame_buffer_);\r
- default: BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("invalid display-mode"));\r
- }\r
- }\r
- \r
- void simple(std::deque<safe_ptr<basic_frame>>& dest)\r
- { \r
- auto frame1 = pop_video();\r
- frame1->audio_data() = pop_audio();\r
-\r
- dest.push_back(frame1); \r
- }\r
-\r
- void duplicate(std::deque<safe_ptr<basic_frame>>& dest)\r
- { \r
- auto frame = pop_video();\r
-\r
- auto frame1 = make_safe<core::write_frame>(*frame); // make a copy\r
- frame1->audio_data() = pop_audio();\r
-\r
- auto frame2 = frame;\r
- frame2->audio_data() = pop_audio();\r
-\r
- dest.push_back(frame1);\r
- dest.push_back(frame2);\r
- }\r
-\r
- void half(std::deque<safe_ptr<basic_frame>>& dest)\r
- { \r
- auto frame1 = pop_video();\r
- frame1->audio_data() = pop_audio();\r
- \r
- video_streams_.front().pop(); // Throw away\r
-\r
- dest.push_back(frame1);\r
- }\r
- \r
- void interlace(std::deque<safe_ptr<basic_frame>>& dest)\r
- { \r
- auto frame1 = pop_video();\r
- frame1->audio_data() = pop_audio();\r
- \r
- auto frame2 = pop_video();\r
-\r
- dest.push_back(core::basic_frame::interlace(frame1, frame2, format_desc_.field_mode)); \r
- }\r
- \r
- int64_t calc_nb_frames(int64_t nb_frames) const\r
- {\r
- switch(display_mode_)\r
- {\r
- case display_mode::interlace:\r
- case display_mode::half:\r
- return nb_frames/2;\r
- case display_mode::duplicate:\r
- case display_mode::deinterlace_bob:\r
- return nb_frames*2;\r
- default:\r
- return nb_frames;\r
- }\r
- }\r
-};\r
-\r
-frame_muxer::frame_muxer(double in_fps, const safe_ptr<core::frame_factory>& frame_factory)\r
- : impl_(new implementation(in_fps, frame_factory)){}\r
-void frame_muxer::push(const std::shared_ptr<AVFrame>& video_frame, int hints){impl_->push(video_frame, hints);}\r
-void frame_muxer::push(const std::shared_ptr<core::audio_buffer>& audio_samples){return impl_->push(audio_samples);}\r
-void frame_muxer::commit(){impl_->commit();}\r
-safe_ptr<basic_frame> frame_muxer::pop(){return impl_->pop();}\r
-size_t frame_muxer::size() const {return impl_->size();}\r
-bool frame_muxer::empty() const {return impl_->size() == 0;}\r
-bool frame_muxer::video_ready() const{return impl_->video_ready();}\r
-bool frame_muxer::audio_ready() const{return impl_->audio_ready();}\r
-int64_t frame_muxer::calc_nb_frames(int64_t nb_frames) const {return impl_->calc_nb_frames(nb_frames);}\r
-\r
-}}
\ No newline at end of file
#pragma warning (disable : 4244)\r
#endif\r
\r
-#include "../stdafx.h"\r
+#include "../../stdafx.h"\r
\r
#include "input.h"\r
-#include "../ffmpeg_error.h"\r
-#include "../tbb_avcodec.h"\r
+\r
+#include "../util/util.h"\r
+#include "../../ffmpeg_error.h"\r
\r
#include <core/video_format.h>\r
\r
\r
struct input::implementation : boost::noncopyable\r
{ \r
- std::shared_ptr<AVFormatContext> format_context_; // Destroy this last\r
- int default_stream_index_;\r
-\r
safe_ptr<diagnostics::graph> graph_;\r
- \r
+\r
+ const safe_ptr<AVFormatContext> format_context_; // Destroy this last\r
+ const int default_stream_index_;\r
+ \r
const std::wstring filename_;\r
const bool loop_;\r
const size_t start_; \r
boost::condition_variable buffer_cond_;\r
boost::mutex buffer_mutex_;\r
\r
- boost::thread thread_;\r
- tbb::atomic<bool> is_running_;\r
-\r
tbb::atomic<size_t> nb_frames_;\r
tbb::atomic<size_t> nb_loops_;\r
\r
+ boost::thread thread_;\r
+ tbb::atomic<bool> is_running_;\r
+\r
public:\r
explicit implementation(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, size_t start, size_t length) \r
: graph_(graph)\r
+ , format_context_(open_input(filename)) \r
+ , default_stream_index_(av_find_default_stream_index(format_context_.get()))\r
, loop_(loop)\r
, filename_(filename)\r
, start_(start)\r
, length_(length)\r
, frame_number_(0)\r
- { \r
- is_running_ = true;\r
- nb_frames_ = 0;\r
- nb_loops_ = 0;\r
- \r
- AVFormatContext* weak_format_context_ = nullptr;\r
- THROW_ON_ERROR2(avformat_open_input(&weak_format_context_, narrow(filename).c_str(), nullptr, nullptr), print());\r
-\r
- format_context_.reset(weak_format_context_, av_close_input_file);\r
-\r
- av_dump_format(weak_format_context_, 0, narrow(filename).c_str(), 0);\r
- \r
- THROW_ON_ERROR2(avformat_find_stream_info(format_context_.get(), nullptr), print());\r
- \r
- default_stream_index_ = THROW_ON_ERROR2(av_find_default_stream_index(format_context_.get()), print());\r
-\r
+ { \r
if(start_ > 0) \r
seek_frame(start_);\r
- \r
- for(int n = 0; n < 16 && !full(); ++n)\r
- read_next_packet();\r
- \r
+ \r
graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f)); \r
graph_->set_color("buffer-count", diagnostics::color(0.7f, 0.4f, 0.4f));\r
graph_->set_color("buffer-size", diagnostics::color(1.0f, 1.0f, 0.0f)); \r
-\r
+ \r
+ is_running_ = true;\r
thread_ = boost::thread([this]{run();});\r
}\r
\r
\r
void read_next_packet()\r
{ \r
- int ret = 0;\r
-\r
- std::shared_ptr<AVPacket> read_packet(new AVPacket, [](AVPacket* p)\r
- {\r
- av_free_packet(p);\r
- delete p;\r
- });\r
- av_init_packet(read_packet.get());\r
-\r
- ret = av_read_frame(format_context_.get(), read_packet.get()); // read_packet is only valid until next call of av_read_frame. Use av_dup_packet to extend its life. \r
+ auto packet = create_packet();\r
+ auto ret = av_read_frame(format_context_.get(), packet.get()); // packet is only valid until next call of av_read_frame. Use av_dup_packet to extend its life. \r
\r
if(is_eof(ret)) \r
{\r
{ \r
THROW_ON_ERROR(ret, "av_read_frame", print());\r
\r
- if(read_packet->stream_index == default_stream_index_)\r
+ if(packet->stream_index == default_stream_index_)\r
{\r
if(nb_loops_ == 0)\r
++nb_frames_;\r
++frame_number_;\r
}\r
\r
- THROW_ON_ERROR2(av_dup_packet(read_packet.get()), print());\r
+ THROW_ON_ERROR2(av_dup_packet(packet.get()), print());\r
\r
// Make sure that the packet is correctly deallocated even if size and data is modified during decoding.\r
- auto size = read_packet->size;\r
- auto data = read_packet->data;\r
+ auto size = packet->size;\r
+ auto data = packet->data;\r
\r
- read_packet = std::shared_ptr<AVPacket>(read_packet.get(), [=](AVPacket*)\r
+ packet = safe_ptr<AVPacket>(packet.get(), [packet, size, data](AVPacket*)\r
{\r
- read_packet->size = size;\r
- read_packet->data = data;\r
+ packet->size = size;\r
+ packet->data = data;\r
});\r
\r
- buffer_.try_push(read_packet);\r
- buffer_size_ += read_packet->size;\r
+ buffer_.try_push(packet);\r
+ buffer_size_ += packet->size;\r
\r
graph_->update_value("buffer-size", (static_cast<double>(buffer_size_)+0.001)/MAX_BUFFER_SIZE);\r
graph_->update_value("buffer-count", (static_cast<double>(buffer_.size()+0.001)/MAX_BUFFER_COUNT));\r
}\r
\r
void seek_frame(int64_t frame, int flags = 0)\r
- { \r
+ { \r
+ if(flags == AVSEEK_FLAG_BACKWARD)\r
+ {\r
+ // Fix VP6 seeking\r
+ int vid_stream_index = av_find_best_stream(format_context_.get(), AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);\r
+ if(vid_stream_index >= 0)\r
+ {\r
+ auto codec_id = format_context_->streams[vid_stream_index]->codec->codec_id;\r
+ if(codec_id == CODEC_ID_VP6A || codec_id == CODEC_ID_VP6F || codec_id == CODEC_ID_VP6)\r
+ flags |= AVSEEK_FLAG_BYTE;\r
+ }\r
+ }\r
+\r
THROW_ON_ERROR2(av_seek_frame(format_context_.get(), default_stream_index_, frame, flags), print()); \r
- buffer_.push(nullptr);\r
+\r
+ buffer_.push(flush_packet());\r
} \r
\r
bool is_eof(int ret)\r
: impl_(new implementation(graph, filename, loop, start, length)){}\r
bool input::eof() const {return !impl_->is_running_;}\r
bool input::try_pop(std::shared_ptr<AVPacket>& packet){return impl_->try_pop(packet);}\r
-safe_ptr<AVFormatContext> input::context(){return make_safe_ptr(impl_->format_context_);}\r
+safe_ptr<AVFormatContext> input::context(){return impl_->format_context_;}\r
size_t input::nb_frames() const {return impl_->nb_frames();}\r
size_t input::nb_loops() const {return impl_->nb_loops();}\r
}}\r
--- /dev/null
+#pragma once\r
+\r
+#include <core/video_format.h>\r
+\r
+namespace caspar { namespace ffmpeg {\r
+ \r
+struct display_mode\r
+{\r
+ enum type\r
+ {\r
+ simple,\r
+ duplicate,\r
+ half,\r
+ interlace,\r
+ deinterlace_bob,\r
+ deinterlace_bob_reinterlace,\r
+ deinterlace,\r
+ count,\r
+ invalid\r
+ };\r
+\r
+ static std::wstring print(display_mode::type value)\r
+ {\r
+ switch(value)\r
+ {\r
+ case simple: return L"simple";\r
+ case duplicate: return L"duplicate";\r
+ case half: return L"half";\r
+ case interlace: return L"interlace";\r
+ case deinterlace_bob: return L"deinterlace_bob";\r
+ case deinterlace_bob_reinterlace: return L"deinterlace_bob_reinterlace";\r
+ case deinterlace: return L"deinterlace";\r
+ default: return L"invalid";\r
+ }\r
+ }\r
+};\r
+\r
+display_mode::type get_display_mode(const core::field_mode::type in_mode, double in_fps, const core::field_mode::type out_mode, double out_fps)\r
+{ \r
+ static const auto epsilon = 2.0;\r
+\r
+ if(in_fps < 20.0 || in_fps > 80.0)\r
+ {\r
+ //if(out_mode != core::field_mode::progressive && in_mode == core::field_mode::progressive)\r
+ // return display_mode::interlace;\r
+ \r
+ if(out_mode == core::field_mode::progressive && in_mode != core::field_mode::progressive)\r
+ {\r
+ if(in_fps < 35.0)\r
+ return display_mode::deinterlace;\r
+ else\r
+ return display_mode::deinterlace_bob;\r
+ }\r
+ }\r
+\r
+ if(std::abs(in_fps - out_fps) < epsilon)\r
+ {\r
+ if(in_mode != core::field_mode::progressive && out_mode == core::field_mode::progressive)\r
+ return display_mode::deinterlace;\r
+ //else if(in_mode == core::field_mode::progressive && out_mode != core::field_mode::progressive)\r
+ // simple(); // interlace_duplicate();\r
+ else\r
+ return display_mode::simple;\r
+ }\r
+ else if(std::abs(in_fps/2.0 - out_fps) < epsilon)\r
+ {\r
+ if(in_mode != core::field_mode::progressive)\r
+ return display_mode::invalid;\r
+\r
+ if(out_mode != core::field_mode::progressive)\r
+ return display_mode::interlace;\r
+ else\r
+ return display_mode::half;\r
+ }\r
+ else if(std::abs(in_fps - out_fps/2.0) < epsilon)\r
+ {\r
+ if(out_mode != core::field_mode::progressive)\r
+ return display_mode::invalid;\r
+\r
+ if(in_mode != core::field_mode::progressive)\r
+ return display_mode::deinterlace_bob;\r
+ else\r
+ return display_mode::duplicate;\r
+ }\r
+\r
+ return display_mode::invalid;\r
+}\r
+\r
+}}
\ No newline at end of file
--- /dev/null
+#include "../../StdAfx.h"\r
+\r
+#include "frame_muxer.h"\r
+\r
+#include "display_mode.h"\r
+\r
+#include "../filter/filter.h"\r
+#include "../util/util.h"\r
+\r
+#include <core/producer/frame_producer.h>\r
+#include <core/producer/frame/basic_frame.h>\r
+#include <core/producer/frame/frame_transform.h>\r
+#include <core/producer/frame/pixel_format.h>\r
+#include <core/producer/frame/frame_factory.h>\r
+#include <core/mixer/write_frame.h>\r
+\r
+#include <common/env.h>\r
+#include <common/exception/exceptions.h>\r
+#include <common/log/log.h>\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning (push)\r
+#pragma warning (disable : 4244)\r
+#endif\r
+extern "C" \r
+{\r
+ #define __STDC_CONSTANT_MACROS\r
+ #define __STDC_LIMIT_MACROS\r
+ #include <libavcodec/avcodec.h>\r
+ #include <libavformat/avformat.h>\r
+}\r
+#if defined(_MSC_VER)\r
+#pragma warning (pop)\r
+#endif\r
+\r
+#include <boost/foreach.hpp>\r
+#include <boost/range/algorithm_ext/push_back.hpp>\r
+\r
+#include <deque>\r
+#include <queue>\r
+#include <vector>\r
+\r
+using namespace caspar::core;\r
+\r
+namespace caspar { namespace ffmpeg {\r
+ \r
+struct frame_muxer::implementation : boost::noncopyable\r
+{ \r
+ std::queue<std::queue<safe_ptr<write_frame>>> video_streams_;\r
+ std::queue<core::audio_buffer> audio_streams_;\r
+ std::queue<safe_ptr<basic_frame>> frame_buffer_;\r
+ display_mode::type display_mode_;\r
+ const double in_fps_;\r
+ const video_format_desc format_desc_;\r
+ bool auto_transcode_;\r
+\r
+ size_t audio_sample_count_;\r
+ size_t video_frame_count_;\r
+ \r
+ safe_ptr<core::frame_factory> frame_factory_;\r
+ \r
+ filter filter_;\r
+ std::wstring filter_str_;\r
+ \r
+ implementation(double in_fps, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter_str)\r
+ : display_mode_(display_mode::invalid)\r
+ , in_fps_(in_fps)\r
+ , format_desc_(frame_factory->get_video_format_desc())\r
+ , auto_transcode_(env::properties().get("configuration.producers.auto-transcode", false))\r
+ , audio_sample_count_(0)\r
+ , video_frame_count_(0)\r
+ , frame_factory_(frame_factory)\r
+ , filter_str_(filter_str)\r
+ {\r
+ video_streams_.push(std::queue<safe_ptr<write_frame>>());\r
+ audio_streams_.push(core::audio_buffer());\r
+ }\r
+\r
+ void push(const std::shared_ptr<AVFrame>& video_frame, int hints)\r
+ { \r
+ if(!video_frame)\r
+ return;\r
+ \r
+ if(video_frame == flush_video())\r
+ { \r
+ CASPAR_LOG(trace) << L"video-frame-count: " << static_cast<float>(video_frame_count_);\r
+ video_frame_count_ = 0;\r
+ video_streams_.push(std::queue<safe_ptr<write_frame>>());\r
+ }\r
+ else if(video_frame == empty_video())\r
+ {\r
+ video_streams_.back().push(make_safe<core::write_frame>(this));\r
+ ++video_frame_count_;\r
+ display_mode_ = display_mode::simple;\r
+ }\r
+ else\r
+ {\r
+ if(display_mode_ == display_mode::invalid)\r
+ initialize_display_mode(*video_frame);\r
+ \r
+ if(hints & core::frame_producer::ALPHA_HINT)\r
+ video_frame->format = make_alpha_format(video_frame->format);\r
+ \r
+ auto format = video_frame->format;\r
+ if(video_frame->format == CASPAR_PIX_FMT_LUMA) // CASPAR_PIX_FMT_LUMA is not valid for filter, change it to GRAY8\r
+ video_frame->format = PIX_FMT_GRAY8;\r
+\r
+ filter_.push(video_frame);\r
+ BOOST_FOREACH(auto& av_frame, filter_.poll_all())\r
+ {\r
+ av_frame->format = format;\r
+ video_streams_.back().push(make_write_frame(this, av_frame, frame_factory_, hints));\r
+ ++video_frame_count_;\r
+ }\r
+ }\r
+\r
+ if(video_streams_.back().size() > 32)\r
+ BOOST_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("video-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));\r
+ }\r
+\r
+ void push(const std::shared_ptr<core::audio_buffer>& audio)\r
+ {\r
+ if(!audio) \r
+ return;\r
+\r
+ if(audio == flush_audio())\r
+ {\r
+ CASPAR_LOG(trace) << L"audio-frame-count: " << audio_sample_count_/format_desc_.audio_samples_per_frame;\r
+ audio_sample_count_ = 0;\r
+ audio_streams_.push(core::audio_buffer());\r
+ }\r
+ else if(audio == empty_audio())\r
+ {\r
+ boost::range::push_back(audio_streams_.back(), core::audio_buffer(format_desc_.audio_samples_per_frame, 0));\r
+ audio_sample_count_ += audio->size();\r
+ }\r
+ else\r
+ {\r
+ boost::range::push_back(audio_streams_.back(), *audio);\r
+ audio_sample_count_ += audio->size();\r
+ }\r
+\r
+ if(audio_streams_.back().size() > 32*format_desc_.audio_samples_per_frame)\r
+ BOOST_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("audio-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));\r
+ }\r
+ \r
+ bool video_ready() const\r
+ { \r
+ return video_streams_.size() > 1 || (video_streams_.size() >= audio_streams_.size() && video_ready2());\r
+ }\r
+ \r
+ bool audio_ready() const\r
+ {\r
+ return audio_streams_.size() > 1 || (audio_streams_.size() >= video_streams_.size() && audio_ready2());\r
+ }\r
+\r
+ bool video_ready2() const\r
+ { \r
+ switch(display_mode_)\r
+ {\r
+ case display_mode::deinterlace_bob_reinterlace: \r
+ case display_mode::interlace: \r
+ case display_mode::half:\r
+ return video_streams_.front().size() >= 2;\r
+ default: \r
+ return video_streams_.front().size() >= 1;\r
+ }\r
+ }\r
+ \r
+ bool audio_ready2() const\r
+ {\r
+ switch(display_mode_)\r
+ {\r
+ case display_mode::duplicate: \r
+ return audio_streams_.front().size()/2 >= format_desc_.audio_samples_per_frame;\r
+ default: \r
+ return audio_streams_.front().size() >= format_desc_.audio_samples_per_frame;\r
+ }\r
+ }\r
+ \r
+ std::shared_ptr<basic_frame> poll()\r
+ {\r
+ if(!frame_buffer_.empty())\r
+ {\r
+ auto frame = frame_buffer_.front();\r
+ frame_buffer_.pop(); \r
+ return frame;\r
+ }\r
+\r
+ if(video_streams_.size() > 1 && audio_streams_.size() > 1 && (!video_ready2() || !audio_ready2()))\r
+ {\r
+ if(!video_streams_.front().empty() || !audio_streams_.front().empty())\r
+ CASPAR_LOG(debug) << "Truncating: " << video_streams_.front().size() << L" video-frames, " << audio_streams_.front().size() << L" audio-samples.";\r
+\r
+ video_streams_.pop();\r
+ audio_streams_.pop();\r
+ }\r
+\r
+ if(!video_ready2() || !audio_ready2())\r
+ return nullptr;\r
+ \r
+ auto frame1 = pop_video();\r
+ frame1->audio_data() = pop_audio();\r
+\r
+ switch(display_mode_)\r
+ {\r
+ case display_mode::simple: \r
+ case display_mode::deinterlace_bob: \r
+ case display_mode::deinterlace: \r
+ {\r
+ frame_buffer_.push(frame1);\r
+ break;\r
+ }\r
+ case display_mode::interlace: \r
+ case display_mode::deinterlace_bob_reinterlace: \r
+ { \r
+ auto frame2 = pop_video();\r
+\r
+ frame_buffer_.push(core::basic_frame::interlace(frame1, frame2, format_desc_.field_mode)); \r
+ break;\r
+ }\r
+ case display_mode::duplicate: \r
+ {\r
+ auto frame2 = make_safe<core::write_frame>(*frame1);\r
+ frame2->audio_data() = pop_audio();\r
+\r
+ frame_buffer_.push(frame1);\r
+ frame_buffer_.push(frame2);\r
+ break;\r
+ }\r
+ case display_mode::half: \r
+ { \r
+ pop_video(); // Throw away\r
+\r
+ frame_buffer_.push(frame1);\r
+ break;\r
+ }\r
+ default: \r
+ BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("invalid display-mode"));\r
+ }\r
+ \r
+ return frame_buffer_.empty() ? nullptr : poll();\r
+ }\r
+ \r
+ safe_ptr<core::write_frame> pop_video()\r
+ {\r
+ auto frame = video_streams_.front().front();\r
+ video_streams_.front().pop(); \r
+ return frame;\r
+ }\r
+\r
+ core::audio_buffer pop_audio()\r
+ {\r
+ CASPAR_VERIFY(audio_streams_.front().size() >= format_desc_.audio_samples_per_frame);\r
+\r
+ auto begin = audio_streams_.front().begin();\r
+ auto end = begin + format_desc_.audio_samples_per_frame;\r
+\r
+ auto samples = core::audio_buffer(begin, end);\r
+ audio_streams_.front().erase(begin, end);\r
+\r
+ return samples;\r
+ }\r
+ \r
+ void initialize_display_mode(const AVFrame& frame)\r
+ {\r
+ display_mode_ = display_mode::simple;\r
+ if(auto_transcode_)\r
+ {\r
+ auto mode = get_mode(frame);\r
+ auto fps = in_fps_;\r
+\r
+ if(is_deinterlacing(filter_str_))\r
+ mode = core::field_mode::progressive;\r
+\r
+ if(is_double_rate(filter_str_))\r
+ fps *= 2;\r
+ \r
+ display_mode_ = get_display_mode(mode, fps, format_desc_.field_mode, format_desc_.fps);\r
+ \r
+ if(display_mode_ == display_mode::simple && mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && frame.height != static_cast<int>(format_desc_.height))\r
+ display_mode_ = display_mode::deinterlace_bob_reinterlace; // The frame will most likely be scaled, we need to deinterlace->reinterlace \r
+ \r
+ if(display_mode_ == display_mode::deinterlace)\r
+ filter_str_ = append_filter(filter_str_, L"YADIF=0:-1");\r
+ else if(display_mode_ == display_mode::deinterlace_bob || display_mode_ == display_mode::deinterlace_bob_reinterlace)\r
+ filter_str_ = append_filter(filter_str_, L"YADIF=1:-1");\r
+ }\r
+\r
+ if(display_mode_ == display_mode::invalid)\r
+ {\r
+ CASPAR_LOG(warning) << L"[frame_muxer] Auto-transcode: Failed to detect display-mode.";\r
+ display_mode_ = display_mode::simple;\r
+ }\r
+ \r
+ filter_ = filter(filter_str_);\r
+\r
+ CASPAR_LOG(info) << "[frame_muxer] " << display_mode::print(display_mode_) \r
+ << L" " << frame.width << L"x" << frame.height \r
+ << (frame.interlaced_frame ? L"i" : L"p") \r
+ << (frame.interlaced_frame ? in_fps_*2 : in_fps_);\r
+ }\r
+\r
+ int64_t calc_nb_frames(int64_t nb_frames) const\r
+ {\r
+ switch(display_mode_) // Take into account transformation in run.\r
+ {\r
+ case display_mode::deinterlace_bob_reinterlace:\r
+ case display_mode::interlace: \r
+ case display_mode::half:\r
+ nb_frames /= 2;\r
+ break;\r
+ case display_mode::duplicate:\r
+ nb_frames *= 2;\r
+ break;\r
+ }\r
+\r
+ if(is_double_rate(widen(filter_.filter_str()))) // Take into account transformations in filter.\r
+ nb_frames *= 2;\r
+\r
+ return nb_frames;\r
+ }\r
+};\r
+\r
+frame_muxer::frame_muxer(double in_fps, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter)\r
+ : impl_(new implementation(in_fps, frame_factory, filter)){}\r
+void frame_muxer::push(const std::shared_ptr<AVFrame>& video_frame, int hints){impl_->push(video_frame, hints);}\r
+void frame_muxer::push(const std::shared_ptr<core::audio_buffer>& audio_samples){return impl_->push(audio_samples);}\r
+std::shared_ptr<basic_frame> frame_muxer::poll(){return impl_->poll();}\r
+int64_t frame_muxer::calc_nb_frames(int64_t nb_frames) const {return impl_->calc_nb_frames(nb_frames);}\r
+bool frame_muxer::video_ready() const{return impl_->video_ready();}\r
+bool frame_muxer::audio_ready() const{return impl_->audio_ready();}\r
+\r
+}}
\ No newline at end of file
class frame_muxer : boost::noncopyable\r
{\r
public:\r
- frame_muxer(double in_fps, const safe_ptr<core::frame_factory>& frame_factory);\r
+ frame_muxer(double in_fps, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter = L"");\r
\r
void push(const std::shared_ptr<AVFrame>& video_frame, int hints = 0);\r
void push(const std::shared_ptr<core::audio_buffer>& audio_samples);\r
\r
- void commit();\r
-\r
bool video_ready() const;\r
bool audio_ready() const;\r
\r
- size_t size() const;\r
- bool empty() const;\r
+ std::shared_ptr<core::basic_frame> poll();\r
\r
int64_t calc_nb_frames(int64_t nb_frames) const;\r
\r
- safe_ptr<core::basic_frame> pop();\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
+++ /dev/null
-#pragma once\r
-\r
-#include <common/memory/safe_ptr.h>\r
-\r
-#include <core/video_format.h>\r
-#include <core/producer/frame/pixel_format.h>\r
-\r
-extern "C"\r
-{\r
- #include <libavutil/pixfmt.h>\r
-}\r
-\r
-struct AVFrame;\r
-struct AVFormatContext;\r
-\r
-namespace caspar {\r
-\r
-namespace core {\r
-\r
-struct pixel_format_desc;\r
-class write_frame;\r
-struct frame_factory;\r
-\r
-}\r
-\r
-namespace ffmpeg {\r
-\r
-static const PixelFormat CASPAR_PIX_FMT_LUMA = PIX_FMT_MONOBLACK; // Just hijack some unual pixel format.\r
-\r
-core::field_mode::type get_mode(AVFrame& frame);\r
-core::pixel_format::type get_pixel_format(PixelFormat pix_fmt);\r
-core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height);\r
-int make_alpha_format(int format); // NOTE: Be careful about CASPAR_PIX_FMT_LUMA, change it to PIX_FMT_GRAY8 if you want to use the frame inside some ffmpeg function.\r
-safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int hints);\r
-\r
-void fix_meta_data(AVFormatContext& context);\r
-\r
-}}
\ No newline at end of file
\r
#include "util.h"\r
\r
-#include "format/flv.h"\r
+#include "flv.h"\r
+\r
+#include "../../ffmpeg_error.h"\r
\r
#include <tbb/concurrent_unordered_map.h>\r
#include <tbb/concurrent_queue.h>\r
#include <core/mixer/write_frame.h>\r
\r
#include <common/exception/exceptions.h>\r
+#include <common/utility/assert.h>\r
+#include <common/memory/memcpy.h>\r
\r
#include <tbb/parallel_for.h>\r
\r
#endif\r
\r
namespace caspar { namespace ffmpeg {\r
+ \r
+safe_ptr<AVPacket> flush_packet()\r
+{\r
+ static auto packet = create_packet();\r
+ return packet;\r
+}\r
+\r
+std::shared_ptr<core::audio_buffer> flush_audio()\r
+{\r
+ static std::shared_ptr<core::audio_buffer> audio(new core::audio_buffer());\r
+ return audio;\r
+}\r
+\r
+std::shared_ptr<core::audio_buffer> empty_audio()\r
+{\r
+ static std::shared_ptr<core::audio_buffer> audio(new core::audio_buffer());\r
+ return audio;\r
+}\r
+\r
+std::shared_ptr<AVFrame> flush_video()\r
+{\r
+ static std::shared_ptr<AVFrame> video(avcodec_alloc_frame(), av_free);\r
+ return video;\r
+}\r
+\r
+std::shared_ptr<AVFrame> empty_video()\r
+{\r
+ static std::shared_ptr<AVFrame> video(avcodec_alloc_frame(), av_free);\r
+ return video;\r
+}\r
\r
-core::field_mode::type get_mode(AVFrame& frame)\r
+core::field_mode::type get_mode(const AVFrame& frame)\r
{\r
if(!frame.interlaced_frame)\r
return core::field_mode::progressive;\r
{\r
switch(get_pixel_format(static_cast<PixelFormat>(format)))\r
{\r
- case core::pixel_format::luma:\r
- case core::pixel_format::gray:\r
- case core::pixel_format::invalid:\r
- return format;\r
case core::pixel_format::ycbcr:\r
case core::pixel_format::ycbcra:\r
return CASPAR_PIX_FMT_LUMA;\r
default:\r
- return PIX_FMT_GRAY8;\r
+ return format;\r
}\r
}\r
\r
{ \r
static tbb::concurrent_unordered_map<size_t, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contexts_;\r
\r
+ if(decoded_frame->width < 1 || decoded_frame->height < 1)\r
+ return make_safe<core::write_frame>(tag);\r
+\r
const auto width = decoded_frame->width;\r
const auto height = decoded_frame->height;\r
auto desc = get_pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);\r
if(hints & core::frame_producer::ALPHA_HINT)\r
desc = get_pixel_format_desc(static_cast<PixelFormat>(make_alpha_format(decoded_frame->format)), width, height);\r
\r
+ std::shared_ptr<core::write_frame> write;\r
+\r
if(desc.pix_fmt == core::pixel_format::invalid)\r
{\r
auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);\r
\r
- auto write = frame_factory->create_frame(tag, get_pixel_format_desc(PIX_FMT_BGRA, width, height));\r
+ write = frame_factory->create_frame(tag, get_pixel_format_desc(PIX_FMT_BGRA, width, height));\r
write->set_type(get_mode(*decoded_frame));\r
\r
std::shared_ptr<SwsContext> sws_context;\r
pool.push(sws_context);\r
\r
write->commit();\r
-\r
- return write;\r
}\r
else\r
{\r
- auto write = frame_factory->create_frame(tag, desc);\r
+ write = frame_factory->create_frame(tag, desc);\r
write->set_type(get_mode(*decoded_frame));\r
\r
for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)\r
auto result = write->image_data(n).begin();\r
auto decoded = decoded_frame->data[n];\r
auto decoded_linesize = decoded_frame->linesize[n];\r
- \r
- // Copy line by line since ffmpeg sometimes pads each line.\r
- tbb::affinity_partitioner ap;\r
- tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)\r
+ \r
+ CASPAR_ASSERT(decoded);\r
+ CASPAR_ASSERT(write->image_data(n).begin());\r
+\r
+ if(decoded_linesize != static_cast<int>(plane.width))\r
{\r
- for(size_t y = r.begin(); y != r.end(); ++y)\r
- memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
- }, ap);\r
+ // Copy line by line since ffmpeg sometimes pads each line.\r
+ tbb::parallel_for<size_t>(0, desc.planes[n].height, [&](size_t y)\r
+ {\r
+ fast_memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
+ });\r
+ }\r
+ else\r
+ {\r
+ fast_memcpy(result, decoded, plane.size);\r
+ }\r
\r
write->commit(n);\r
}\r
- \r
- return write;\r
}\r
+ \r
+ // Fix field-order if needed\r
+ if(write->get_type() == core::field_mode::lower && frame_factory->get_video_format_desc().field_mode == core::field_mode::upper)\r
+ write->get_frame_transform().fill_translation[1] += 1.0/static_cast<double>(frame_factory->get_video_format_desc().height);\r
+ else if(write->get_type() == core::field_mode::upper && frame_factory->get_video_format_desc().field_mode == core::field_mode::lower)\r
+ write->get_frame_transform().fill_translation[1] -= 1.0/static_cast<double>(frame_factory->get_video_format_desc().height);\r
+\r
+ return make_safe_ptr(write);\r
}\r
\r
bool is_sane_fps(AVRational time_base)\r
\r
if(audio_index > -1) // Check for invalid double frame-rate\r
{\r
- auto& audio_context = *context.streams[audio_index]->codec;\r
- auto& audio_stream = *context.streams[audio_index];\r
+ auto& audio_context = *context.streams[audio_index]->codec;\r
+ auto& audio_stream = *context.streams[audio_index];\r
\r
- double duration_sec = audio_stream.duration / static_cast<double>(audio_context.sample_rate);\r
- double fps = static_cast<double>(video_context.time_base.den) / static_cast<double>(video_context.time_base.num);\r
+ double duration_sec = audio_stream.duration / static_cast<double>(audio_context.sample_rate);\r
+ double fps = static_cast<double>(video_context.time_base.den) / static_cast<double>(video_context.time_base.num);\r
\r
double fps_nb_frames = static_cast<double>(duration_sec*fps);\r
- double stream_nb_frames = static_cast<double>(video_stream.nb_frames);\r
- double diff = std::abs(fps_nb_frames - stream_nb_frames*2.0);\r
+ double stream_nb_frames = static_cast<double>(video_stream.nb_frames);\r
+ double diff = std::abs(fps_nb_frames - stream_nb_frames*2.0);\r
if(diff < fps_nb_frames*0.05)\r
video_context.time_base.num *= 2;\r
}\r
+ else\r
+ {\r
+ video_context.time_base.den = video_stream.r_frame_rate.num;\r
+ video_context.time_base.num = video_stream.r_frame_rate.den;\r
+ }\r
}\r
\r
double fps = static_cast<double>(video_context.time_base.den) / static_cast<double>(video_context.time_base.num);\r
video_context.time_base.den = static_cast<int>(closest_fps*1000000.0);\r
}\r
\r
+safe_ptr<AVPacket> create_packet()\r
+{\r
+ safe_ptr<AVPacket> packet(new AVPacket, [](AVPacket* p)\r
+ {\r
+ av_free_packet(p);\r
+ delete p;\r
+ });\r
+ \r
+ av_init_packet(packet.get());\r
+ return packet;\r
+}\r
+\r
+safe_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index)\r
+{ \r
+ AVCodec* decoder;\r
+ index = THROW_ON_ERROR2(av_find_best_stream(&context, type, -1, -1, &decoder, 0), "");\r
+ THROW_ON_ERROR2(avcodec_open(context.streams[index]->codec, decoder), "");\r
+ return safe_ptr<AVCodecContext>(context.streams[index]->codec, avcodec_close);\r
+}\r
+\r
+safe_ptr<AVFormatContext> open_input(const std::wstring& filename)\r
+{\r
+ AVFormatContext* weak_context = nullptr;\r
+ THROW_ON_ERROR2(avformat_open_input(&weak_context, narrow(filename).c_str(), nullptr, nullptr), filename);\r
+ safe_ptr<AVFormatContext> context(weak_context, av_close_input_file); \r
+ THROW_ON_ERROR2(avformat_find_stream_info(weak_context, nullptr), filename);\r
+ fix_meta_data(*context);\r
+ return context;\r
+}\r
+//\r
+//void av_dup_frame(AVFrame* frame)\r
+//{\r
+// AVFrame* new_frame = avcodec_alloc_frame();\r
+//\r
+//\r
+// const uint8_t *src_data[4] = {0};\r
+// memcpy(const_cast<uint8_t**>(&src_data[0]), frame->data, 4);\r
+// const int src_linesizes[4] = {0};\r
+// memcpy(const_cast<int*>(&src_linesizes[0]), frame->linesize, 4);\r
+//\r
+// av_image_alloc(new_frame->data, new_frame->linesize, new_frame->width, new_frame->height, frame->format, 16);\r
+//\r
+// av_image_copy(new_frame->data, new_frame->linesize, src_data, src_linesizes, frame->format, new_frame->width, new_frame->height);\r
+//\r
+// frame =\r
+//}\r
+\r
}}
\ No newline at end of file
--- /dev/null
+#pragma once\r
+\r
+#include <common/memory/safe_ptr.h>\r
+\r
+#include <core/video_format.h>\r
+#include <core/producer/frame/pixel_format.h>\r
+#include <core/mixer/audio/audio_mixer.h>\r
+\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning (push)\r
+#pragma warning (disable : 4244)\r
+#endif\r
+extern "C" \r
+{\r
+ #include <libavutil/pixfmt.h>\r
+ #include <libavcodec/avcodec.h>\r
+}\r
+#if defined(_MSC_VER)\r
+#pragma warning (pop)\r
+#endif\r
+\r
+struct AVFrame;\r
+struct AVFormatContext;\r
+struct AVPacket;\r
+\r
+namespace caspar {\r
+\r
+namespace core {\r
+\r
+struct pixel_format_desc;\r
+class write_frame;\r
+struct frame_factory;\r
+\r
+}\r
+\r
+namespace ffmpeg {\r
+ \r
+ \r
+safe_ptr<AVPacket> flush_packet();\r
+std::shared_ptr<core::audio_buffer> flush_audio();\r
+std::shared_ptr<core::audio_buffer> empty_audio();\r
+std::shared_ptr<AVFrame> flush_video();\r
+std::shared_ptr<AVFrame> empty_video();\r
+\r
+// Utils\r
+\r
+static const PixelFormat CASPAR_PIX_FMT_LUMA = PIX_FMT_MONOBLACK; // Just hijack some unusual pixel format.
+\r
+core::field_mode::type get_mode(const AVFrame& frame);\r
+int make_alpha_format(int format); // NOTE: Be careful about CASPAR_PIX_FMT_LUMA, change it to PIX_FMT_GRAY8 if you want to use the frame inside some ffmpeg function.\r
+safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int hints);\r
+\r
+safe_ptr<AVPacket> create_packet();\r
+\r
+safe_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index);\r
+safe_ptr<AVFormatContext> open_input(const std::wstring& filename);\r
+\r
+//void av_dup_frame(AVFrame* frame);\r
+\r
+}}
\ No newline at end of file
\r
#include "video_decoder.h"\r
\r
-#include "../util.h"\r
-#include "../filter/filter.h"\r
+#include "../util/util.h"\r
\r
#include "../../ffmpeg_error.h"\r
-#include "../../tbb_avcodec.h"\r
\r
#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/frame_factory.h>\r
\r
struct video_decoder::implementation : boost::noncopyable\r
{\r
- const safe_ptr<core::frame_factory> frame_factory_;\r
- std::shared_ptr<AVCodecContext> codec_context_;\r
int index_;\r
+ const safe_ptr<AVCodecContext> codec_context_;\r
\r
- std::queue<std::shared_ptr<AVPacket>> packets_;\r
-\r
- filter filter_;\r
-\r
- double fps_;\r
- int64_t nb_frames_;\r
+ std::queue<safe_ptr<AVPacket>> packets_;\r
+ \r
+ const double fps_;\r
+ const int64_t nb_frames_;\r
\r
- size_t width_;\r
- size_t height_;\r
+ const size_t width_;\r
+ const size_t height_;\r
+ bool is_progressive_;\r
\r
public:\r
- explicit implementation(const safe_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter) \r
- : frame_factory_(frame_factory)\r
- , filter_(filter)\r
- , fps_(frame_factory_->get_video_format_desc().fps)\r
- , nb_frames_(0)\r
- , width_(0)\r
- , height_(0)\r
+ explicit implementation(const safe_ptr<AVFormatContext>& context) \r
+ : codec_context_(open_codec(*context, AVMEDIA_TYPE_VIDEO, index_))\r
+ , fps_(static_cast<double>(codec_context_->time_base.den) / static_cast<double>(codec_context_->time_base.num))\r
+ , nb_frames_(context->streams[index_]->nb_frames)\r
+ , width_(codec_context_->width)\r
+ , height_(codec_context_->height)\r
{\r
- try\r
- {\r
- AVCodec* dec;\r
- index_ = THROW_ON_ERROR2(av_find_best_stream(context.get(), AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0), "[video_decoder]");\r
- \r
- THROW_ON_ERROR2(tbb_avcodec_open(context->streams[index_]->codec, dec), "[video_decoder]");\r
- \r
- codec_context_.reset(context->streams[index_]->codec, tbb_avcodec_close);\r
- \r
- CASPAR_LOG(debug) << "[video_decoder] " << context->streams[index_]->codec->codec->long_name;\r
-\r
- // Some files give an invalid time_base numerator, try to fix it.\r
-\r
- fix_meta_data(*context);\r
- \r
- fps_ = static_cast<double>(codec_context_->time_base.den) / static_cast<double>(codec_context_->time_base.num);\r
- nb_frames_ = context->streams[index_]->nb_frames;\r
-\r
- if(double_rate(filter))\r
- fps_ *= 2;\r
-\r
- width_ = codec_context_->width;\r
- height_ = codec_context_->height;\r
- }\r
- catch(...)\r
- {\r
- index_ = THROW_ON_ERROR2(av_find_best_stream(context.get(), AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0), "[video_decoder]");\r
-\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- CASPAR_LOG(warning) << "[video_decoder] Failed to open video-stream. Running without video."; \r
- }\r
+ CASPAR_LOG(debug) << "[video_decoder] " << context->streams[index_]->codec->codec->long_name;\r
}\r
\r
void push(const std::shared_ptr<AVPacket>& packet)\r
{\r
- if(packet && packet->stream_index != index_)\r
+ if(!packet)\r
return;\r
\r
- packets_.push(packet);\r
+ if(packet->stream_index == index_ || packet == flush_packet())\r
+ packets_.push(make_safe_ptr(packet));\r
}\r
\r
- std::vector<std::shared_ptr<AVFrame>> poll()\r
+ std::shared_ptr<AVFrame> poll()\r
{ \r
- std::vector<std::shared_ptr<AVFrame>> result;\r
-\r
if(packets_.empty())\r
- return result;\r
-\r
- if(!codec_context_)\r
- return empty_poll();\r
-\r
+ return nullptr;\r
+ \r
auto packet = packets_.front();\r
\r
- if(packet)\r
+ if(packet == flush_packet())\r
{ \r
- BOOST_FOREACH(auto& frame, decode(*packet))\r
- boost::range::push_back(result, filter_.execute(frame));\r
-\r
- if(packet->size == 0)\r
- packets_.pop();\r
- }\r
- else\r
- {\r
if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)\r
{\r
AVPacket pkt;\r
pkt.data = nullptr;\r
pkt.size = 0;\r
\r
- BOOST_FOREACH(auto& frame, decode(pkt))\r
- boost::range::push_back(result, filter_.execute(frame)); \r
- }\r
-\r
- if(result.empty())\r
- { \r
- packets_.pop();\r
- avcodec_flush_buffers(codec_context_.get());\r
- result.push_back(nullptr);\r
+ auto video = decode(pkt);\r
+ if(video)\r
+ return video;\r
}\r
- }\r
\r
- return result;\r
- }\r
-\r
- std::vector<std::shared_ptr<AVFrame>> empty_poll()\r
- { \r
- auto packet = packets_.front();\r
+ packets_.pop();\r
+ avcodec_flush_buffers(codec_context_.get());\r
+ return flush_video(); \r
+ }\r
+ \r
packets_.pop();\r
-\r
- if(!packet) \r
- return boost::assign::list_of(nullptr);\r
-\r
- std::shared_ptr<AVFrame> frame(avcodec_alloc_frame(), av_free);\r
- frame->data[0] = nullptr;\r
-\r
- return boost::assign::list_of(frame); \r
+ return decode(*packet);\r
}\r
\r
- std::vector<std::shared_ptr<AVFrame>> decode(AVPacket& pkt)\r
+ std::shared_ptr<AVFrame> decode(AVPacket& pkt)\r
{\r
std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
\r
// If a decoder consumes less then the whole packet then something is wrong\r
// that might be just harmless padding at the end, or a problem with the\r
// AVParser or demuxer which puted more then one frame in a AVPacket.\r
- pkt.data = nullptr;\r
- pkt.size = 0;\r
\r
if(frame_finished == 0) \r
- return std::vector<std::shared_ptr<AVFrame>>();\r
+ return nullptr;\r
+\r
+ is_progressive_ = !decoded_frame->interlaced_frame;\r
\r
- if(decoded_frame->repeat_pict % 2 > 0)\r
- CASPAR_LOG(warning) << "[video_decoder]: Field repeat_pict not implemented.";\r
+ if(decoded_frame->repeat_pict > 0)\r
+ CASPAR_LOG(warning) << "[video_decoder] Field repeat_pict not implemented.";\r
\r
- return std::vector<std::shared_ptr<AVFrame>>(1 + decoded_frame->repeat_pict/2, decoded_frame);\r
+ return decoded_frame;\r
}\r
\r
bool ready() const\r
{\r
- return !packets_.empty();\r
+ return packets_.size() > 10;\r
}\r
\r
double fps() const\r
}\r
};\r
\r
-video_decoder::video_decoder(const safe_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter) : impl_(new implementation(context, frame_factory, filter)){}\r
+video_decoder::video_decoder(const safe_ptr<AVFormatContext>& context) : impl_(new implementation(context)){}\r
void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
-std::vector<std::shared_ptr<AVFrame>> video_decoder::poll(){return impl_->poll();}\r
+std::shared_ptr<AVFrame> video_decoder::poll(){return impl_->poll();}\r
bool video_decoder::ready() const{return impl_->ready();}\r
double video_decoder::fps() const{return impl_->fps();}\r
int64_t video_decoder::nb_frames() const{return impl_->nb_frames_;}\r
size_t video_decoder::width() const{return impl_->width_;}\r
size_t video_decoder::height() const{return impl_->height_;}\r
+bool video_decoder::is_progressive() const{return impl_->is_progressive_;}\r
\r
}}
\ No newline at end of file
\r
#include <common/memory/safe_ptr.h>\r
\r
-#include <core/video_format.h>\r
-\r
#include <boost/noncopyable.hpp>\r
\r
-#include <vector>\r
-\r
struct AVFormatContext;\r
struct AVFrame;\r
struct AVPacket;\r
class video_decoder : boost::noncopyable\r
{\r
public:\r
- explicit video_decoder(const safe_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter);\r
+ explicit video_decoder(const safe_ptr<AVFormatContext>& context);\r
\r
- void push(const std::shared_ptr<AVPacket>& packet);\r
bool ready() const;\r
- std::vector<std::shared_ptr<AVFrame>> poll();\r
+ void push(const std::shared_ptr<AVPacket>& packet);\r
+ std::shared_ptr<AVFrame> poll();\r
\r
- size_t width() const;\r
- size_t height() const;\r
-\r
+ size_t width() const;\r
+ size_t height() const;\r
int64_t nb_frames() const;\r
-\r
- double fps() const;\r
+ double fps() const;\r
+ bool is_progressive() const;\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
+++ /dev/null
-// Author Robert Nagy\r
-\r
-#include "stdafx.h"\r
-\r
-#include "tbb_avcodec.h"\r
-\r
-#include <common/log/log.h>\r
-#include <common/env.h>\r
-#include <common/utility/assert.h>\r
-\r
-#include <tbb/task.h>\r
-#include <tbb/atomic.h>\r
-#include <tbb/parallel_for.h>\r
-#include <tbb/tbb_thread.h>\r
-\r
-#if defined(_MSC_VER)\r
-#pragma warning (push)\r
-#pragma warning (disable : 4244)\r
-#endif\r
-extern "C" \r
-{\r
- #define __STDC_CONSTANT_MACROS\r
- #define __STDC_LIMIT_MACROS\r
- #include <libavformat/avformat.h>\r
-}\r
-#if defined(_MSC_VER)\r
-#pragma warning (pop)\r
-#endif\r
-\r
-namespace caspar { namespace ffmpeg {\r
- \r
-int thread_execute(AVCodecContext* s, int (*func)(AVCodecContext *c2, void *arg2), void* arg, int* ret, int count, int size)\r
-{\r
- tbb::parallel_for(tbb::blocked_range<size_t>(0, count), [&](const tbb::blocked_range<size_t>& r)\r
- {\r
- for(size_t n = r.begin(); n != r.end(); ++n) \r
- {\r
- int r = func(s, reinterpret_cast<uint8_t*>(arg) + n*size);\r
- if(ret)\r
- ret[n] = r;\r
- }\r
- });\r
-\r
- return 0;\r
-}\r
-\r
-int thread_execute2(AVCodecContext* s, int (*func)(AVCodecContext* c2, void* arg2, int, int), void* arg, int* ret, int count)\r
-{ \r
- tbb::atomic<int> counter; \r
- counter = 0; \r
-\r
- CASPAR_ASSERT(tbb::tbb_thread::hardware_concurrency() < 16);\r
- // Note: this will probably only work when tbb::task_scheduler_init::num_threads() < 16.\r
- tbb::parallel_for(tbb::blocked_range<int>(0, count, 2), [&](const tbb::blocked_range<int> &r) \r
- { \r
- int threadnr = counter++; \r
- for(int jobnr = r.begin(); jobnr != r.end(); ++jobnr)\r
- { \r
- int r = func(s, arg, jobnr, threadnr); \r
- if (ret) \r
- ret[jobnr] = r; \r
- }\r
- --counter;\r
- }); \r
-\r
- return 0; \r
-}\r
-\r
-void thread_init(AVCodecContext* s)\r
-{\r
- static const size_t MAX_THREADS = 16; // See mpegvideo.h\r
- static int dummy_opaque;\r
-\r
- s->active_thread_type = FF_THREAD_SLICE;\r
- s->thread_opaque = &dummy_opaque; \r
- s->execute = thread_execute;\r
- s->execute2 = thread_execute2;\r
- s->thread_count = MAX_THREADS; // We are using a task-scheduler, so use as many "threads/tasks" as possible. \r
-\r
- CASPAR_LOG(info) << "Initialized ffmpeg tbb context.";\r
-}\r
-\r
-void thread_free(AVCodecContext* s)\r
-{\r
- if(!s->thread_opaque)\r
- return;\r
-\r
- s->thread_opaque = nullptr;\r
- \r
- CASPAR_LOG(info) << "Released ffmpeg tbb context.";\r
-}\r
-\r
-int tbb_avcodec_open(AVCodecContext* avctx, AVCodec* codec)\r
-{\r
- avctx->thread_count = 1;\r
- // Some codecs don't like to have multiple multithreaded decoding instances. Only enable for those we know work.\r
- if((codec->id == CODEC_ID_MPEG2VIDEO) && \r
- (codec->capabilities & CODEC_CAP_SLICE_THREADS) && \r
- (avctx->thread_type & FF_THREAD_SLICE))\r
- {\r
- thread_init(avctx);\r
- } \r
- // ff_thread_init will not be executed since thread_opaque != nullptr || thread_count == 1.\r
- return avcodec_open(avctx, codec); \r
-}\r
-\r
-int tbb_avcodec_close(AVCodecContext* avctx)\r
-{\r
- thread_free(avctx);\r
- // ff_thread_free will not be executed since thread_opaque == nullptr.\r
- return avcodec_close(avctx); \r
-}\r
-\r
-}}
\ No newline at end of file
+++ /dev/null
-#pragma once\r
-\r
-struct AVCodecContext;\r
-struct AVCodec;\r
-\r
-namespace caspar { namespace ffmpeg {\r
- \r
-int tbb_avcodec_open(AVCodecContext *avctx, AVCodec *codec);\r
-int tbb_avcodec_close(AVCodecContext *avctx);\r
-\r
-}}
\ No newline at end of file
{\r
auto template_host = get_template_host(frame_factory->get_video_format_desc());\r
\r
- return make_safe<flash_producer>(frame_factory, env::template_folder() + L"\\" + widen(template_host.filename), template_host.width, template_host.height);\r
+ return create_destroy_proxy(make_safe<flash_producer>(frame_factory, env::template_folder() + L"\\" + widen(template_host.filename), template_host.width, template_host.height));\r
}\r
\r
std::wstring find_template(const std::wstring& template_name)\r
}\r
}\r
\r
- return last_frame_ = core::basic_frame(frames_);\r
+ return last_frame_ = make_safe<core::basic_frame>(frames_);\r
}\r
\r
virtual safe_ptr<core::basic_frame> last_frame() const\r
input_.try_push(std::make_shared<std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>>>());\r
input_.try_push(std::make_shared<std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>>>());\r
Stop();\r
+ input_.try_push(std::make_shared<std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>>>());\r
+ input_.try_push(std::make_shared<std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>>>());\r
CASPAR_LOG(info) << print() << L" Shutting down."; \r
}\r
\r
auto av_frame = get_av_frame();\r
av_frame->data[0] = const_cast<uint8_t*>(frame->image_data().begin());\r
\r
- auto frames = filter_.execute(av_frame);\r
+ filter_.push(av_frame);\r
+ auto frames = filter_.poll_all();\r
\r
if(frames.empty())\r
return;\r
\r
bool auto_play = std::find(_parameters.begin(), _parameters.end(), L"AUTO") != _parameters.end();\r
\r
- auto pFP2 = create_transition_producer(GetChannel()->get_video_format_desc().field_mode, create_destroy_producer_proxy(GetChannel()->context().destruction(), pFP), transitionInfo);\r
+ auto pFP2 = create_transition_producer(GetChannel()->get_video_format_desc().field_mode, pFP, transitionInfo);\r
GetChannel()->stage()->load(GetLayerIndex(), pFP2, false, auto_play ? transitionInfo.duration : -1); // TODO: LOOP\r
\r
CASPAR_LOG(info) << "Loaded " << _parameters[0] << TEXT(" successfully to background");\r
</producers>\r
<channels>\r
<channel>\r
- <video-mode>720p5000</video-mode>\r
+ <video-mode>PAL</video-mode>\r
<consumers>\r
<screen></screen>\r
</consumers>\r
CASPAR_LOG(info) << device;\r
});\r
\r
+ CASPAR_LOG(info) << L"Bluefish " << caspar::bluefish::get_version();\r
auto blue = caspar::bluefish::get_device_list();\r
std::for_each(blue.begin(), blue.end(), [](const std::wstring& device)\r
{\r
}\r
catch(...){}\r
\r
- return EXCEPTION_CONTINUE_EXECUTION;\r
+ return EXCEPTION_EXECUTE_HANDLER;\r
}\r
\r
int main(int argc, wchar_t* argv[])\r
wcmd += L"\r\n";\r
amcp.Parse(wcmd.c_str(), wcmd.length(), dummy);\r
}\r
+ \r
+ Sleep(100); // CASPAR_LOG is asynchronous. Try to get text in correct order.\r
+ system("pause");\r
}\r
catch(boost::property_tree::file_parser_error&)\r
{\r
catch(caspar::gl::ogl_exception&)\r
{\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
- CASPAR_LOG(fatal) << L"Unhandled OpenGL Error in main thread. Please try to update graphics drivers in order to receive full OpenGL 3.1+ Support.";\r
+ CASPAR_LOG(fatal) << L"Unhandled OpenGL Error in main thread. Please try to update graphics drivers for OpenGL 3.0+ Support.";\r
}\r
catch(...)\r
{\r
} \r
\r
CASPAR_LOG(info) << "Successfully shutdown CasparCG Server.";\r
- Sleep(100); // CAPSAR_LOG is asynchronous. Try to get text in correct order.\r
- system("pause");\r
return 0;\r
}
\ No newline at end of file
implementation() \r
{ \r
ffmpeg::init();\r
- bluefish::init();\r
- decklink::init();\r
- flash::init();\r
- oal::init();\r
- ogl::init();\r
- //init_silverlight();\r
- image::init();\r
+ CASPAR_LOG(info) << L"Initialized ffmpeg module.";\r
+ \r
+ bluefish::init(); \r
+ CASPAR_LOG(info) << L"Initialized bluefish module.";\r
+ \r
+ decklink::init(); \r
+ CASPAR_LOG(info) << L"Initialized decklink module.";\r
+ \r
+ flash::init(); \r
+ CASPAR_LOG(info) << L"Initialized flash module.";\r
+ \r
+ oal::init(); \r
+ CASPAR_LOG(info) << L"Initialized oal module.";\r
+ \r
+ ogl::init(); \r
+ CASPAR_LOG(info) << L"Initialized ogl module.";\r
+\r
+ image::init(); \r
+ CASPAR_LOG(info) << L"Initialized image module.";\r
\r
setup_channels(env::properties());\r
+ CASPAR_LOG(info) << L"Initialized channels.";\r
+\r
setup_controllers(env::properties());\r
+ CASPAR_LOG(info) << L"Initialized controllers.";\r
}\r
\r
~implementation()\r
if(format_desc.format == video_format::invalid)\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Invalid video-mode."));\r
\r
- channels_.push_back(video_channel(channels_.size(), format_desc, ogl_));\r
+ channels_.push_back(make_safe<video_channel>(channels_.size(), format_desc, ogl_));\r
\r
int index = 0;\r
BOOST_FOREACH(auto& xml_consumer, xml_channel.second.get_child("consumers"))\r