D* get_deleter(safe_ptr<U> const& ptr) { return impl_.get_deleter(); } // noexcept\r
};\r
\r
+template<class T, class U>\r
+bool operator==(const std::shared_ptr<T>& a, const safe_ptr<U>& b) // noexcept\r
+{\r
+ return a.get() == b.get();\r
+}\r
+\r
+template<class T, class U>\r
+bool operator==(const safe_ptr<T>& a, const std::shared_ptr<U>& b) // noexcept\r
+{\r
+ return a.get() == b.get();\r
+}\r
+\r
template<class T, class U>\r
bool operator==(const safe_ptr<T>& a, const safe_ptr<U>& b) // noexcept\r
{\r
GL(glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));\r
GL(glTexImage2D(GL_TEXTURE_2D, 0, INTERNAL_FORMAT[stride_], width_, height_, 0, FORMAT[stride_], GL_UNSIGNED_BYTE, NULL));\r
GL(glBindTexture(GL_TEXTURE_2D, 0));\r
- //CASPAR_LOG(trace) << "[device_buffer] allocated size:" << width*height*stride; \r
+ CASPAR_LOG(trace) << "[device_buffer] allocated size:" << width*height*stride; \r
clear();\r
} \r
\r
if(!pbo_)\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to allocate buffer."));\r
\r
- //CASPAR_LOG(trace) << "[host_buffer] allocated size:" << size_ << " usage: " << (usage == write_only ? "write_only" : "read_only");\r
+ CASPAR_LOG(trace) << "[host_buffer] allocated size:" << size_ << " usage: " << (usage == write_only ? "write_only" : "read_only");\r
} \r
\r
~implementation()\r
\r
namespace caspar { namespace core {\r
\r
-struct write_frame::implementation : boost::noncopyable\r
+struct write_frame::implementation\r
{ \r
- ogl_device& ogl_;\r
- std::vector<std::shared_ptr<host_buffer>> buffers_;\r
- std::vector<safe_ptr<device_buffer>> textures_;\r
- std::vector<int16_t> audio_data_;\r
- const core::pixel_format_desc desc_;\r
- int tag_;\r
- core::video_mode::type mode_;\r
+ ogl_device& ogl_;\r
+ std::vector<std::shared_ptr<host_buffer>> buffers_;\r
+ std::array<std::shared_ptr<device_buffer>, 4> textures_;\r
+ std::vector<int16_t> audio_data_;\r
+ const core::pixel_format_desc desc_;\r
+ int tag_;\r
+ core::video_mode::type mode_;\r
\r
implementation(ogl_device& ogl, int tag, const core::pixel_format_desc& desc) \r
: ogl_(ogl)\r
{\r
return ogl_.create_host_buffer(plane.size, host_buffer::write_only);\r
});\r
- std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(textures_), [&](const core::pixel_format_desc::plane& plane)\r
- {\r
- return ogl_.create_device_buffer(plane.width, plane.height, plane.channels);\r
- });\r
}, high_priority);\r
}\r
- \r
+ \r
void accept(write_frame& self, core::frame_visitor& visitor)\r
{\r
visitor.begin(self);\r
\r
if(!buffer)\r
return;\r
-\r
- auto texture = textures_[plane_index];\r
-\r
+ \r
ogl_.begin_invoke([=]\r
{\r
- texture->read(*buffer);\r
+ auto plane = desc_.planes[plane_index];\r
+ textures_[plane_index] = ogl_.create_device_buffer(plane.width, plane.height, plane.channels); \r
+ textures_[plane_index]->read(*buffer);\r
}, high_priority);\r
}\r
};\r
\r
write_frame::write_frame(ogl_device& ogl, int32_t tag, const core::pixel_format_desc& desc) \r
: impl_(new implementation(ogl, tag, desc)){}\r
+write_frame::write_frame(const write_frame& other) : impl_(new implementation(*other.impl_)){}\r
void write_frame::accept(core::frame_visitor& visitor){impl_->accept(*this, visitor);}\r
\r
boost::iterator_range<uint8_t*> write_frame::image_data(size_t index){return impl_->image_data(index);}\r
}\r
int write_frame::tag() const {return impl_->tag_;}\r
const core::pixel_format_desc& write_frame::get_pixel_format_desc() const{return impl_->desc_;}\r
-const std::vector<safe_ptr<device_buffer>>& write_frame::get_textures() const{return impl_->textures_;}\r
+const std::vector<safe_ptr<device_buffer>> write_frame::get_textures() const\r
+{\r
+ std::vector<safe_ptr<device_buffer>> textures;\r
+ BOOST_FOREACH(auto texture, impl_->textures_)\r
+ {\r
+ if(texture)\r
+ textures.push_back(make_safe(texture));\r
+ }\r
+\r
+ return textures;\r
+}\r
void write_frame::commit(size_t plane_index){impl_->commit(plane_index);}\r
void write_frame::commit(){impl_->commit();}\r
void write_frame::set_type(const video_mode::type& mode){impl_->mode_ = mode;}\r
{\r
public: \r
explicit write_frame(ogl_device& ogl, int tag, const core::pixel_format_desc& desc);\r
+ write_frame(const write_frame& other);\r
\r
virtual boost::iterator_range<uint8_t*> image_data(size_t plane_index = 0); \r
virtual std::vector<int16_t>& audio_data();\r
\r
private:\r
friend class image_mixer;\r
-\r
- const std::vector<safe_ptr<device_buffer>>& get_textures() const;\r
+ \r
+ const std::vector<safe_ptr<device_buffer>> get_textures() const;\r
\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
public:\r
explicit color_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& color) \r
: color_str_(color)\r
- , frame_(basic_frame::empty())\r
- {\r
- if(color.length() != 9 || color[0] != '#')\r
- BOOST_THROW_EXCEPTION(invalid_argument() << arg_name_info("color") << arg_value_info(narrow(color)) << msg_info("Invalid color code"));\r
-\r
- auto frame = frame_factory->create_frame(this, 1, 1, pixel_format::bgra);\r
- \r
- // Read color from hex-string and write to frame pixel.\r
- auto& value = *reinterpret_cast<uint32_t*>(frame->image_data().begin());\r
- std::wstringstream str(color_str_.substr(1));\r
- str >> std::hex >> value;\r
-\r
- frame->commit();\r
-\r
- frame_ = std::move(frame);\r
- }\r
+ , frame_(create_color_frame(this, frame_factory, color))\r
+ {}\r
\r
// frame_producer\r
\r
return make_safe<color_producer>(frame_factory, params[0]);\r
}\r
\r
+safe_ptr<core::write_frame> create_color_frame(void* tag, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& color)\r
+{\r
+ if(color.length() != 9 || color[0] != '#')\r
+ BOOST_THROW_EXCEPTION(invalid_argument() << arg_name_info("color") << arg_value_info(narrow(color)) << msg_info("Invalid color code"));\r
+\r
+ auto frame = frame_factory->create_frame(tag, 1, 1, pixel_format::bgra);\r
+ \r
+ // Read color from hex-string and write to frame pixel.\r
+ auto& value = *reinterpret_cast<uint32_t*>(frame->image_data().begin());\r
+ std::wstringstream str(color.substr(1));\r
+ str >> std::hex >> value;\r
+\r
+ frame->commit();\r
+ \r
+ return frame;\r
+}\r
+\r
}}
\ No newline at end of file
#include <vector>\r
\r
namespace caspar { namespace core {\r
- \r
+\r
safe_ptr<frame_producer> create_color_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::vector<std::wstring>& params);\r
+safe_ptr<core::write_frame> create_color_frame(void* tag, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& color);\r
\r
}}\r
if(frame1 == basic_frame::empty() && frame2 == basic_frame::empty())\r
return basic_frame::empty();\r
\r
- if(frame1 == basic_frame::eof() && frame2 == basic_frame::eof())\r
+ if(frame1 == basic_frame::eof() || frame2 == basic_frame::eof())\r
return basic_frame::eof();\r
\r
if(frame1 == frame2 || mode == video_mode::progressive)\r
count,\r
invalid\r
};\r
+\r
+ static std::wstring print(video_mode::type value)\r
+ {\r
+ switch(value)\r
+ {\r
+ case progressive:\r
+ return L"progressive"; \r
+ case lower:\r
+ return L"lower";\r
+ case upper:\r
+ return L"upper";\r
+ default:\r
+ return L"invalid";\r
+ }\r
+ }\r
};\r
\r
struct video_format_desc\r
\r
\r
/* File created by MIDL compiler version 7.00.0555 */\r
-/* at Sat Jun 25 15:44:58 2011\r
+/* at Wed Jul 27 12:40:52 2011\r
*/\r
/* Compiler settings for interop\DeckLinkAPI.idl:\r
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 7.00.0555 \r
\r
\r
/* File created by MIDL compiler version 7.00.0555 */\r
-/* at Sat Jun 25 15:44:58 2011\r
+/* at Wed Jul 27 12:40:52 2011\r
*/\r
/* Compiler settings for interop\DeckLinkAPI.idl:\r
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 7.00.0555 \r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="producer\filter_producer.cpp">\r
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>\r
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">true</ExcludedFromBuild>\r
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">true</ExcludedFromBuild>\r
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>\r
- </ClCompile>\r
<ClCompile Include="producer\input.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
<ClInclude Include="producer\audio\audio_decoder.h" />\r
<ClInclude Include="producer\ffmpeg_producer.h" />\r
<ClInclude Include="producer\filter\filter.h" />\r
- <ClInclude Include="producer\filter_producer.h">\r
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>\r
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">true</ExcludedFromBuild>\r
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">true</ExcludedFromBuild>\r
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>\r
- </ClInclude>\r
<ClInclude Include="producer\input.h" />\r
<ClInclude Include="producer\util.h" />\r
<ClInclude Include="producer\video\video_decoder.h" />\r
<Filter Include="source\producer">\r
<UniqueIdentifier>{c5a94fd1-4552-4f6d-97cd-24e44e662e0f}</UniqueIdentifier>\r
</Filter>\r
- <Filter Include="source\producer\video">\r
- <UniqueIdentifier>{4b0f3949-6dc5-4895-837f-4c3ef1759a90}</UniqueIdentifier>\r
- </Filter>\r
<Filter Include="source\producer\audio">\r
<UniqueIdentifier>{6937307b-550f-48f8-9cc0-509de0e18ddc}</UniqueIdentifier>\r
</Filter>\r
<Filter Include="source\producer\filter">\r
<UniqueIdentifier>{0d46b4fa-2b19-44b7-82ea-52cbd9ad24a4}</UniqueIdentifier>\r
</Filter>\r
+ <Filter Include="source\producer\video">\r
+ <UniqueIdentifier>{4b0f3949-6dc5-4895-837f-4c3ef1759a90}</UniqueIdentifier>\r
+ </Filter>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="producer\ffmpeg_producer.cpp">\r
<ClCompile Include="producer\filter\filter.cpp">\r
<Filter>source\producer\filter</Filter>\r
</ClCompile>\r
- <ClCompile Include="producer\filter_producer.cpp">\r
- <Filter>source\producer</Filter>\r
- </ClCompile>\r
</ItemGroup>\r
<ItemGroup>\r
<ClInclude Include="producer\ffmpeg_producer.h">\r
<ClInclude Include="producer\util.h">\r
<Filter>source\producer</Filter>\r
</ClInclude>\r
- <ClInclude Include="producer\filter_producer.h">\r
- <Filter>source\producer</Filter>\r
- </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
\r
#include "audio_decoder.h"\r
\r
+#include <tbb/task_group.h>\r
+\r
#if defined(_MSC_VER)\r
#pragma warning (push)\r
#pragma warning (disable : 4244)\r
#endif\r
\r
namespace caspar {\r
-\r
+ \r
struct audio_decoder::implementation : boost::noncopyable\r
{ \r
- input& input_;\r
- AVCodecContext& codec_context_; \r
- const core::video_format_desc format_desc_;\r
-\r
- std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>> current_chunk_; \r
-\r
- size_t frame_number_;\r
- bool wait_for_eof_;\r
-\r
- std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>> buffer_;\r
+ std::shared_ptr<AVCodecContext> codec_context_; \r
+ const core::video_format_desc format_desc_;\r
+ int index_;\r
+ std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>> buffer_; // avcodec_decode_audio3 needs 4 byte alignment\r
+ std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>> audio_samples_; // avcodec_decode_audio3 needs 4 byte alignment\r
+ std::queue<std::shared_ptr<AVPacket>> packets_;\r
public:\r
- explicit implementation(input& input, const core::video_format_desc& format_desc) \r
- : input_(input)\r
- , codec_context_(*input_.get_audio_codec_context())\r
- , format_desc_(format_desc) \r
- , frame_number_(0)\r
- , wait_for_eof_(false)\r
- , buffer_(4*format_desc_.audio_sample_rate*2+FF_INPUT_BUFFER_PADDING_SIZE/2, 0)\r
+ explicit implementation(AVStream* stream, const core::video_format_desc& format_desc) \r
+ : format_desc_(format_desc) \r
{\r
- if(codec_context_.sample_rate != static_cast<int>(format_desc_.audio_sample_rate) || \r
- codec_context_.channels != static_cast<int>(format_desc_.audio_channels))\r
+ if(!stream || !stream->codec)\r
+ return;\r
+\r
+ auto codec = avcodec_find_decoder(stream->codec->codec_id); \r
+ if(!codec)\r
+ return;\r
+ \r
+ int errn = avcodec_open(stream->codec, codec);\r
+ if(errn < 0)\r
+ return;\r
+ \r
+ index_ = stream->index;\r
+ codec_context_.reset(stream->codec, avcodec_close);\r
+\r
+ if(codec_context_ &&\r
+ (codec_context_->sample_rate != static_cast<int>(format_desc_.audio_sample_rate) || \r
+ codec_context_->channels != static_cast<int>(format_desc_.audio_channels)))\r
{ \r
BOOST_THROW_EXCEPTION(\r
file_read_error() <<\r
msg_info("Invalid sample-rate or number of channels.") <<\r
- arg_value_info(boost::lexical_cast<std::string>(codec_context_.sample_rate)) << \r
+ arg_value_info(boost::lexical_cast<std::string>(codec_context_->sample_rate)) << \r
arg_name_info("codec_context"));\r
- }\r
- }\r
- \r
- std::deque<std::pair<int, std::vector<int16_t>>> receive()\r
- {\r
- std::deque<std::pair<int, std::vector<int16_t>>> result;\r
- \r
- std::shared_ptr<AVPacket> pkt;\r
- for(int n = 0; n < 32 && result.empty() && input_.try_pop_audio_packet(pkt); ++n) \r
- result = decode(pkt);\r
-\r
- return result;\r
+ } \r
}\r
\r
- std::deque<std::pair<int, std::vector<int16_t>>> decode(const std::shared_ptr<AVPacket>& audio_packet)\r
+ void push(const std::shared_ptr<AVPacket>& packet)\r
{ \r
- std::deque<std::pair<int, std::vector<int16_t>>> result;\r
+ if(!codec_context_)\r
+ return;\r
\r
- if(!audio_packet) // eof\r
- { \r
- avcodec_flush_buffers(&codec_context_);\r
- current_chunk_.clear();\r
- frame_number_ = 0;\r
- wait_for_eof_ = false;\r
- return result;\r
- }\r
+ if(packet && packet->stream_index != index_)\r
+ return;\r
\r
- if(wait_for_eof_)\r
- return result;\r
- \r
- int written_bytes = buffer_.size()-FF_INPUT_BUFFER_PADDING_SIZE/2;\r
- const int errn = avcodec_decode_audio3(&codec_context_, buffer_.data(), &written_bytes, audio_packet.get());\r
- if(errn < 0)\r
- { \r
- BOOST_THROW_EXCEPTION(\r
- invalid_operation() <<\r
- boost::errinfo_api_function("avcodec_decode_audio2") <<\r
- boost::errinfo_errno(AVUNERROR(errn)));\r
+ packets_.push(packet);\r
+ } \r
+ \r
+ std::vector<std::vector<int16_t>> poll()\r
+ {\r
+ std::vector<std::vector<int16_t>> result;\r
+\r
+ if(!codec_context_)\r
+ result.push_back(std::vector<int16_t>(format_desc_.audio_samples_per_frame, 0));\r
+ else if(!packets_.empty())\r
+ {\r
+ decode(packets_.front());\r
+ packets_.pop();\r
+\r
+ while(audio_samples_.size() > format_desc_.audio_samples_per_frame)\r
+ {\r
+ const auto begin = audio_samples_.begin();\r
+ const auto end = audio_samples_.begin() + format_desc_.audio_samples_per_frame;\r
+\r
+ result.push_back(std::vector<int16_t>(begin, end));\r
+ audio_samples_.erase(begin, end);\r
+ }\r
}\r
\r
- current_chunk_.insert(current_chunk_.end(), buffer_.begin(), buffer_.begin() + written_bytes/2);\r
-\r
- const auto last = current_chunk_.end() - current_chunk_.size() % format_desc_.audio_samples_per_frame;\r
- \r
- for(auto it = current_chunk_.begin(); it != last; it += format_desc_.audio_samples_per_frame) \r
- result.push_back(std::make_pair(frame_number_++, std::vector<int16_t>(it, it + format_desc_.audio_samples_per_frame))); \r
-\r
- current_chunk_.erase(current_chunk_.begin(), last);\r
-\r
return result;\r
}\r
\r
- void restart()\r
+ void decode(const std::shared_ptr<AVPacket>& packet)\r
+ { \r
+ if(!packet) // eof\r
+ {\r
+ auto truncate = audio_samples_.size() % format_desc_.audio_samples_per_frame;\r
+ if(truncate > 0)\r
+ {\r
+ audio_samples_.resize(audio_samples_.size() - truncate); \r
+ CASPAR_LOG(info) << L"Truncating " << truncate << L" audio-samples."; \r
+ }\r
+ avcodec_flush_buffers(codec_context_.get());\r
+ }\r
+ else\r
+ {\r
+ buffer_.resize(4*format_desc_.audio_sample_rate*2+FF_INPUT_BUFFER_PADDING_SIZE/2, 0);\r
+\r
+ int written_bytes = buffer_.size() - FF_INPUT_BUFFER_PADDING_SIZE/2;\r
+ const int errn = avcodec_decode_audio3(codec_context_.get(), buffer_.data(), &written_bytes, packet.get());\r
+ if(errn < 0)\r
+ { \r
+ BOOST_THROW_EXCEPTION(\r
+ invalid_operation() <<\r
+ boost::errinfo_api_function("avcodec_decode_audio2") <<\r
+ boost::errinfo_errno(AVUNERROR(errn)));\r
+ }\r
+\r
+ buffer_.resize(written_bytes/2);\r
+ audio_samples_.insert(audio_samples_.end(), buffer_.begin(), buffer_.end());\r
+ buffer_.clear(); \r
+ }\r
+ }\r
+\r
+ bool ready() const\r
{\r
- wait_for_eof_ = true;\r
+ return !codec_context_ || !packets_.empty();\r
}\r
};\r
\r
-audio_decoder::audio_decoder(input& input, const core::video_format_desc& format_desc) : impl_(new implementation(input, format_desc)){}\r
-std::deque<std::pair<int, std::vector<int16_t>>> audio_decoder::receive(){return impl_->receive();}\r
-void audio_decoder::restart(){impl_->restart();}\r
+audio_decoder::audio_decoder(AVStream* stream, const core::video_format_desc& format_desc) : impl_(new implementation(stream, format_desc)){}\r
+void audio_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
+bool audio_decoder::ready() const{return impl_->ready();}\r
+std::vector<std::vector<int16_t>> audio_decoder::poll(){return impl_->poll();}\r
}
\ No newline at end of file
*/\r
#pragma once\r
\r
-#include "../input.h"\r
+#include <common/memory/safe_ptr.h>\r
\r
#include <core/video_format.h>\r
\r
-#include <tbb/cache_aligned_allocator.h>\r
-\r
#include <boost/noncopyable.hpp>\r
\r
-#include <memory>\r
-#include <vector>\r
+#include <deque>\r
\r
+struct AVStream;\r
struct AVCodecContext;\r
\r
namespace caspar {\r
- \r
+\r
class audio_decoder : boost::noncopyable\r
{\r
public:\r
- explicit audio_decoder(input& input, const core::video_format_desc& format_desc);\r
-\r
- std::deque<std::pair<int, std::vector<int16_t>>> receive();\r
+ explicit audio_decoder(AVStream* stream, const core::video_format_desc& format_desc);\r
+ \r
+ void push(const std::shared_ptr<AVPacket>& packet);\r
+ bool ready() const;\r
+ std::vector<std::vector<int16_t>> poll();\r
\r
- void restart();\r
private:\r
struct implementation;\r
- std::shared_ptr<implementation> impl_;\r
+ safe_ptr<implementation> impl_;\r
};\r
\r
}
\ No newline at end of file
#include <core/mixer/write_frame.h>\r
#include <core/producer/frame/audio_transform.h>\r
#include <core/video_format.h>\r
+#include <core/producer/color/color_producer.h>\r
\r
#include <common/env.h>\r
\r
-#include <tbb/parallel_invoke.h>\r
-#include <tbb/task_group.h>\r
-\r
#include <boost/timer.hpp>\r
#include <boost/range/algorithm.hpp>\r
#include <boost/range/algorithm_ext.hpp>\r
\r
+#include <tbb/task_group.h>\r
+\r
#include <deque>\r
+#include <vector>\r
\r
namespace caspar {\r
- \r
+\r
+struct display_mode\r
+{\r
+ enum type\r
+ {\r
+ simple,\r
+ duplicate,\r
+ half,\r
+ interlace,\r
+ deinterlace,\r
+ deinterlace_half,\r
+ count,\r
+ invalid\r
+ };\r
+\r
+ static std::wstring print(display_mode::type value)\r
+ {\r
+ switch(value)\r
+ {\r
+ case simple:\r
+ return L"simple";\r
+ case duplicate:\r
+ return L"duplicate";\r
+ case half:\r
+ return L"half";\r
+ case interlace:\r
+ return L"interlace";\r
+ case deinterlace:\r
+ return L"deinterlace";\r
+ case deinterlace_half:\r
+ return L"deinterlace_half";\r
+ default:\r
+ return L"invalid";\r
+ }\r
+ }\r
+};\r
+\r
+display_mode::type get_display_mode(const core::video_mode::type in_mode, double in_fps, const core::video_mode::type out_mode, double out_fps)\r
+{ \r
+ if(in_mode == core::video_mode::invalid || out_mode == core::video_mode::invalid)\r
+ return display_mode::invalid;\r
+\r
+ static const auto epsilon = 2.0;\r
+\r
+ if(std::abs(in_fps - out_fps) < epsilon)\r
+ {\r
+ if(in_mode != core::video_mode::progressive && out_mode == core::video_mode::progressive)\r
+ return display_mode::deinterlace_half;\r
+ //else if(in_mode == core::video_mode::progressive && out_mode != core::video_mode::progressive)\r
+ // simple(); // interlace_duplicate();\r
+ else\r
+ return display_mode::simple;\r
+ }\r
+ else if(std::abs(in_fps/2.0 - out_fps) < epsilon)\r
+ {\r
+ if(in_mode != core::video_mode::progressive)\r
+ return display_mode::invalid;\r
+\r
+ if(out_mode != core::video_mode::progressive)\r
+ return display_mode::interlace;\r
+ else\r
+ return display_mode::half;\r
+ }\r
+ else if(std::abs(in_fps - out_fps/2.0) < epsilon)\r
+ {\r
+ if(out_mode != core::video_mode::progressive)\r
+ return display_mode::invalid;\r
+\r
+ if(in_mode != core::video_mode::progressive)\r
+ return display_mode::deinterlace;\r
+ else\r
+ return display_mode::duplicate;\r
+ }\r
+\r
+ return display_mode::invalid;\r
+}\r
+ \r
struct ffmpeg_producer : public core::frame_producer\r
{\r
- const std::wstring filename_;\r
+ const std::wstring filename_;\r
\r
- const safe_ptr<diagnostics::graph> graph_;\r
- boost::timer frame_timer_;\r
+ const safe_ptr<diagnostics::graph> graph_;\r
+ boost::timer frame_timer_;\r
\r
- const safe_ptr<core::frame_factory> frame_factory_;\r
+ const safe_ptr<core::frame_factory> frame_factory_;\r
+ const core::video_format_desc format_desc_;\r
\r
- input input_; \r
- std::unique_ptr<video_decoder> video_decoder_;\r
- std::unique_ptr<audio_decoder> audio_decoder_;\r
+ input input_; \r
+ video_decoder video_decoder_;\r
+ audio_decoder audio_decoder_;\r
+\r
+ std::queue<safe_ptr<core::basic_frame>> frame_buffer_;\r
+ std::queue<safe_ptr<core::basic_frame>> output_buffer_;\r
+\r
+ const bool auto_convert_;\r
+ display_mode::type display_mode_;\r
+\r
+ std::deque<safe_ptr<core::write_frame>> video_frames_;\r
+ std::deque<std::vector<int16_t>> audio_chunks_;\r
+\r
+ tbb::task_group tasks_;\r
\r
- std::deque<std::pair<int, std::vector<int16_t>>> audio_chunks_;\r
- std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_frames_;\r
- \r
- tbb::task_group task_group_;\r
public:\r
explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, const std::wstring& filter_str, bool loop, int start, int length) \r
: filename_(filename)\r
, graph_(diagnostics::create_graph(narrow(print())))\r
, frame_factory_(frame_factory) \r
+ , format_desc_(frame_factory->get_video_format_desc())\r
, input_(safe_ptr<diagnostics::graph>(graph_), filename_, loop, start, length)\r
+ , video_decoder_(input_.stream(AVMEDIA_TYPE_VIDEO), frame_factory)\r
+ , audio_decoder_(input_.stream(AVMEDIA_TYPE_AUDIO), frame_factory->get_video_format_desc())\r
+ , auto_convert_(env::properties().get("configuration.ffmpeg.auto-mode", false))\r
+ , display_mode_(display_mode::invalid)\r
{\r
graph_->add_guide("frame-time", 0.5);\r
- graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
+ graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f)); \r
\r
- double frame_time = 1.0f/input_.fps();\r
- double format_frame_time = 1.0/frame_factory->get_video_format_desc().fps;\r
- if(abs(frame_time - format_frame_time) > 0.0001 && abs(frame_time - format_frame_time/2) > 0.0001)\r
- CASPAR_LOG(warning) << print() << L" Invalid framerate detected. This may cause distorted audio during playback. frame-time: " << frame_time;\r
- \r
- video_decoder_.reset(input_.get_video_codec_context() ? \r
- new video_decoder(input_, frame_factory, narrow(filter_str)) : nullptr);\r
- \r
- audio_decoder_.reset(input_.get_audio_codec_context() ? \r
- new audio_decoder(input_, frame_factory->get_video_format_desc()) : nullptr); \r
- \r
- // Fill buffers.\r
- for(size_t n = 0; n < 2; ++n)\r
- decode_packets();\r
+ for(int n = 0; n < 128 && frame_buffer_.size() < 4; ++n)\r
+ decode_frame();\r
}\r
\r
- virtual safe_ptr<core::basic_frame> receive()\r
- { \r
- // "receive" is called on the same thread as the gpu mixer runs. Minimize "receive" time in order to allow gpu and cpu to run in parallel. \r
- task_group_.wait();\r
-\r
- auto result = get_frame();\r
-\r
- task_group_.run([=]\r
- {\r
- frame_timer_.restart();\r
- decode_packets();\r
- graph_->update_value("frame-time", static_cast<float>(frame_timer_.elapsed()*frame_factory_->get_video_format_desc().fps*0.5));\r
- }); \r
- \r
- return result;\r
- }\r
- \r
- virtual std::wstring print() const\r
+ ~ffmpeg_producer()\r
{\r
- return L"ffmpeg[" + boost::filesystem::wpath(filename_).filename() + L"]";\r
+ tasks_.cancel();\r
+ tasks_.wait();\r
}\r
\r
- void decode_packets()\r
+ virtual safe_ptr<core::basic_frame> receive()\r
{\r
- tbb::parallel_invoke\r
- (\r
- [&]\r
- {\r
- try\r
- {\r
- if(video_decoder_ && video_frames_.size() < 3)\r
- boost::range::push_back(video_frames_, video_decoder_->receive()); \r
- }\r
- catch(...)\r
- {\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- video_decoder_.reset();\r
- }\r
- }, \r
- [&]\r
+ if(output_buffer_.empty())\r
+ { \r
+ tasks_.wait();\r
+\r
+ output_buffer_ = std::move(frame_buffer_);\r
+\r
+ tasks_.run([=]\r
{\r
- try\r
- {\r
- if(audio_decoder_ && audio_chunks_.size() < 3)\r
- boost::range::push_back(audio_chunks_, audio_decoder_->receive()); \r
- }\r
- catch(...)\r
- {\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- audio_decoder_.reset();\r
- }\r
- }\r
- );\r
+ frame_timer_.restart();\r
+\r
+ for(int n = 0; n < 64 && frame_buffer_.empty(); ++n)\r
+ decode_frame();\r
+\r
+ graph_->update_value("frame-time", static_cast<float>(frame_timer_.elapsed()*format_desc_.fps*0.5));\r
+ });\r
+ }\r
\r
- // If video is on first frame, sync with audio\r
- if(audio_decoder_ && video_decoder_ && !video_frames_.empty() && !audio_chunks_.empty() &&\r
- video_frames_.front().first == 0 && audio_chunks_.front().first != 0)\r
+ auto frame = core::basic_frame::late();\r
+\r
+ if(output_buffer_.empty())\r
{\r
- audio_decoder_->restart(); // Notify decoder to wait for eof which was sent with video eof.\r
- audio_chunks_ = audio_decoder_->receive(); \r
+ if(input_.eof())\r
+ frame = core::basic_frame::eof();\r
+ else\r
+ graph_->add_tag("underflow"); \r
+ }\r
+ else\r
+ {\r
+ frame = output_buffer_.front();\r
+ output_buffer_.pop();\r
}\r
\r
- CASPAR_ASSERT(!(video_decoder_ && audio_decoder_ && !video_frames_.empty() && !audio_chunks_.empty()) ||\r
- video_frames_.front().first == audio_chunks_.front().first);\r
+ return frame;\r
}\r
\r
- // FIXME: Don't re-interlace when going from 50i to 50p, maybe do this inside decoder?\r
- safe_ptr<core::basic_frame> get_video_frame(std::vector<int16_t>&& audio_chunk)\r
+ void decode_frame()\r
{\r
- auto frame = std::move(video_frames_.front().second); \r
- auto frame_number = video_frames_.front().first;\r
- video_frames_.pop_front();\r
- \r
- frame->audio_data() = std::move(audio_chunk);\r
- if(frame->audio_data().empty())\r
- frame->get_audio_transform().set_has_audio(false); \r
-\r
- if(!video_frames_.empty()) // interlace if we have double frames\r
+ for(int n = 0; n < 32 && ((video_frames_.size() < 2 && !video_decoder_.ready()) || (audio_chunks_.size() < 2 && !audio_decoder_.ready())); ++n) \r
{\r
- if(video_frames_.front().first == frame_number)\r
+ std::shared_ptr<AVPacket> pkt;\r
+ if(input_.try_pop(pkt))\r
{\r
- auto frame2 = std::move(video_frames_.front().second); \r
- video_frames_.pop_front();\r
- frame2->get_audio_transform().set_has_audio(false); \r
-\r
- return core::basic_frame::interlace(frame, frame2, frame_factory_->get_video_format_desc().mode);\r
+ video_decoder_.push(pkt);\r
+ audio_decoder_.push(pkt);\r
}\r
}\r
+ \r
+ tbb::parallel_invoke(\r
+ [=]\r
+ {\r
+ if(video_frames_.size() < 2)\r
+ boost::range::push_back(video_frames_, video_decoder_.poll());\r
+ },\r
+ [=]\r
+ {\r
+ if(audio_chunks_.size() < 2)\r
+ boost::range::push_back(audio_chunks_, audio_decoder_.poll());\r
+ });\r
\r
- return frame;\r
+ if(video_frames_.empty() || audio_chunks_.empty())\r
+ return;\r
+\r
+ if(auto_convert_)\r
+ auto_convert();\r
+ else\r
+ simple();\r
}\r
\r
- safe_ptr<core::basic_frame> get_frame()\r
- { \r
- if(video_decoder_ && audio_decoder_ && !video_frames_.empty() && !audio_chunks_.empty())\r
+ void auto_convert()\r
+ {\r
+ auto current_display_mode = get_display_mode(video_decoder_.mode(), video_decoder_.fps(), format_desc_.mode, format_desc_.fps); \r
+ if(current_display_mode != display_mode_)\r
{\r
- auto audio_chunk = std::move(audio_chunks_.front().second);\r
- audio_chunks_.pop_front();\r
- \r
- return get_video_frame(std::move(audio_chunk));\r
- }\r
- else if(video_decoder_ && !audio_decoder_ && !video_frames_.empty())\r
- { \r
- return get_video_frame(std::vector<int16_t>());\r
+ display_mode_ = current_display_mode;\r
+ CASPAR_LOG(info) << print() << " display_mode: " << display_mode::print(display_mode_) << \r
+ L" in: " << core::video_mode::print(video_decoder_.mode()) << L" " << video_decoder_.fps() << " fps" <<\r
+ L" out: " << core::video_mode::print(format_desc_.mode) << L" " << format_desc_.fps << " fps";\r
}\r
- else if(audio_decoder_ && !video_decoder_ && !audio_chunks_.empty())\r
- {\r
- auto frame = frame_factory_->create_frame(this, 1, 1);\r
- std::fill(frame->image_data().begin(), frame->image_data().end(), 0);\r
- \r
- frame->audio_data() = std::move(audio_chunks_.front().second);\r
- audio_chunks_.pop_front();\r
\r
- return frame;\r
- }\r
- else if(!input_.is_running() || (!video_decoder_ && !audio_decoder_))\r
- {\r
- return core::basic_frame::eof();\r
- }\r
- else\r
+ switch(display_mode_)\r
{\r
- graph_->add_tag("underflow");\r
- return core::basic_frame::late();\r
+ case display_mode::simple:\r
+ return simple();\r
+ case display_mode::duplicate:\r
+ return duplicate();\r
+ case display_mode::half:\r
+ return half();\r
+ case display_mode::interlace:\r
+ return interlace();\r
+ case display_mode::deinterlace:\r
+ return deinterlace();\r
+ case display_mode::deinterlace_half:\r
+ return deinterlace_half();\r
+ default:\r
+ BOOST_THROW_EXCEPTION(invalid_operation());\r
}\r
}\r
+\r
+ void simple()\r
+ {\r
+ CASPAR_ASSERT(!video_frames_.empty());\r
+ CASPAR_ASSERT(!audio_chunks_.empty());\r
+\r
+ auto frame1 = video_frames_.front();\r
+ video_frames_.pop_front();\r
+\r
+ frame1->audio_data() = audio_chunks_.front();\r
+ audio_chunks_.pop_front();\r
+\r
+ frame_buffer_.push(frame1);\r
+ }\r
+\r
+ void duplicate()\r
+ { \r
+ CASPAR_ASSERT(!video_frames_.empty());\r
+ CASPAR_ASSERT(!audio_chunks_.empty());\r
+\r
+ auto frame = video_frames_.front();\r
+ video_frames_.pop_front();\r
+\r
+ auto frame1 = make_safe<core::write_frame>(*frame); // make a copy\r
+ frame1->audio_data() = audio_chunks_.front();\r
+ audio_chunks_.pop_front();\r
+\r
+ auto frame2 = frame;\r
+ frame2->audio_data() = audio_chunks_.front();\r
+ audio_chunks_.pop_front();\r
+\r
+ frame_buffer_.push(frame1);\r
+ frame_buffer_.push(frame2);\r
+ }\r
+\r
+ void half()\r
+ { \r
+ CASPAR_ASSERT(!video_frames_.empty());\r
+ CASPAR_ASSERT(!audio_chunks_.empty());\r
+\r
+ if(video_frames_.size() < 2 && !input_.eof())\r
+ return;\r
+ \r
+ if(video_frames_.size() < 2)\r
+ video_frames_.push_back(create_color_frame(this, frame_factory_, L"#00000000"));\r
+\r
+ CASPAR_ASSERT(video_frames_.size() == 2);\r
+ \r
+ auto frame1 =video_frames_.front();\r
+ video_frames_.pop_front();\r
+ frame1->audio_data() = audio_chunks_.front();\r
+ audio_chunks_.pop_front();\r
+ \r
+ video_frames_.pop_front(); // Throw away\r
+\r
+ frame_buffer_.push(frame1);\r
+ }\r
+ \r
+ void interlace()\r
+ { \r
+ CASPAR_ASSERT(!video_frames_.empty());\r
+ CASPAR_ASSERT(!audio_chunks_.empty());\r
+\r
+ if(video_frames_.size() < 2 && !input_.eof())\r
+ return;\r
+\r
+ if(video_frames_.size() < 2)\r
+ video_frames_.push_back(create_color_frame(this, frame_factory_, L"#00000000"));\r
+ \r
+ CASPAR_ASSERT(video_frames_.size() == 2);\r
+\r
+ auto frame1 = video_frames_.front();\r
+ video_frames_.pop_front();\r
+\r
+ frame1->audio_data() = audio_chunks_.front();\r
+ audio_chunks_.pop_front();\r
+ \r
+ auto frame2 = video_frames_.front();\r
+ video_frames_.pop_front();\r
+\r
+ frame_buffer_.push(core::basic_frame::interlace(frame1, frame2, format_desc_.mode)); \r
+ }\r
+ \r
+ void deinterlace()\r
+ {\r
+ BOOST_THROW_EXCEPTION(not_implemented() << msg_info("deinterlace"));\r
+ }\r
+\r
+ void deinterlace_half()\r
+ {\r
+ BOOST_THROW_EXCEPTION(not_implemented() << msg_info("deinterlace_half"));\r
+ }\r
+ \r
+ virtual std::wstring print() const\r
+ {\r
+ return L"ffmpeg[" + boost::filesystem::wpath(filename_).filename() + L"]";\r
+ }\r
};\r
\r
safe_ptr<core::frame_producer> create_ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::vector<std::wstring>& params)\r
if(++seek_it != params.end())\r
start = boost::lexical_cast<int>(*seek_it);\r
}\r
- \r
- std::wstring filter_str = L"";\r
-\r
- auto filter_it = std::find(params.begin(), params.end(), L"FILTER");\r
- if(filter_it != params.end())\r
- {\r
- if(++filter_it != params.end())\r
- filter_str = *filter_it;\r
- }\r
\r
- return make_safe<ffmpeg_producer>(frame_factory, path, filter_str, loop, start, length);\r
+ return make_safe<ffmpeg_producer>(frame_factory, path, L"", loop, start, length);\r
}\r
\r
}
\ No newline at end of file
#include <common/diagnostics/graph.h>\r
\r
#include <tbb/concurrent_queue.h>\r
-#include <tbb/mutex.h>\r
+#include <tbb/atomic.h>\r
\r
-#include <boost/range/iterator_range.hpp>\r
#include <boost/range/algorithm.hpp>\r
+#include <boost/thread/condition_variable.hpp>\r
+#include <boost/thread/mutex.hpp>\r
+#include <boost/range/iterator_range.hpp>\r
\r
extern "C" \r
{\r
}\r
\r
namespace caspar {\r
- \r
-static const size_t PACKET_BUFFER_COUNT = 100; // Assume that av_read_frame distance between audio and video packets is less than PACKET_BUFFER_COUNT.\r
-\r
-class stream\r
-{\r
- std::shared_ptr<AVCodecContext> ctx_;\r
- int index_;\r
- tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>> buffer_;\r
-\r
-public:\r
-\r
- stream() : index_(-1)\r
- {\r
- buffer_.set_capacity(PACKET_BUFFER_COUNT);\r
- }\r
- \r
- int open(std::shared_ptr<AVFormatContext>& fctx, AVMediaType media_type)\r
- { \r
- const auto streams = boost::iterator_range<AVStream**>(fctx->streams, fctx->streams+fctx->nb_streams);\r
- const auto it = boost::find_if(streams, [&](AVStream* stream) \r
- {\r
- return stream && stream->codec->codec_type == media_type;\r
- });\r
- \r
- if(it == streams.end()) \r
- return AVERROR_STREAM_NOT_FOUND;\r
- \r
- auto codec = avcodec_find_decoder((*it)->codec->codec_id); \r
- if(!codec)\r
- return AVERROR_DECODER_NOT_FOUND;\r
- \r
- index_ = (*it)->index;\r
\r
- int errn = tbb_avcodec_open((*it)->codec, codec);\r
- if(errn < 0)\r
- return errn;\r
- \r
- ctx_.reset((*it)->codec, tbb_avcodec_close);\r
-\r
- // Some files give an invalid time_base numerator, try to fix it.\r
- if(ctx_ && ctx_->time_base.num == 1)\r
- ctx_->time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(ctx_->time_base.den)))-1));\r
- \r
- return errn; \r
- }\r
-\r
- bool try_pop(std::shared_ptr<AVPacket>& pkt)\r
- {\r
- return buffer_.try_pop(pkt);\r
- }\r
-\r
- void push(const std::shared_ptr<AVPacket>& pkt)\r
- {\r
- if(pkt && pkt->stream_index != index_)\r
- return;\r
-\r
- if(!ctx_)\r
- return;\r
-\r
- if(pkt)\r
- av_dup_packet(pkt.get());\r
-\r
- buffer_.push(pkt); \r
- }\r
-\r
- int index() const {return index_;}\r
+static const size_t MAX_BUFFER_COUNT = 128;\r
+static const size_t MAX_BUFFER_SIZE = 64 * 1000000;\r
\r
- const std::shared_ptr<AVCodecContext>& ctx() const { return ctx_; }\r
-\r
- operator bool(){return ctx_ != nullptr;}\r
-\r
- double fps() const { return !ctx_ ? -1.0 : static_cast<double>(ctx_->time_base.den) / static_cast<double>(ctx_->time_base.num); }\r
-\r
- bool empty() const { return buffer_.empty();}\r
- int size() const { return buffer_.size();}\r
-};\r
- \r
struct input::implementation : boost::noncopyable\r
{ \r
safe_ptr<diagnostics::graph> graph_;\r
\r
std::shared_ptr<AVFormatContext> format_context_; // Destroy this last\r
\r
- const std::wstring filename_;\r
- const bool loop_;\r
- const int start_; \r
- double fps_;\r
+ const std::wstring filename_;\r
+ const bool loop_;\r
+ const int start_; \r
+\r
+ size_t buffer_size_limit_;\r
+ tbb::atomic<size_t> buffer_size_;\r
+ boost::condition_variable cond_;\r
+ boost::mutex mutex_;\r
\r
- stream video_stream_;\r
- stream audio_stream_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>> buffer_;\r
\r
- std::exception_ptr exception_;\r
executor executor_;\r
public:\r
explicit implementation(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start) \r
, filename_(filename)\r
, executor_(print())\r
, start_(std::max(start, 0))\r
- { \r
+ { \r
int errn;\r
\r
+ buffer_.set_capacity(MAX_BUFFER_COUNT);\r
+\r
AVFormatContext* weak_format_context_ = nullptr;\r
errn = av_open_input_file(&weak_format_context_, narrow(filename).c_str(), nullptr, 0, nullptr);\r
if(errn < 0 || weak_format_context_ == nullptr)\r
boost::errinfo_errno(AVUNERROR(errn)));\r
}\r
\r
- errn = video_stream_.open(format_context_, AVMEDIA_TYPE_VIDEO);\r
- if(errn < 0)\r
- CASPAR_LOG(warning) << print() << L" Could not open video stream: " << widen(av_error_str(errn));\r
- \r
- errn = audio_stream_.open(format_context_, AVMEDIA_TYPE_AUDIO);\r
- if(errn < 0)\r
- CASPAR_LOG(warning) << print() << L" Could not open audio stream: " << widen(av_error_str(errn));\r
- \r
- if(!video_stream_ && !audio_stream_)\r
- { \r
- BOOST_THROW_EXCEPTION(\r
- file_read_error() << \r
- source_info(narrow(print())) << \r
- msg_info("No video or audio codec context found.")); \r
- }\r
-\r
- fps_ = video_stream_ ? video_stream_.fps() : audio_stream_.fps();\r
-\r
if(start_ != 0) \r
seek_frame(start_);\r
-\r
- for(size_t n = 0; n < 32; ++n) // Read some packets for pre-rolling.\r
- read_next_packet();\r
- \r
- if(audio_stream_)\r
- graph_->set_color("audio-input-buffer", diagnostics::color(0.5f, 1.0f, 0.2f));\r
- \r
- if(video_stream_)\r
- graph_->set_color("video-input-buffer", diagnostics::color(0.2f, 0.5f, 1.0f));\r
- \r
+ \r
graph_->set_color("seek", diagnostics::color(0.5f, 1.0f, 0.5f)); \r
+ graph_->set_color("buffer-count", diagnostics::color(0.2f, 0.8f, 1.0f));\r
+ graph_->set_color("buffer-size", diagnostics::color(0.2f, 0.4f, 1.0f)); \r
\r
executor_.begin_invoke([this]{read_file();});\r
CASPAR_LOG(info) << print() << " Started.";\r
stop();\r
// Unblock thread.\r
std::shared_ptr<AVPacket> packet;\r
- try_pop_video_packet(packet);\r
- try_pop_audio_packet(packet);\r
+ buffer_.try_pop(packet);\r
+ buffer_size_ = 0;\r
+ cond_.notify_all();\r
}\r
\r
- bool try_pop_video_packet(std::shared_ptr<AVPacket>& packet)
+ // Pops the next demuxed packet (nullptr entries mark eof/seek points).
+ // Updates the diagnostics graph and wakes the blocked reader thread.
+ bool try_pop(std::shared_ptr<AVPacket>& packet)
{
- bool result = video_stream_.try_pop(packet);
- if(result && !packet)
- graph_->add_tag("video-input-buffer");
- return result;
- }
-
- bool try_pop_audio_packet(std::shared_ptr<AVPacket>& packet)
- { 
- bool result = audio_stream_.try_pop(packet);
- if(result && !packet)
- graph_->add_tag("audio-input-buffer");
+ bool result = buffer_.try_pop(packet);
+ // Graph values are normalized fill ratios (size/capacity), as the old
+ // per-stream graphs were; MAX_BUFFER_SIZE/size was inverted, used the
+ // wrong constant, and divided by zero on an empty buffer.
+ graph_->update_value("buffer-count", static_cast<double>(buffer_.size())/MAX_BUFFER_COUNT);
+ if(packet) // nullptr eof/seek markers carry no buffered bytes.
+ {
+ buffer_size_ -= packet->size;
+ graph_->update_value("buffer-size", static_cast<double>(buffer_size_)/MAX_BUFFER_SIZE);
+ cond_.notify_all();
+ }
return result;
}
-\r
- double fps()\r
+ \r
+ // Returns the first AVStream of the requested media type, or nullptr
+ // when the file has no such stream.
+ AVStream* stream(AVMediaType media_type)
{
- return fps_;
+ const auto streams = boost::iterator_range<AVStream**>(format_context_->streams, format_context_->streams + format_context_->nb_streams);
+ const auto it = boost::find_if(streams, [&](AVStream* stream) 
+ {
+ return stream && stream->codec->codec_type == media_type;
+ });
+ 
+ if(it == streams.end()) 
+ return nullptr;
+
+ return *it;
}
\r
private:\r
\r
void read_file()\r
{ \r
- if(video_stream_.size() > 4 || audio_stream_.size() > 4) // audio is always before video.\r
- Sleep(5); // There are enough packets, no hurry.\r
-\r
read_next_packet();\r
-\r
executor_.begin_invoke([this]{read_file();});\r
}\r
\r
}\r
else
{
- if(video_stream_)
- { 
- video_stream_.push(read_packet);
- graph_->update_value("video-input-buffer", static_cast<float>(video_stream_.size())/static_cast<float>(PACKET_BUFFER_COUNT)); 
- }
- if(audio_stream_)
- { 
- audio_stream_.push(read_packet);
- graph_->update_value("audio-input-buffer", static_cast<float>(audio_stream_.size())/static_cast<float>(PACKET_BUFFER_COUNT)); 
- }
+ av_dup_packet(read_packet.get());
+ buffer_.push(read_packet);
+
+ // Normalized fill ratio (size/capacity), matching the old per-stream
+ // graphs; MAX_BUFFER_SIZE/size was inverted and used the wrong constant.
+ graph_->update_value("buffer-count", static_cast<double>(buffer_.size())/MAX_BUFFER_COUNT);
+ 
+ // Block the reader while over the byte limit, but always keep at least
+ // a couple of packets queued so the decoders are not starved.
+ boost::unique_lock<boost::mutex> lock(mutex_);
+ while(buffer_size_ > MAX_BUFFER_SIZE && buffer_.size() > 2)
+ cond_.wait(lock);
+
+ buffer_size_ += read_packet->size;
+
+ graph_->update_value("buffer-size", static_cast<double>(buffer_size_)/MAX_BUFFER_SIZE);
} 
}
catch(...)\r
static const AVRational base_q = {1, AV_TIME_BASE};\r
\r
// Convert from frames into seconds.\r
- auto seek_target = frame*static_cast<int64_t>(AV_TIME_BASE/fps_);\r
+ auto seek_target = frame;//*static_cast<int64_t>(AV_TIME_BASE/fps_);\r
\r
- int stream_index = video_stream_.index() >= 0 ? video_stream_.index() : audio_stream_.index();\r
+ int stream_index = -1;//video_stream_.index() >= 0 ? video_stream_.index() : audio_stream_.index();\r
\r
- if(stream_index >= 0) \r
- seek_target = av_rescale_q(seek_target, base_q, format_context_->streams[stream_index]->time_base);\r
+ //if(stream_index >= 0) \r
+ // seek_target = av_rescale_q(seek_target, base_q, format_context_->streams[stream_index]->time_base);\r
\r
const int errn = av_seek_frame(format_context_.get(), stream_index, seek_target, flags);\r
if(errn < 0)\r
boost::errinfo_errno(AVUNERROR(errn)));\r
}\r
\r
- video_stream_.push(nullptr);\r
- audio_stream_.push(nullptr);\r
+ buffer_.push(nullptr);\r
} \r
\r
bool is_eof(int errn)\r
\r
std::wstring print() const
{
- const auto video = widen(video_stream_.ctx() ? video_stream_.ctx()->codec->name : "no-video");
- const auto audio = widen(audio_stream_.ctx() ? audio_stream_.ctx()->codec->name : "no-audio");
-
- return L"ffmpeg_input[" + filename_ + L"(" + boost::lexical_cast<std::wstring>(static_cast<int>(100*fps_)) + L"|" + video + L"|" + audio + L")]";
+ // Close only the bracket that was opened: the old string opened "("
+ // before ")]"; the simplified form no longer does, so ")" would leak
+ // into every log line as "ffmpeg_input[name)]".
+ return L"ffmpeg_input[" + filename_ + L"]";
}
};\r
\r
input::input(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start, int length) \r
: impl_(new implementation(graph, filename, loop, start)){}\r
-const std::shared_ptr<AVCodecContext>& input::get_video_codec_context() const{return impl_->video_stream_.ctx();}\r
-const std::shared_ptr<AVCodecContext>& input::get_audio_codec_context() const{return impl_->audio_stream_.ctx();}\r
-bool input::is_running() const {return impl_->executor_.is_running();}\r
-bool input::try_pop_video_packet(std::shared_ptr<AVPacket>& packet){return impl_->try_pop_video_packet(packet);}\r
-bool input::try_pop_audio_packet(std::shared_ptr<AVPacket>& packet){return impl_->try_pop_audio_packet(packet);}\r
-double input::fps() const { return impl_->fps(); }\r
+AVStream* input::stream(AVMediaType media_type){return impl_->stream(media_type);}\r
+bool input::eof() const {return !impl_->executor_.is_running();}\r
+bool input::try_pop(std::shared_ptr<AVPacket>& packet){return impl_->try_pop(packet);}\r
}
\ No newline at end of file
{\r
public:\r
explicit input(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start, int length);\r
- const std::shared_ptr<AVCodecContext>& get_video_codec_context() const;\r
- const std::shared_ptr<AVCodecContext>& get_audio_codec_context() const;\r
\r
- bool try_pop_video_packet(std::shared_ptr<AVPacket>& packet);\r
- bool try_pop_audio_packet(std::shared_ptr<AVPacket>& packet);\r
+ bool try_pop(std::shared_ptr<AVPacket>& packet);\r
+ bool eof() const;\r
\r
- bool is_running() const;\r
- double fps() const;\r
+ AVStream* stream(AVMediaType media_type);\r
private:\r
struct implementation;\r
std::shared_ptr<implementation> impl_;\r
};\r
-//\r
-//class input_video_iterator : public boost::iterator_facade<input_video_iterator, std::shared_ptr<AVPacket>, boost::forward_traversal_tag>\r
-//{\r
-// std::shared_ptr<AVPacket> pkt_;\r
-// input* input_;\r
-//public:\r
-// input_video_iterator() : input_(nullptr){}\r
-//\r
-// input_video_iterator(input& input)\r
-// : input_(&input) {}\r
-//\r
-// input_video_iterator(const input_video_iterator& other)\r
-// : input_(other.input_) {}\r
-//\r
-// private:\r
-// friend class boost::iterator_core_access;\r
-//\r
-// void increment() \r
-// {\r
-// if(input_ && !input_->try_pop_video_packet(pkt_))\r
-// input_ = nullptr;\r
-// }\r
-//\r
-// bool equal(input_video_iterator const& other) const\r
-// {\r
-// return input_ == other.input_;\r
-// }\r
-//\r
-// std::shared_ptr<AVPacket> const& dereference() const { return pkt_; }\r
-//};\r
\r
\r
}\r
{\r
std::shared_ptr<SwsContext> sws_context;\r
\r
- CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
+ //CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
\r
size_t key = width << 20 | height << 8 | pix_fmt;\r
\r
#include "../util.h"\r
\r
#include "../../ffmpeg_error.h"\r
-#include "../filter/filter.h"\r
+#include "../../tbb_avcodec.h"\r
\r
#include <common/memory/memcpy.h>\r
\r
#include <core/mixer/write_frame.h>\r
#include <core/producer/frame/image_transform.h>\r
#include <core/producer/frame/frame_factory.h>\r
+#include <core/producer/color/color_producer.h>\r
\r
-#include <tbb/parallel_for.h>\r
+#include <tbb/task_group.h>\r
\r
#include <boost/range/algorithm_ext.hpp>\r
\r
\r
struct video_decoder::implementation : boost::noncopyable\r
{\r
- input& input_;\r
const safe_ptr<core::frame_factory> frame_factory_;\r
- AVCodecContext& codec_context_;\r
- size_t frame_number_;\r
-\r
- std::shared_ptr<filter> filter_;\r
- int eof_count_;\r
-\r
- std::string filter_str_;\r
+ std::shared_ptr<AVCodecContext> codec_context_;\r
+ int index_;\r
+ core::video_mode::type mode_;\r
\r
+ std::queue<std::shared_ptr<AVPacket>> packet_buffer_;\r
public:\r
- explicit implementation(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str) \r
- : input_(input)\r
- , frame_factory_(frame_factory)\r
- , codec_context_(*input_.get_video_codec_context())\r
- , frame_number_(0)\r
- , filter_(filter_str.empty() ? nullptr : new filter(filter_str))\r
- , filter_str_(filter_str)\r
- , eof_count_(std::numeric_limits<int>::max())\r
+ explicit implementation(AVStream* stream, const safe_ptr<core::frame_factory>& frame_factory) \r
+ : frame_factory_(frame_factory)\r
+ , mode_(core::video_mode::invalid)\r
{\r
- }\r
+ if(!stream || !stream->codec)\r
+ return;\r
\r
- std::deque<std::pair<int, safe_ptr<core::write_frame>>> receive()\r
- {\r
- std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;\r
+ auto codec = avcodec_find_decoder(stream->codec->codec_id); \r
+ if(!codec)\r
+ return;\r
+ \r
+ int errn = tbb_avcodec_open(stream->codec, codec);\r
+ if(errn < 0)\r
+ return;\r
+ \r
+ index_ = stream->index;\r
+ codec_context_.reset(stream->codec, tbb_avcodec_close);\r
+\r
+ // Some files give an invalid time_base numerator, try to fix it.\r
+ if(codec_context_ && codec_context_->time_base.num == 1)\r
+ codec_context_->time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(codec_context_->time_base.den)))-1)); \r
+ }\r
\r
- std::shared_ptr<AVPacket> pkt;\r
- for(int n = 0; n < 32 && result.empty() && input_.try_pop_video_packet(pkt); ++n) \r
- boost::range::push_back(result, decode(pkt));\r
+ void push(const std::shared_ptr<AVPacket>& packet)\r
+ {\r
+ if(!codec_context_)\r
+ return;\r
\r
- return result;\r
+ if(packet && packet->stream_index != index_)\r
+ return;\r
+\r
+ packet_buffer_.push(packet);\r
}\r
\r
- std::deque<std::pair<int, safe_ptr<core::write_frame>>> decode(const std::shared_ptr<AVPacket>& video_packet)\r
- { \r
- std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;\r
+ std::vector<safe_ptr<core::write_frame>> poll()\r
+ { \r
+ std::vector<safe_ptr<core::write_frame>> result;\r
\r
- if(!video_packet) // eof\r
+ if(!codec_context_)\r
+ result.push_back(core::create_color_frame(this, frame_factory_, L"#00000000"));\r
+ else if(!packet_buffer_.empty())\r
{\r
- eof_count_ = frame_number_ + (filter_ ? filter_->delay()+1 : 0);\r
- avcodec_flush_buffers(&codec_context_);\r
- return result;\r
- } \r
- \r
- frame_number_ = frame_number_ % eof_count_;\r
- \r
- const void* tag = this;\r
-\r
- if(filter_)\r
- { \r
- std::shared_ptr<AVFrame> frame;\r
+ auto packet = std::move(packet_buffer_.front());\r
+ packet_buffer_.pop();\r
\r
- tbb::parallel_invoke(\r
- [&]\r
+ if(!packet) // eof\r
+ { \r
+ if(codec_context_->codec->capabilities | CODEC_CAP_DELAY)\r
+ {\r
+ // FIXME: This might cause bad performance.\r
+ AVPacket pkt = {0};\r
+ auto frame = decode_frame(pkt);\r
+ if(frame)\r
+ result.push_back(make_write_frame(this, make_safe(frame), frame_factory_));\r
+ }\r
+\r
+ avcodec_flush_buffers(codec_context_.get());\r
+ }\r
+ else\r
{\r
- frame = decode_frame(video_packet);\r
- },\r
- [&]\r
- { \r
- boost::range::transform(filter_->poll(), std::back_inserter(result), [&](const safe_ptr<AVFrame>& frame)\r
+ auto frame = decode_frame(*packet);\r
+ if(frame)\r
{\r
- return std::make_pair(frame_number_, make_write_frame(tag, frame, frame_factory_));\r
- });\r
- \r
- if(!result.empty())\r
- ++frame_number_;\r
- }); \r
-\r
- if(frame)\r
- filter_->push(make_safe(frame));\r
- }\r
- else\r
- {\r
- auto frame = decode_frame(video_packet);\r
- \r
- if(frame)\r
- result.push_back(std::make_pair(frame_number_++, make_write_frame(tag, make_safe(frame), frame_factory_)));\r
+ auto frame2 = make_write_frame(this, make_safe(frame), frame_factory_); \r
+ mode_ = frame2->get_type();\r
+ result.push_back(std::move(frame2));\r
+ }\r
+ }\r
}\r
\r
return result;\r
}\r
- \r
- std::shared_ptr<AVFrame> decode_frame(const std::shared_ptr<AVPacket>& video_packet)\r
+\r
+ std::shared_ptr<AVFrame> decode_frame(AVPacket& packet)\r
{\r
std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
\r
int frame_finished = 0;\r
- const int errn = avcodec_decode_video2(&codec_context_, decoded_frame.get(), &frame_finished, video_packet.get());\r
+ const int errn = avcodec_decode_video2(codec_context_.get(), decoded_frame.get(), &frame_finished, &packet);\r
\r
if(errn < 0)\r
{\r
boost::errinfo_errno(AVUNERROR(errn)));\r
}\r
\r
- if(frame_finished == 0) \r
- decoded_frame = nullptr;\r
+ if(frame_finished == 0) \r
+ decoded_frame.reset();\r
\r
return decoded_frame;\r
}\r
-};\r
\r
-video_decoder::video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str) : impl_(new implementation(input, frame_factory, filter_str)){}\r
-std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_decoder::receive(){return impl_->receive();}\r
+ bool ready() const\r
+ {\r
+ return !codec_context_ || !packet_buffer_.empty();\r
+ }\r
+ \r
+ core::video_mode::type mode()\r
+ {\r
+ if(!codec_context_)\r
+ return frame_factory_->get_video_format_desc().mode;\r
+\r
+ return mode_;\r
+ }\r
+\r
+ // Frame rate derived from the codec's time_base (den/num).
+ // NOTE(review): unlike ready()/mode(), there is no null guard on
+ // codec_context_ here — confirm callers only invoke fps() when a video
+ // stream was successfully opened.
+ double fps() const
+ {
+ return static_cast<double>(codec_context_->time_base.den) / static_cast<double>(codec_context_->time_base.num);
+ }
+};\r
\r
+video_decoder::video_decoder(AVStream* stream, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(stream, frame_factory)){}\r
+void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
+std::vector<safe_ptr<core::write_frame>> video_decoder::poll(){return impl_->poll();}\r
+bool video_decoder::ready() const{return impl_->ready();}\r
+core::video_mode::type video_decoder::mode(){return impl_->mode();}\r
+double video_decoder::fps() const{return impl_->fps();}\r
}
\ No newline at end of file
\r
#include <common/memory/safe_ptr.h>\r
\r
-#include "../input.h"\r
+#include <core/video_format.h>\r
+\r
+struct AVStream;\r
+\r
namespace caspar {\r
- \r
+\r
namespace core {\r
struct frame_factory;\r
class write_frame;\r
class video_decoder : boost::noncopyable\r
{\r
public:\r
- explicit video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str);\r
- std::deque<std::pair<int, safe_ptr<core::write_frame>>> receive(); \r
+ explicit video_decoder(AVStream* stream, const safe_ptr<core::frame_factory>& frame_factory);\r
+ \r
+ void push(const std::shared_ptr<AVPacket>& packet);\r
+ bool ready() const;\r
+ std::vector<safe_ptr<core::write_frame>> poll();\r
+\r
+ core::video_mode::type mode();\r
\r
+ double fps() const;\r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
<?xml version="1.0" encoding="utf-8"?>\r
<configuration>\r
<paths>\r
- <media-path>C:\\Casparcg\\_media\\</media-path>\r
- <log-path>C:\\Casparcg\\_log\\</log-path>\r
- <data-path>C:\\Casparcg\\_data\\</data-path>\r
- <template-path>C:\\Casparcg\\</template-path>\r
+ <media-path>L:\\casparcg\\_media\\</media-path>\r
+ <log-path>L:\\casparcg\\_log\\</log-path>\r
+ <data-path>L:\\casparcg\\_data\\</data-path>\r
+ <template-path>L:\\casparcg\\_templates\\</template-path>\r
<template-host>cg.fth.18</template-host>\r
</paths>\r
<diagnostics>\r
<graphs>true</graphs>\r
</diagnostics>\r
+ <ffmpeg>\r
+ <auto-mode>false</auto-mode>\r
+ </ffmpeg>\r
<channels>\r
<channel>\r
- <video-mode>PAL</video-mode>\r
+ <video-mode>1080i5000</video-mode>\r
<consumers>\r
- <screen>\r
- <stretch>uniform</stretch>\r
- </screen>\r
+ <decklink>\r
+ <device>1</device>\r
+ <embedded-audio>true</embedded-audio>\r
+ </decklink>\r
+ </consumers>\r
+ </channel>\r
+ <channel>\r
+ <video-mode>1080i5000</video-mode>\r
+ <consumers>\r
+ <decklink>\r
+ <device>2</device>\r
+ <embedded-audio>true</embedded-audio>\r
+ </decklink>\r
</consumers>\r
</channel>\r
</channels>\r
</configuration>\r
\r
<!--\r
+<auto-mode>true [true|false]</auto-mode>
<channel>\r
<video-mode> PAL [PAL|NTSC|1080i5000|576p2500720p2500|720p5000|720p5994|720p6000|1080p2398|1080p2400|1080i5000|1080i5994|1080i6000|1080p2500|1080p2997|1080p3000|1080p5000] </video-mode>\r
<consumers>\r