\r
namespace caspar { namespace core {\r
\r
-class key_read_frame_adapter : public core::read_frame\r
+class key_read_frame_muxer : public core::read_frame\r
{\r
ogl_device& ogl_;\r
safe_ptr<read_frame> fill_;\r
std::shared_ptr<host_buffer> key_;\r
tbb::mutex mutex_;\r
public:\r
- key_read_frame_adapter(ogl_device& ogl, const safe_ptr<read_frame>& fill)\r
+ key_read_frame_muxer(ogl_device& ogl, const safe_ptr<read_frame>& fill)\r
: ogl_(ogl)\r
, fill_(fill)\r
{\r
timer_.tick(1.0/channel_.get_format_desc().fps);\r
\r
auto fill = frame;\r
- auto key = make_safe<key_read_frame_adapter>(channel_.ogl(), frame);\r
+ auto key = make_safe<key_read_frame_muxer>(channel_.ogl(), frame);\r
\r
auto it = consumers_.begin();\r
while(it != consumers_.end())\r
<ItemGroup>\r
<ClInclude Include="mixer\gpu\shader.h" />\r
<ClInclude Include="mixer\image\blending_glsl.h" />\r
+ <ClInclude Include="producer\frame_muxer.h" />\r
<ClInclude Include="video_channel.h" />\r
<ClInclude Include="video_channel_context.h" />\r
<ClInclude Include="consumer\output.h" />\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
+ <ClCompile Include="producer\frame_muxer.cpp">\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
+ </ClCompile>\r
<ClCompile Include="video_channel.cpp" />\r
<ClCompile Include="consumer\frame_consumer.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
<ClInclude Include="mixer\gpu\shader.h">\r
<Filter>source\mixer\gpu</Filter>\r
</ClInclude>\r
+ <ClInclude Include="producer\frame_muxer.h">\r
+ <Filter>source\producer</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="producer\transition\transition_producer.cpp">\r
<ClCompile Include="mixer\gpu\shader.cpp">\r
<Filter>source\mixer\gpu</Filter>\r
</ClCompile>\r
+ <ClCompile Include="producer\frame_muxer.cpp">\r
+ <Filter>source\producer</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
const auto prev_gain = static_cast<int>(prev.get_gain()*BASE);\r
\r
const int n_samples = audio_data_.back().size();\r
+ \r
+ CASPAR_VERIFY(audio_data.empty() || static_cast<size_t>(audio_data.size()) == audio_data_.size());\r
+\r
+ if(static_cast<size_t>(audio_data.size()) > audio_data_.size())\r
+ return;\r
\r
tbb::parallel_for\r
(\r
--- /dev/null
+#include "../StdAfx.h"\r
+\r
+#include "frame_muxer.h"\r
+\r
+#include "frame/basic_frame.h"\r
+#include "../mixer/write_frame.h"\r
+\r
+namespace caspar { namespace core {\r
+ \r
+// Enumerates the strategies the frame muxer can use to adapt incoming
+// frames to the channel's output video mode (rate doubling/halving,
+// interlace weaving, deinterlacing).
+struct display_mode
+{
+	enum type
+	{
+		simple,				// same rate: pass frames through 1:1
+		duplicate,			// input at half the output rate: emit each frame twice
+		half,				// input at twice the output rate: drop every second frame
+		interlace,			// weave two progressive inputs into one interlaced output
+		deinterlace,		// split interlaced input into two progressive outputs (not implemented below)
+		deinterlace_half,	// deinterlace, keeping the frame rate (not implemented below)
+		count,
+		invalid
+	};
+
+	// Human-readable name for logging/diagnostics; unknown values map to L"invalid".
+	static std::wstring print(display_mode::type value)
+	{
+		switch(value)
+		{
+		case simple:
+			return L"simple";
+		case duplicate:
+			return L"duplicate";
+		case half:
+			return L"half";
+		case interlace:
+			return L"interlace";
+		case deinterlace:
+			return L"deinterlace";
+		case deinterlace_half:
+			return L"deinterlace_half";
+		default:
+			return L"invalid";
+		}
+	}
+};
+\r
+// Chooses a display_mode from the input/output scan types and frame rates.
+// Rates are compared with a tolerance ("epsilon") of 2.0 fps so that pairs
+// such as 29.97 vs 30 or 59.94 vs 60 compare equal.
+// NOTE(review): a 2.0 fps tolerance also equates 24 and 25 fps -- confirm
+// this is intended.
+display_mode::type get_display_mode(const core::video_mode::type in_mode, double in_fps, const core::video_mode::type out_mode, double out_fps)
+{		
+	if(in_mode == core::video_mode::invalid || out_mode == core::video_mode::invalid)
+		return display_mode::invalid;
+
+	static const auto epsilon = 2.0;
+
+	if(std::abs(in_fps - out_fps) < epsilon)
+	{
+		// Same rate: only the scan type may differ.
+		if(in_mode != core::video_mode::progressive && out_mode == core::video_mode::progressive)
+			return display_mode::deinterlace_half;
+		//else if(in_mode == core::video_mode::progressive && out_mode != core::video_mode::progressive)
+		//	simple(); // interlace_duplicate();
+		else
+			return display_mode::simple;
+	}
+	else if(std::abs(in_fps/2.0 - out_fps) < epsilon)
+	{
+		// Input runs at twice the output rate.
+		if(in_mode != core::video_mode::progressive)
+			return display_mode::invalid;
+
+		if(out_mode != core::video_mode::progressive)
+			return display_mode::interlace;
+		else
+			return display_mode::half;
+	}
+	else if(std::abs(in_fps - out_fps/2.0) < epsilon)
+	{
+		// Input runs at half the output rate.
+		if(out_mode != core::video_mode::progressive)
+			return display_mode::invalid;
+
+		if(in_mode != core::video_mode::progressive)
+			return display_mode::deinterlace;
+		else
+			return display_mode::duplicate;
+	}
+
+	return display_mode::invalid;
+}
+\r
+// Pimpl: pairs queued video frames with queued audio chunks according to the
+// selected display_mode and emits ready-to-render basic_frames.
+struct frame_muxer::implementation
+{	
+	std::queue<safe_ptr<write_frame>> video_frames_;	// video awaiting audio
+	std::queue<std::vector<int16_t>> audio_chunks_;		// audio awaiting video
+	std::queue<safe_ptr<basic_frame>> frame_buffer_;	// finished output frames
+	display_mode::type display_mode_;					// lazily determined on first process()
+	const double in_fps_;
+	const double out_fps_;
+	const video_mode::type out_mode_;
+
+	implementation(double in_fps, const core::video_mode::type out_mode, double out_fps)
+		: display_mode_(display_mode::invalid)
+		, in_fps_(in_fps)
+		, out_fps_(out_fps)
+		, out_mode_(out_mode)
+	{
+	}
+
+	// Queue a video frame and attempt to produce output.
+	void push(const safe_ptr<write_frame>& video_frame)
+	{
+		video_frames_.push(video_frame);
+		process();
+	}
+
+	// Queue an audio chunk and attempt to produce output.
+	void push(const std::vector<int16_t>& audio_chunk)
+	{
+		audio_chunks_.push(audio_chunk);
+		process();
+	}
+
+	// Removes and returns the oldest finished frame.
+	// NOTE(review): undefined behaviour when frame_buffer_ is empty -- callers
+	// must check size()/empty() first; confirm all call sites do.
+	safe_ptr<basic_frame> pop()
+	{	
+		auto frame = frame_buffer_.front();
+		frame_buffer_.pop();
+		return frame;
+	}
+
+	// Number of finished frames available to pop().
+	size_t size() const
+	{
+		return frame_buffer_.size();
+	}
+
+	// Produces at most one output step per call; a no-op until at least one
+	// video frame and one audio chunk are queued. The display mode is decided
+	// once, from the first frame's scan type and the configured frame rates.
+	void process()
+	{
+		if(video_frames_.empty() || audio_chunks_.empty())
+			return;
+
+		if(display_mode_ == display_mode::invalid)
+			display_mode_ = get_display_mode(video_frames_.front()->get_type(), in_fps_, out_mode_, out_fps_);
+
+		switch(display_mode_)
+		{
+		case display_mode::simple:
+			return simple();
+		case display_mode::duplicate:
+			return duplicate();
+		case display_mode::half:
+			return half();
+		case display_mode::interlace:
+			return interlace();
+		case display_mode::deinterlace:
+			return deinterlace();
+		case display_mode::deinterlace_half:
+			return deinterlace_half();
+		default:
+			BOOST_THROW_EXCEPTION(invalid_operation());
+		}
+	}
+
+	// 1:1 -- attach one audio chunk to one video frame.
+	void simple()
+	{
+		if(video_frames_.empty() || audio_chunks_.empty())
+			return;
+
+		auto frame1 = video_frames_.front();
+		video_frames_.pop();
+
+		frame1->audio_data() = audio_chunks_.front();
+		audio_chunks_.pop();
+
+		frame_buffer_.push(frame1);
+	}
+
+	// Rate doubling -- emit the frame twice, each copy carrying its own audio
+	// chunk (hence the two-chunk requirement).
+	void duplicate()
+	{	
+		if(video_frames_.empty() || audio_chunks_.size() < 2)
+			return;
+
+		auto frame = video_frames_.front();
+		video_frames_.pop();
+
+		auto frame1 = make_safe<core::write_frame>(*frame); // make a copy
+		frame1->audio_data() = audio_chunks_.front();
+		audio_chunks_.pop();
+
+		auto frame2 = frame;
+		frame2->audio_data() = audio_chunks_.front();
+		audio_chunks_.pop();
+
+		frame_buffer_.push(frame1);
+		frame_buffer_.push(frame2);
+	}
+
+	// Rate halving -- keep the first of each pair of frames, discard the second.
+	void half()
+	{	
+		if(video_frames_.size() < 2 || audio_chunks_.empty())
+			return;
+			
+		auto frame1 = video_frames_.front();
+		video_frames_.pop();
+		frame1->audio_data() = audio_chunks_.front();
+		audio_chunks_.pop();
+				
+		video_frames_.pop(); // Throw away
+
+		frame_buffer_.push(frame1);
+	}
+	
+	// Weave two progressive frames into one interlaced frame; audio comes from
+	// the first chunk only.
+	void interlace()
+	{	
+		if(video_frames_.size() < 2 || audio_chunks_.empty())
+			return;
+		
+		auto frame1 = video_frames_.front();
+		video_frames_.pop();
+
+		frame1->audio_data() = audio_chunks_.front();
+		audio_chunks_.pop();
+				
+		auto frame2 = video_frames_.front();
+		video_frames_.pop();
+
+		frame_buffer_.push(core::basic_frame::interlace(frame1, frame2, out_mode_));	
+	}
+	
+	void deinterlace()
+	{
+		// Not supported yet; selecting a mode that requires it is an error.
+		BOOST_THROW_EXCEPTION(not_implemented() << msg_info("deinterlace"));
+	}
+
+	void deinterlace_half()
+	{
+		// Not supported yet; selecting a mode that requires it is an error.
+		BOOST_THROW_EXCEPTION(not_implemented() << msg_info("deinterlace_half"));
+	}
+};
+\r
+// Thin forwarding wrappers over the pimpl.
+// NOTE(review): impl_ is a safe_ptr constructed from an implementation value;
+// confirm safe_ptr heap-allocates a copy here (make_safe<implementation>(...)
+// would state the intent more explicitly).
+frame_muxer::frame_muxer(double in_fps, const core::video_mode::type out_mode, double out_fps)
+	: impl_(implementation(in_fps, out_mode, out_fps)){}
+void frame_muxer::push(const safe_ptr<write_frame>& video_frame){impl_->push(video_frame);}
+void frame_muxer::push(const std::vector<int16_t>& audio_chunk){return impl_->push(audio_chunk);}
+safe_ptr<basic_frame> frame_muxer::pop(){return impl_->pop();}
+size_t frame_muxer::size() const {return impl_->size();}
+bool frame_muxer::empty() const {return impl_->size() == 0;}
+size_t frame_muxer::video_frames() const{return impl_->video_frames_.size();}
+size_t frame_muxer::audio_chunks() const{return impl_->audio_chunks_.size();}
+\r
+}}
\ No newline at end of file
--- /dev/null
+#pragma once
+
+#include "../video_format.h"
+
+#include <common/memory/safe_ptr.h>
+
+#include <vector>
+
+namespace caspar { namespace core {
+
+class write_frame;
+class basic_frame;
+
+// Pairs independently pushed video frames and audio chunks, and converts them
+// to the output video mode (rate doubling/halving, interlace weaving) before
+// rendering. Push video and audio as they arrive; pop finished frames while
+// !empty().
+class frame_muxer
+{
+public:
+	frame_muxer(double in_fps, const video_mode::type out_mode, double out_fps);
+
+	// Queue one decoded video frame.
+	void push(const safe_ptr<write_frame>& video_frame);
+	// Queue one chunk of interleaved 16-bit audio samples.
+	void push(const std::vector<int16_t>& audio_chunk);
+	
+	// Video frames / audio chunks still waiting to be paired.
+	size_t video_frames() const;
+	size_t audio_chunks() const;
+
+	// Number of finished frames ready to pop().
+	size_t size() const;
+	bool empty() const;
+
+	// Removes and returns the oldest finished frame; only valid when !empty().
+	safe_ptr<basic_frame> pop();
+private:
+	struct implementation;
+	safe_ptr<implementation> impl_;
+};
+
+}}
\ No newline at end of file
, buffer_depth(CONSUMER_BUFFER_DEPTH){}\r
};\r
\r
-class decklink_frame_adapter : public IDeckLinkVideoFrame\r
+class decklink_frame_muxer : public IDeckLinkVideoFrame\r
{\r
const safe_ptr<core::read_frame> frame_;\r
const core::video_format_desc format_desc_;\r
public:\r
- decklink_frame_adapter(const safe_ptr<core::read_frame>& frame, const core::video_format_desc& format_desc)\r
+ decklink_frame_muxer(const safe_ptr<core::read_frame>& frame, const core::video_format_desc& format_desc)\r
: frame_(frame)\r
, format_desc_(format_desc){}\r
\r
\r
void schedule_next_video(const safe_ptr<core::read_frame>& frame)\r
{\r
- frame_container_.push_back(std::make_shared<decklink_frame_adapter>(frame, format_desc_));\r
+ frame_container_.push_back(std::make_shared<decklink_frame_muxer>(frame, format_desc_));\r
if(FAILED(output_->ScheduleVideoFrame(frame_container_.back().get(), (frames_scheduled_++) * format_desc_.duration, format_desc_.duration, format_desc_.time_scale)))\r
CASPAR_LOG(error) << print() << L" Failed to schedule video.";\r
\r
#include <common/exception/exceptions.h>\r
#include <common/memory/memclr.h>\r
\r
-#include <core/producer/frame/frame_factory.h>\r
#include <core/mixer/write_frame.h>\r
#include <core/producer/frame/audio_transform.h>\r
+#include <core/producer/frame/frame_factory.h>\r
+#include <core/producer/frame_muxer.h>\r
\r
#include <tbb/concurrent_queue.h>\r
#include <tbb/atomic.h>\r
#include <functional>\r
\r
namespace caspar { \r
-\r
-class frame_filter\r
-{\r
- std::unique_ptr<filter> filter_;\r
- safe_ptr<core::frame_factory> frame_factory_;\r
-\r
-public:\r
- frame_filter(const std::string& filter_str, const safe_ptr<core::frame_factory>& frame_factory) \r
- : filter_(filter_str.empty() ? nullptr : new filter(filter_str))\r
- , frame_factory_(frame_factory)\r
- {\r
- }\r
-\r
- std::vector<safe_ptr<core::basic_frame>> execute(const safe_ptr<core::write_frame>& input_frame)\r
- { \r
- std::vector<safe_ptr<core::basic_frame>> result;\r
-\r
- if(!filter_)\r
- {\r
- input_frame->commit();\r
- result.push_back(input_frame);\r
- }\r
- else\r
- {\r
- auto desc = input_frame->get_pixel_format_desc();\r
-\r
- auto av_frame = as_av_frame(input_frame);\r
- \r
- filter_->push(av_frame); \r
- auto buffer = filter_->poll(); \r
- \r
- if(buffer.size() == 2)\r
- {\r
- auto frame1 = make_write_frame(this, buffer[0], frame_factory_);\r
- auto frame2 = make_write_frame(this, buffer[1], frame_factory_);\r
- frame1->audio_data() = std::move(input_frame->audio_data());\r
- \r
- if(frame_factory_->get_video_format_desc().mode == core::video_mode::progressive)\r
- {\r
- frame2->audio_data().insert(frame2->audio_data().begin(), frame1->audio_data().begin() + frame1->audio_data().size()/2, frame1->audio_data().end());\r
- frame1->audio_data().erase(frame1->audio_data().begin() + frame1->audio_data().size()/2, frame1->audio_data().end());\r
- result.push_back(frame1);\r
- result.push_back(frame2);\r
- }\r
- else\r
- {\r
- frame2->get_audio_transform().set_has_audio(false);\r
- result.push_back(core::basic_frame::interlace(frame1, frame2, frame_factory_->get_video_format_desc().mode));\r
- }\r
- }\r
- else if(buffer.size() > 0)\r
- {\r
- auto frame1 = make_write_frame(this, buffer[0], frame_factory_);\r
- frame1->audio_data() = std::move(input_frame->audio_data());\r
- result.push_back(frame1);\r
- }\r
-\r
- }\r
\r
- return result;\r
- }\r
-};\r
- \r
class decklink_producer : public IDeckLinkInputCallback\r
{ \r
CComPtr<IDeckLink> decklink_;\r
std::shared_ptr<diagnostics::graph> graph_;\r
boost::timer tick_timer_;\r
boost::timer frame_timer_;\r
- \r
- std::vector<short> audio_data_;\r
\r
+ std::vector<int16_t> audio_samples_;\r
+ \r
safe_ptr<core::frame_factory> frame_factory_;\r
\r
tbb::concurrent_bounded_queue<safe_ptr<core::basic_frame>> frame_buffer_;\r
safe_ptr<core::basic_frame> tail_;\r
\r
std::exception_ptr exception_;\r
- frame_filter filter_;\r
+ std::unique_ptr<filter> filter_;\r
+ \r
+ core::frame_muxer muxer_;\r
\r
public:\r
- decklink_producer(const core::video_format_desc& format_desc, size_t device_index, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter_str)\r
+ decklink_producer(const core::video_format_desc& format_desc, size_t device_index, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter)\r
: decklink_(get_device(device_index))\r
, input_(decklink_)\r
, model_name_(get_model_name(decklink_))\r
, device_index_(device_index)\r
, frame_factory_(frame_factory)\r
, tail_(core::basic_frame::empty())\r
- , filter_(narrow(filter_str), frame_factory_)\r
+ , filter_(filter.empty() ? nullptr : new caspar::filter(filter))\r
+ , muxer_(double_rate(filter) ? format_desc.fps * 2.0 : format_desc.fps, frame_factory->get_video_format_desc().mode, frame_factory->get_video_format_desc().fps)\r
{\r
frame_buffer_.set_capacity(2);\r
\r
});\r
frame->set_type(format_desc_.mode);\r
\r
+ std::vector<safe_ptr<core::write_frame>> frames;\r
+\r
+ if(filter_)\r
+ {\r
+ filter_->push(as_av_frame(frame));\r
+ auto av_frames = filter_->poll();\r
+ BOOST_FOREACH(auto& av_frame, av_frames)\r
+ frames.push_back(make_write_frame(this, av_frame, frame_factory_));\r
+ }\r
+ else\r
+ {\r
+ frame->commit();\r
+ frames.push_back(frame);\r
+ }\r
+\r
+ BOOST_FOREACH(auto frame, frames)\r
+ muxer_.push(frame);\r
+ \r
// It is assumed that audio is always equal or ahead of video.\r
if(audio && SUCCEEDED(audio->GetBytes(&bytes)))\r
{\r
- const size_t audio_samples = static_cast<size_t>(48000.0 / format_desc_.fps);\r
- const size_t audio_nchannels = 2;\r
-\r
auto sample_frame_count = audio->GetSampleFrameCount();\r
auto audio_data = reinterpret_cast<short*>(bytes);\r
- audio_data_.insert(audio_data_.end(), audio_data, audio_data + sample_frame_count*2);\r
+ audio_samples_.insert(audio_samples_.end(), audio_data, audio_data + sample_frame_count*2);\r
\r
- if(audio_data_.size() > audio_samples*audio_nchannels)\r
+ if(audio_samples_.size() > frame_factory_->get_video_format_desc().audio_samples_per_frame)\r
{\r
- frame->audio_data() = std::vector<short>(audio_data_.begin(), audio_data_.begin() + audio_samples*audio_nchannels);\r
- audio_data_.erase(audio_data_.begin(), audio_data_.begin() + audio_samples*audio_nchannels);\r
+ const auto begin = audio_samples_.begin();\r
+ const auto end = begin + frame_factory_->get_video_format_desc().audio_samples_per_frame;\r
+ muxer_.push(std::vector<int16_t>(begin, end));\r
+ audio_samples_.erase(begin, end);\r
}\r
}\r
- \r
- auto frames = filter_.execute(frame); \r
- \r
- for(size_t n = 0; n < frames.size(); ++n)\r
+ else\r
+ muxer_.push(std::vector<int16_t>(frame_factory_->get_video_format_desc().audio_samples_per_frame, 0));\r
+ \r
+ while(!muxer_.empty())\r
{\r
- if(!frame_buffer_.try_push(frames[n]))\r
+ if(!frame_buffer_.try_push(muxer_.pop()))\r
graph_->add_tag("dropped-frame");\r
}\r
\r
std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>> audio_samples_; // avcodec_decode_audio3 needs 4 byte alignment\r
std::queue<std::shared_ptr<AVPacket>> packets_;\r
public:\r
- explicit implementation(AVStream* stream, const core::video_format_desc& format_desc) \r
+ explicit implementation(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) \r
: format_desc_(format_desc) \r
{\r
- if(!stream || !stream->codec)\r
- return;\r
+ AVCodec* dec;\r
+ index_ = av_find_best_stream(context.get(), AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);\r
\r
- auto codec = avcodec_find_decoder(stream->codec->codec_id); \r
- if(!codec)\r
- return;\r
- \r
- int errn = avcodec_open(stream->codec, codec);\r
+ int errn = avcodec_open(context->streams[index_]->codec, dec);\r
if(errn < 0)\r
return;\r
\r
- index_ = stream->index;\r
- codec_context_.reset(stream->codec, avcodec_close);\r
+ codec_context_.reset(context->streams[index_]->codec, avcodec_close);\r
\r
if(codec_context_ &&\r
(codec_context_->sample_rate != static_cast<int>(format_desc_.audio_sample_rate) || \r
}\r
};\r
\r
-audio_decoder::audio_decoder(AVStream* stream, const core::video_format_desc& format_desc) : impl_(new implementation(stream, format_desc)){}\r
+audio_decoder::audio_decoder(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) : impl_(new implementation(context, format_desc)){}\r
void audio_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
bool audio_decoder::ready() const{return impl_->ready();}\r
std::vector<std::vector<int16_t>> audio_decoder::poll(){return impl_->poll();}\r
class audio_decoder : boost::noncopyable\r
{\r
public:\r
- explicit audio_decoder(AVStream* stream, const core::video_format_desc& format_desc);\r
+ explicit audio_decoder(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc);\r
\r
void push(const std::shared_ptr<AVPacket>& packet);\r
bool ready() const;\r
#include <common/utility/timer.h>\r
#include <common/diagnostics/graph.h>\r
\r
-#include <core/producer/frame/basic_frame.h>\r
#include <core/mixer/write_frame.h>\r
-#include <core/producer/frame/audio_transform.h>\r
#include <core/video_format.h>\r
+#include <core/producer/frame/audio_transform.h>\r
+#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/color/color_producer.h>\r
+#include <core/producer/frame_muxer.h>\r
\r
#include <common/env.h>\r
\r
#include <vector>\r
\r
namespace caspar {\r
-\r
-struct display_mode\r
-{\r
- enum type\r
- {\r
- simple,\r
- duplicate,\r
- half,\r
- interlace,\r
- deinterlace,\r
- deinterlace_half,\r
- count,\r
- invalid\r
- };\r
-\r
- static std::wstring print(display_mode::type value)\r
- {\r
- switch(value)\r
- {\r
- case simple:\r
- return L"simple";\r
- case duplicate:\r
- return L"duplicate";\r
- case half:\r
- return L"half";\r
- case interlace:\r
- return L"interlace";\r
- case deinterlace:\r
- return L"deinterlace";\r
- case deinterlace_half:\r
- return L"deinterlace_half";\r
- default:\r
- return L"invalid";\r
- }\r
- }\r
-};\r
-\r
-display_mode::type get_display_mode(const core::video_mode::type in_mode, double in_fps, const core::video_mode::type out_mode, double out_fps)\r
-{ \r
- if(in_mode == core::video_mode::invalid || out_mode == core::video_mode::invalid)\r
- return display_mode::invalid;\r
-\r
- static const auto epsilon = 2.0;\r
-\r
- if(std::abs(in_fps - out_fps) < epsilon)\r
- {\r
- if(in_mode != core::video_mode::progressive && out_mode == core::video_mode::progressive)\r
- return display_mode::deinterlace_half;\r
- //else if(in_mode == core::video_mode::progressive && out_mode != core::video_mode::progressive)\r
- // simple(); // interlace_duplicate();\r
- else\r
- return display_mode::simple;\r
- }\r
- else if(std::abs(in_fps/2.0 - out_fps) < epsilon)\r
- {\r
- if(in_mode != core::video_mode::progressive)\r
- return display_mode::invalid;\r
-\r
- if(out_mode != core::video_mode::progressive)\r
- return display_mode::interlace;\r
- else\r
- return display_mode::half;\r
- }\r
- else if(std::abs(in_fps - out_fps/2.0) < epsilon)\r
- {\r
- if(out_mode != core::video_mode::progressive)\r
- return display_mode::invalid;\r
-\r
- if(in_mode != core::video_mode::progressive)\r
- return display_mode::deinterlace;\r
- else\r
- return display_mode::duplicate;\r
- }\r
-\r
- return display_mode::invalid;\r
-}\r
- \r
+ \r
struct ffmpeg_producer : public core::frame_producer\r
{\r
const std::wstring filename_;\r
video_decoder video_decoder_;\r
audio_decoder audio_decoder_;\r
\r
- std::queue<safe_ptr<core::basic_frame>> frame_buffer_;\r
std::queue<safe_ptr<core::basic_frame>> output_buffer_;\r
\r
- const bool auto_convert_;\r
- display_mode::type display_mode_;\r
-\r
- std::deque<safe_ptr<core::write_frame>> video_frames_;\r
- std::deque<std::vector<int16_t>> audio_chunks_;\r
-\r
+ core::frame_muxer muxer_;\r
+ \r
tbb::task_group tasks_;\r
\r
public:\r
- explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, const std::wstring& filter_str, bool loop, int start, int length) \r
+ explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, const std::wstring& filter, bool loop, int start, int length) \r
: filename_(filename)\r
, graph_(diagnostics::create_graph(narrow(print())))\r
, frame_factory_(frame_factory) \r
, format_desc_(frame_factory->get_video_format_desc())\r
, input_(safe_ptr<diagnostics::graph>(graph_), filename_, loop, start, length)\r
- , video_decoder_(input_.stream(AVMEDIA_TYPE_VIDEO), frame_factory)\r
- , audio_decoder_(input_.stream(AVMEDIA_TYPE_AUDIO), frame_factory->get_video_format_desc())\r
- , auto_convert_(env::properties().get("configuration.ffmpeg.auto-mode", false))\r
- , display_mode_(display_mode::invalid)\r
+ , video_decoder_(input_.context(), frame_factory, filter)\r
+ , audio_decoder_(input_.context(), frame_factory->get_video_format_desc())\r
+ , muxer_(video_decoder_.fps(), format_desc_.mode, format_desc_.fps)\r
+ //, adapt_(env::properties().get("configuration.ffmpeg.auto-mode", false))\r
{\r
graph_->add_guide("frame-time", 0.5);\r
graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f)); \r
\r
- for(int n = 0; n < 128 && frame_buffer_.size() < 4; ++n)\r
+ for(int n = 0; n < 128 && muxer_.size() < 4; ++n)\r
decode_frame();\r
}\r
\r
{ \r
tasks_.wait();\r
\r
- output_buffer_ = std::move(frame_buffer_);\r
+ while(muxer_.size() > 0)\r
+ output_buffer_.push(muxer_.pop());\r
\r
tasks_.run([=]\r
{\r
frame_timer_.restart();\r
\r
- for(int n = 0; n < 64 && frame_buffer_.empty(); ++n)\r
+ for(int n = 0; n < 64 && muxer_.empty(); ++n)\r
decode_frame();\r
\r
graph_->update_value("frame-time", static_cast<float>(frame_timer_.elapsed()*format_desc_.fps*0.5));\r
\r
void decode_frame()\r
{\r
- for(int n = 0; n < 32 && ((video_frames_.size() < 2 && !video_decoder_.ready()) || (audio_chunks_.size() < 2 && !audio_decoder_.ready())); ++n) \r
+ for(int n = 0; n < 32 && ((muxer_.video_frames() < 2 && !video_decoder_.ready()) || (muxer_.audio_chunks() < 2 && !audio_decoder_.ready())); ++n) \r
{\r
std::shared_ptr<AVPacket> pkt;\r
if(input_.try_pop(pkt))\r
tbb::parallel_invoke(\r
[=]\r
{\r
- if(video_frames_.size() < 2)\r
- boost::range::push_back(video_frames_, video_decoder_.poll());\r
+ if(muxer_.video_frames() < 2)\r
+ {\r
+ BOOST_FOREACH(auto& video_frame, video_decoder_.poll())\r
+ muxer_.push(video_frame);\r
+ }\r
},\r
[=]\r
{\r
- if(audio_chunks_.size() < 2)\r
- boost::range::push_back(audio_chunks_, audio_decoder_.poll());\r
+ if(muxer_.audio_chunks() < 2)\r
+ {\r
+ BOOST_FOREACH(auto& audio_chunk, audio_decoder_.poll())\r
+ muxer_.push(audio_chunk);\r
+ }\r
});\r
-\r
- if(video_frames_.empty() || audio_chunks_.empty())\r
- return;\r
-\r
- if(auto_convert_)\r
- auto_convert();\r
- else\r
- simple();\r
- }\r
-\r
- void auto_convert()\r
- {\r
- auto current_display_mode = get_display_mode(video_decoder_.mode(), video_decoder_.fps(), format_desc_.mode, format_desc_.fps); \r
- if(current_display_mode != display_mode_)\r
- {\r
- display_mode_ = current_display_mode;\r
- CASPAR_LOG(info) << print() << " display_mode: " << display_mode::print(display_mode_) << \r
- L" in: " << core::video_mode::print(video_decoder_.mode()) << L" " << video_decoder_.fps() << " fps" <<\r
- L" out: " << core::video_mode::print(format_desc_.mode) << L" " << format_desc_.fps << " fps";\r
- }\r
-\r
- switch(display_mode_)\r
- {\r
- case display_mode::simple:\r
- return simple();\r
- case display_mode::duplicate:\r
- return duplicate();\r
- case display_mode::half:\r
- return half();\r
- case display_mode::interlace:\r
- return interlace();\r
- case display_mode::deinterlace:\r
- return deinterlace();\r
- case display_mode::deinterlace_half:\r
- return deinterlace_half();\r
- default:\r
- BOOST_THROW_EXCEPTION(invalid_operation());\r
- }\r
- }\r
-\r
- void simple()\r
- {\r
- CASPAR_ASSERT(!video_frames_.empty());\r
- CASPAR_ASSERT(!audio_chunks_.empty());\r
-\r
- auto frame1 = video_frames_.front();\r
- video_frames_.pop_front();\r
-\r
- frame1->audio_data() = audio_chunks_.front();\r
- audio_chunks_.pop_front();\r
-\r
- frame_buffer_.push(frame1);\r
- }\r
-\r
- void duplicate()\r
- { \r
- CASPAR_ASSERT(!video_frames_.empty());\r
- CASPAR_ASSERT(!audio_chunks_.empty());\r
-\r
- auto frame = video_frames_.front();\r
- video_frames_.pop_front();\r
-\r
- auto frame1 = make_safe<core::write_frame>(*frame); // make a copy\r
- frame1->audio_data() = audio_chunks_.front();\r
- audio_chunks_.pop_front();\r
-\r
- auto frame2 = frame;\r
- frame2->audio_data() = audio_chunks_.front();\r
- audio_chunks_.pop_front();\r
-\r
- frame_buffer_.push(frame1);\r
- frame_buffer_.push(frame2);\r
- }\r
-\r
- void half()\r
- { \r
- CASPAR_ASSERT(!video_frames_.empty());\r
- CASPAR_ASSERT(!audio_chunks_.empty());\r
-\r
- if(video_frames_.size() < 2 && !input_.eof())\r
- return;\r
- \r
- if(video_frames_.size() < 2)\r
- video_frames_.push_back(create_color_frame(this, frame_factory_, L"#00000000"));\r
-\r
- CASPAR_ASSERT(video_frames_.size() == 2);\r
- \r
- auto frame1 =video_frames_.front();\r
- video_frames_.pop_front();\r
- frame1->audio_data() = audio_chunks_.front();\r
- audio_chunks_.pop_front();\r
- \r
- video_frames_.pop_front(); // Throw away\r
-\r
- frame_buffer_.push(frame1);\r
- }\r
- \r
- void interlace()\r
- { \r
- CASPAR_ASSERT(!video_frames_.empty());\r
- CASPAR_ASSERT(!audio_chunks_.empty());\r
-\r
- if(video_frames_.size() < 2 && !input_.eof())\r
- return;\r
-\r
- if(video_frames_.size() < 2)\r
- video_frames_.push_back(create_color_frame(this, frame_factory_, L"#00000000"));\r
- \r
- CASPAR_ASSERT(video_frames_.size() == 2);\r
-\r
- auto frame1 = video_frames_.front();\r
- video_frames_.pop_front();\r
-\r
- frame1->audio_data() = audio_chunks_.front();\r
- audio_chunks_.pop_front();\r
- \r
- auto frame2 = video_frames_.front();\r
- video_frames_.pop_front();\r
-\r
- frame_buffer_.push(core::basic_frame::interlace(frame1, frame2, format_desc_.mode)); \r
- }\r
- \r
- void deinterlace()\r
- {\r
- BOOST_THROW_EXCEPTION(not_implemented() << msg_info("deinterlace"));\r
- }\r
-\r
- void deinterlace_half()\r
- {\r
- BOOST_THROW_EXCEPTION(not_implemented() << msg_info("deinterlace_half"));\r
}\r
\r
virtual std::wstring print() const\r
#include <libavutil/imgutils.h>\r
#include <libavfilter/avfilter.h>\r
#include <libavfilter/avcodec.h>\r
- #include <libavfilter/vsrc_buffer.h>\r
#include <libavfilter/avfiltergraph.h>\r
+ #include <libavfilter/vsink_buffer.h>\r
+ #include <libavfilter/vsrc_buffer.h>\r
}\r
\r
namespace caspar {\r
\r
struct filter::implementation\r
{\r
- std::string filters_;\r
- std::shared_ptr<AVFilterGraph> graph_;\r
- AVFilterContext* video_in_filter_;\r
- AVFilterContext* video_out_filter_;\r
- size_t delay_;\r
- size_t count_;\r
-\r
- implementation(const std::string& filters) \r
- : filters_(filters)\r
- , delay_(0)\r
- , count_(0)\r
+ std::string filters_;\r
+ std::shared_ptr<AVFilterGraph> graph_; \r
+ AVFilterContext* buffersink_ctx_;\r
+ AVFilterContext* buffersrc_ctx_;\r
+ \r
+ implementation(const std::wstring& filters) \r
+ : filters_(narrow(filters))\r
{\r
std::transform(filters_.begin(), filters_.end(), filters_.begin(), ::tolower);\r
}\r
if(!graph_)\r
{\r
graph_.reset(avfilter_graph_alloc(), [](AVFilterGraph* p){avfilter_graph_free(&p);});\r
- \r
+ \r
// Input\r
- std::stringstream buffer_ss;\r
- buffer_ss << frame->width << ":" << frame->height << ":" << frame->format << ":" << 0 << ":" << 0 << ":" << 0 << ":" << 0; // don't care about pts and aspect_ratio\r
- errn = avfilter_graph_create_filter(&video_in_filter_, avfilter_get_by_name("buffer"), "src", buffer_ss.str().c_str(), NULL, graph_.get());\r
- if(errn < 0 || !video_in_filter_)\r
+ std::stringstream args;\r
+ args << frame->width << ":" << frame->height << ":" << frame->format << ":" << 0 << ":" << 0 << ":" << 0 << ":" << 0; // don't care about pts and aspect_ratio\r
+ errn = avfilter_graph_create_filter(&buffersrc_ctx_, avfilter_get_by_name("buffer"), "src", args.str().c_str(), NULL, graph_.get());\r
+ if(errn < 0)\r
{\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(av_error_str(errn)) <<\r
boost::errinfo_api_function("avfilter_graph_create_filter") << boost::errinfo_errno(AVUNERROR(errn)));\r
}\r
\r
+ PixelFormat pix_fmts[] = { PIX_FMT_BGRA, PIX_FMT_NONE };\r
+\r
// Output\r
- errn = avfilter_graph_create_filter(&video_out_filter_, avfilter_get_by_name("nullsink"), "out", NULL, NULL, graph_.get());\r
- if(errn < 0 || !video_out_filter_)\r
+ errn = avfilter_graph_create_filter(&buffersink_ctx_, avfilter_get_by_name("buffersink"), "out", NULL, pix_fmts, graph_.get());\r
+ if(errn < 0)\r
{\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(av_error_str(errn)) <<\r
boost::errinfo_api_function("avfilter_graph_create_filter") << boost::errinfo_errno(AVUNERROR(errn)));\r
}\r
\r
- AVFilterInOut* outputs = reinterpret_cast<AVFilterInOut*>(av_malloc(sizeof(AVFilterInOut)));\r
- AVFilterInOut* inputs = reinterpret_cast<AVFilterInOut*>(av_malloc(sizeof(AVFilterInOut)));\r
+ AVFilterInOut* outputs = avfilter_inout_alloc();\r
+ AVFilterInOut* inputs = avfilter_inout_alloc();\r
\r
outputs->name = av_strdup("in");\r
- outputs->filter_ctx = video_in_filter_;\r
+ outputs->filter_ctx = buffersrc_ctx_;\r
outputs->pad_idx = 0;\r
outputs->next = NULL;\r
\r
inputs->name = av_strdup("out");\r
- inputs->filter_ctx = video_out_filter_;\r
+ inputs->filter_ctx = buffersink_ctx_;\r
inputs->pad_idx = 0;\r
inputs->next = NULL;\r
\r
- errn = avfilter_graph_parse(graph_.get(), filters_.c_str(), inputs, outputs, NULL);\r
+ errn = avfilter_graph_parse(graph_.get(), filters_.c_str(), &inputs, &outputs, NULL);\r
if(errn < 0)\r
{\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(av_error_str(errn)) <<\r
boost::errinfo_api_function("avfilter_graph_parse") << boost::errinfo_errno(AVUNERROR(errn)));\r
}\r
-\r
-// av_free(outputs);\r
-// av_free(inputs);\r
-\r
+ \r
errn = avfilter_graph_config(graph_.get(), NULL);\r
if(errn < 0)\r
{\r
}\r
}\r
\r
- errn = av_vsrc_buffer_add_frame(video_in_filter_, frame.get(), 0);\r
+ errn = av_vsrc_buffer_add_frame(buffersrc_ctx_, frame.get(), 0);\r
if(errn < 0)\r
{\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(av_error_str(errn)) <<\r
boost::errinfo_api_function("av_vsrc_buffer_add_frame") << boost::errinfo_errno(AVUNERROR(errn)));\r
}\r
- ++count_;\r
}\r
\r
std::vector<safe_ptr<AVFrame>> poll()\r
{\r
std::vector<safe_ptr<AVFrame>> result;\r
\r
- if(!graph_ || count_ == 0)\r
+ if(!graph_)\r
return result;\r
\r
- --count_;\r
-\r
- int errn = avfilter_poll_frame(video_out_filter_->inputs[0]);\r
- if(errn < 0)\r
+ while (avfilter_poll_frame(buffersink_ctx_->inputs[0])) \r
{\r
- BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(av_error_str(errn)) <<\r
- boost::errinfo_api_function("avfilter_poll_frame") << boost::errinfo_errno(AVUNERROR(errn)));\r
- }\r
-\r
- if(errn == 0)\r
- ++delay_;\r
- \r
- std::generate_n(std::back_inserter(result), errn, [&]{return request_frame();});\r
+ AVFilterBufferRef *picref;\r
+ av_vsink_buffer_get_video_buffer_ref(buffersink_ctx_, &picref, 0);\r
+ if (picref) \r
+ { \r
+ safe_ptr<AVFrame> frame(avcodec_alloc_frame(), [=](AVFrame* p)\r
+ {\r
+ av_free(p);\r
+ avfilter_unref_buffer(picref);\r
+ });\r
+\r
+ avcodec_get_frame_defaults(frame.get()); \r
+\r
+ for(size_t n = 0; n < 4; ++n)\r
+ {\r
+ frame->data[n] = picref->data[n];\r
+ frame->linesize[n] = picref->linesize[n];\r
+ }\r
+ \r
+ frame->format = picref->format;\r
+ frame->width = picref->video->w;\r
+ frame->height = picref->video->h;\r
+ frame->interlaced_frame = picref->video->interlaced;\r
+ frame->top_field_first = picref->video->top_field_first;\r
+ frame->key_frame = picref->video->key_frame;\r
+\r
+ result.push_back(frame);\r
+ }\r
+ }\r
\r
return result;\r
}\r
- \r
- safe_ptr<AVFrame> request_frame()\r
- { \r
- auto link = video_out_filter_->inputs[0];\r
- \r
- int errn = avfilter_request_frame(link); \r
- if(errn < 0)\r
- {\r
- BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(av_error_str(errn)) <<\r
- boost::errinfo_api_function("avfilter_request_frame") << boost::errinfo_errno(AVUNERROR(errn)));\r
- }\r
- \r
- auto cur_buf = link->cur_buf;\r
- auto pic = reinterpret_cast<AVPicture*>(link->cur_buf->buf);\r
- \r
- safe_ptr<AVFrame> frame(avcodec_alloc_frame(), [=](AVFrame* p)\r
- {\r
- av_free(p);\r
- avfilter_unref_buffer(cur_buf);\r
- });\r
-\r
- avcodec_get_frame_defaults(frame.get()); \r
-\r
- for(size_t n = 0; n < 4; ++n)\r
- {\r
- frame->data[n] = pic->data[n];\r
- frame->linesize[n] = pic->linesize[n];\r
- }\r
-\r
- frame->width = link->cur_buf->video->w;\r
- frame->height = link->cur_buf->video->h;\r
- frame->format = link->cur_buf->format;\r
- frame->interlaced_frame = link->cur_buf->video->interlaced;\r
- frame->top_field_first = link->cur_buf->video->top_field_first;\r
- frame->key_frame = link->cur_buf->video->key_frame;\r
-\r
- return frame;\r
- }\r
};\r
\r
-filter::filter(const std::string& filters) : impl_(new implementation(filters)){}\r
+filter::filter(const std::wstring& filters) : impl_(new implementation(filters)){}\r
void filter::push(const safe_ptr<AVFrame>& frame) {impl_->push(frame);}\r
std::vector<safe_ptr<AVFrame>> filter::poll() {return impl_->poll();}\r
-size_t filter::delay() const{return impl_->delay_;}\r
}
\ No newline at end of file
\r
#include <common/memory/safe_ptr.h>\r
\r
+#include <string>\r
#include <vector>\r
\r
struct AVFrame;\r
\r
namespace caspar {\r
\r
+static bool double_rate(const std::wstring& filters)\r
+{\r
+ if(filters.find(L"YADIF=1") != std::string::npos)\r
+ return true;\r
+ \r
+ if(filters.find(L"YADIF=3") != std::string::npos)\r
+ return true;\r
+\r
+ return false;\r
+}\r
+\r
class filter\r
{\r
public:\r
- filter(const std::string& filters);\r
+ filter(const std::wstring& filters);\r
\r
void push(const safe_ptr<AVFrame>& frame);\r
std::vector<safe_ptr<AVFrame>> poll();\r
- size_t delay() const;\r
\r
private:\r
struct implementation;\r
namespace caspar {\r
\r
static const size_t MAX_BUFFER_COUNT = 128;\r
-static const size_t MAX_BUFFER_SIZE = 64 * 1000000;\r
+static const size_t MAX_BUFFER_SIZE = 32 * 1000000;\r
\r
struct input::implementation : boost::noncopyable\r
{ \r
}\r
return result;\r
}\r
- \r
- AVStream* stream(AVMediaType media_type)\r
- {\r
- const auto streams = boost::iterator_range<AVStream**>(format_context_->streams, format_context_->streams + format_context_->nb_streams);\r
- const auto it = boost::find_if(streams, [&](AVStream* stream) \r
- {\r
- return stream && stream->codec->codec_type == media_type;\r
- });\r
- \r
- if(it == streams.end()) \r
- return nullptr;\r
-\r
- return *it;\r
- }\r
\r
private:\r
\r
\r
input::input(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start, int length) \r
: impl_(new implementation(graph, filename, loop, start)){}\r
-AVStream* input::stream(AVMediaType media_type){return impl_->stream(media_type);}\r
bool input::eof() const {return !impl_->executor_.is_running();}\r
bool input::try_pop(std::shared_ptr<AVPacket>& packet){return impl_->try_pop(packet);}\r
+std::shared_ptr<AVFormatContext> input::context(){return impl_->format_context_;}\r
}
\ No newline at end of file
bool try_pop(std::shared_ptr<AVPacket>& packet);\r
bool eof() const;\r
\r
- AVStream* stream(AVMediaType media_type);\r
+ std::shared_ptr<AVFormatContext> context();\r
private:\r
struct implementation;\r
std::shared_ptr<implementation> impl_;\r
\r
#include "video_decoder.h"\r
#include "../util.h"\r
+#include "../filter/filter.h"\r
\r
#include "../../ffmpeg_error.h"\r
#include "../../tbb_avcodec.h"\r
core::video_mode::type mode_;\r
\r
std::queue<std::shared_ptr<AVPacket>> packet_buffer_;\r
+\r
+ std::unique_ptr<filter> filter_;\r
+\r
+ double fps_;\r
public:\r
- explicit implementation(AVStream* stream, const safe_ptr<core::frame_factory>& frame_factory) \r
+ explicit implementation(const std::shared_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter) \r
: frame_factory_(frame_factory)\r
, mode_(core::video_mode::invalid)\r
+ //, filter_(filter.empty() ? nullptr : new caspar::filter(filter))\r
{\r
- if(!stream || !stream->codec)\r
- return;\r
+ AVCodec* dec;\r
+ index_ = av_find_best_stream(context.get(), AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);\r
\r
- auto codec = avcodec_find_decoder(stream->codec->codec_id); \r
- if(!codec)\r
+ if(index_ < 0)\r
return;\r
\r
- int errn = tbb_avcodec_open(stream->codec, codec);\r
+ int errn = tbb_avcodec_open(context->streams[index_]->codec, dec);\r
if(errn < 0)\r
return;\r
\r
- index_ = stream->index;\r
- codec_context_.reset(stream->codec, tbb_avcodec_close);\r
+ codec_context_.reset(context->streams[index_]->codec, tbb_avcodec_close);\r
\r
// Some files give an invalid time_base numerator, try to fix it.\r
if(codec_context_ && codec_context_->time_base.num == 1)\r
codec_context_->time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(codec_context_->time_base.den)))-1)); \r
+\r
+ fps_ = static_cast<double>(codec_context_->time_base.den) / static_cast<double>(codec_context_->time_base.num);\r
+ //if(double_rate(filter))\r
+ // fps_ *= 2;\r
}\r
\r
void push(const std::shared_ptr<AVPacket>& packet)\r
}\r
}\r
}\r
-\r
+ \r
return result;\r
}\r
\r
\r
double fps() const\r
{\r
- return static_cast<double>(codec_context_->time_base.den) / static_cast<double>(codec_context_->time_base.num);\r
+ return fps_;\r
}\r
};\r
\r
-video_decoder::video_decoder(AVStream* stream, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(stream, frame_factory)){}\r
+video_decoder::video_decoder(const std::shared_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter) : impl_(new implementation(context, frame_factory, filter)){}\r
void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
std::vector<safe_ptr<core::write_frame>> video_decoder::poll(){return impl_->poll();}\r
bool video_decoder::ready() const{return impl_->ready();}\r
class video_decoder : boost::noncopyable\r
{\r
public:\r
- explicit video_decoder(AVStream* stream, const safe_ptr<core::frame_factory>& frame_factory);\r
+ explicit video_decoder(const std::shared_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter);\r
\r
void push(const std::shared_ptr<AVPacket>& packet);\r
bool ready() const;\r
<channel>\r
<video-mode>1080i5000</video-mode>\r
<consumers>\r
- <decklink>\r
+ <screen></screen>\r
+ <audio></audio>\r
+ <!--<decklink>\r
<device>2</device>\r
<embedded-audio>true</embedded-audio>\r
- </decklink>\r
+ </decklink>-->\r
</consumers>\r
</channel>\r
</channels>\r