// Methods \r
\r
virtual class draw_frame receive() = 0;\r
- virtual boost::unique_future<std::wstring> call(const std::wstring&);\r
+ virtual boost::unique_future<std::wstring> call(const std::wstring& params);\r
\r
// monitor::observable\r
\r
#include "../frame/frame_factory.h"\r
\r
#include <common/executor.h>\r
+#include <common/future.h>\r
#include <common/diagnostics/graph.h>\r
\r
#include <core/frame/frame_transform.h>\r
return layers_[index].info();\r
}, task_priority::high_priority);\r
} \r
+ \r
+ boost::unique_future<std::wstring> call(int index, const std::wstring& params)\r
+ {\r
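+		// The producer's call() already returns a future, so begin_invoke yields a nested future;
+		// make_shared/flatten (from <common/future.h>, added above) collapse it into a single boost::unique_future.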
+ return flatten(executor_.begin_invoke([=]\r
+ {\r
+ return make_shared(layers_[index].foreground()->call(params));\r
+ }, task_priority::high_priority));\r
+ }\r
};\r
\r
stage::stage(spl::shared_ptr<diagnostics::graph> graph) : impl_(new impl(std::move(graph))){}\r
+boost::unique_future<std::wstring> stage::call(int index, const std::wstring& params){return impl_->call(index, params);}\r
void stage::apply_transforms(const std::vector<stage::transform_tuple_t>& transforms){impl_->apply_transforms(transforms);}\r
void stage::apply_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration, const tweener& tween){impl_->apply_transform(index, transform, mix_duration, tween);}\r
void stage::clear_transforms(int index){impl_->clear_transforms(index);}\r
void swap_layer(int index, int other_index);\r
void swap_layer(int index, int other_index, stage& other);\r
\r
+ boost::unique_future<std::wstring> call(int index, const std::wstring& params);\r
+\r
// monitor::observable\r
\r
void subscribe(const monitor::observable::observer_ptr& o) override;\r
</Filter>\r
</ItemGroup>\r
<ItemGroup>\r
- <ClCompile Include="producer\video\video_decoder.cpp">\r
- <Filter>source\producer\video</Filter>\r
- </ClCompile>\r
<ClCompile Include="producer\audio\audio_decoder.cpp">\r
<Filter>source\producer\audio</Filter>\r
</ClCompile>\r
<ClCompile Include="ffmpeg_error.cpp">\r
<Filter>source</Filter>\r
</ClCompile>\r
+ <ClCompile Include="producer\video\video_decoder.cpp">\r
+ <Filter>source\producer\video</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
<ItemGroup>\r
<ClInclude Include="producer\ffmpeg_producer.h">\r
{ \r
monitor::basic_subject event_subject_;\r
int index_;\r
- const spl::shared_ptr<AVCodecContext> codec_context_; \r
+ const std::shared_ptr<AVCodecContext> codec_context_; \r
const core::video_format_desc format_desc_;\r
\r
- audio_resampler resampler_;\r
+ boost::optional<audio_resampler> resampler_;\r
\r
std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> buffer1_;\r
\r
const int64_t nb_frames_;\r
tbb::atomic<uint32_t> file_frame_number_;\r
public:\r
- explicit impl(const spl::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) \r
+ explicit impl() \r
+		: nb_frames_(0)
+ { \r
+ file_frame_number_ = 0; \r
+ }\r
+\r
+ explicit impl(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) \r
: format_desc_(format_desc) \r
, codec_context_(open_codec(*context, AVMEDIA_TYPE_AUDIO, index_))\r
- , resampler_(format_desc.audio_channels, codec_context_->channels,\r
+ , resampler_(audio_resampler(format_desc.audio_channels, codec_context_->channels,\r
format_desc.audio_sample_rate, codec_context_->sample_rate,\r
- AV_SAMPLE_FMT_S32, codec_context_->sample_fmt)\r
+ AV_SAMPLE_FMT_S32, codec_context_->sample_fmt))\r
, buffer1_(AVCODEC_MAX_AUDIO_FRAME_SIZE*2)\r
, nb_frames_(0)//context->streams[index_]->nb_frames)\r
{ \r
return nullptr;\r
\r
auto packet = packets_.front();\r
-\r
- if(packet->data == nullptr)\r
+ \r
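+		// A null codec context means the file has no audio stream: hand back a flush marker or empty (silent) audio instead of stalling.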
+ if(!codec_context_) \r
{\r
packets_.pop();\r
- file_frame_number_ = static_cast<uint32_t>(packet->pos);\r
- avcodec_flush_buffers(codec_context_.get());\r
- return flush_audio();\r
+ return packet->data == nullptr ? flush_audio() : empty_audio();\r
}\r
+ else\r
+ {\r
+ if(packet->data == nullptr)\r
+ {\r
+ packets_.pop();\r
+ file_frame_number_ = static_cast<uint32_t>(packet->pos);\r
+ avcodec_flush_buffers(codec_context_.get());\r
+ return flush_audio();\r
+ }\r
\r
- auto audio = decode(*packet);\r
+ auto audio = decode(*packet);\r
\r
- if(packet->size == 0) \r
- packets_.pop();\r
+ if(packet->size == 0) \r
+ packets_.pop();\r
\r
- return audio;\r
+ return audio;\r
+ }\r
}\r
\r
std::shared_ptr<core::audio_buffer> decode(AVPacket& pkt)\r
\r
buffer1_.resize(written_bytes);\r
\r
- buffer1_ = resampler_.resample(std::move(buffer1_));\r
+ buffer1_ = resampler_->resample(std::move(buffer1_));\r
\r
const auto n_samples = buffer1_.size() / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);\r
const auto samples = reinterpret_cast<int32_t*>(buffer1_.data());\r
\r
bool ready() const\r
{\r
- return !packets_.empty();\r
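+		// No audio stream: always report ready so the producer is not left waiting for packets that will never arrive.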
+ return !codec_context_ || !packets_.empty();\r
+ }\r
+ \r
+ void clear()\r
+ {\r
+ while(!packets_.empty())\r
+ packets_.pop();\r
}\r
\r
uint32_t nb_frames() const\r
}\r
};\r
\r
-audio_decoder::audio_decoder(const spl::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) : impl_(new impl(context, format_desc)){}\r
+audio_decoder::audio_decoder() : impl_(new impl()){}\r
+audio_decoder::audio_decoder(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) : impl_(new impl(context, format_desc)){}\r
+audio_decoder::audio_decoder(audio_decoder&& other) : impl_(std::move(other.impl_)){}\r
+audio_decoder& audio_decoder::operator=(audio_decoder&& other){impl_ = std::move(other.impl_); return *this;}\r
void audio_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
bool audio_decoder::ready() const{return impl_->ready();}\r
std::shared_ptr<core::audio_buffer> audio_decoder::poll(){return impl_->poll();}\r
uint32_t audio_decoder::nb_frames() const{return impl_->nb_frames();}\r
uint32_t audio_decoder::file_frame_number() const{return impl_->file_frame_number_;}\r
std::wstring audio_decoder::print() const{return impl_->print();}\r
+void audio_decoder::clear(){impl_->clear();}\r
void audio_decoder::subscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.subscribe(o);}\r
void audio_decoder::unsubscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.unsubscribe(o);}\r
\r
, boost::noncopyable\r
{\r
public:\r
- explicit audio_decoder(const spl::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc);\r
+ explicit audio_decoder();\r
+ explicit audio_decoder(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc);\r
\r
+ audio_decoder(audio_decoder&& other);\r
+ audio_decoder& operator=(audio_decoder&& other);\r
+\r
bool ready() const;\r
void push(const std::shared_ptr<AVPacket>& packet);\r
std::shared_ptr<core::audio_buffer> poll();\r
\r
std::wstring print() const;\r
\r
+ void clear();\r
+\r
// monitor::observable\r
\r
void subscribe(const monitor::observable::observer_ptr& o) override;\r
const core::video_format_desc format_desc_;\r
\r
input input_; \r
- std::unique_ptr<video_decoder> video_decoder_;\r
- std::unique_ptr<audio_decoder> audio_decoder_; \r
- std::unique_ptr<frame_muxer> muxer_;\r
\r
const double fps_;\r
const uint32_t start_;\r
\r
+ video_decoder video_decoder_;\r
+ audio_decoder audio_decoder_; \r
+ frame_muxer muxer_;\r
+\r
int64_t frame_number_;\r
\r
core::draw_frame last_frame_;\r
, format_desc_(format_desc)\r
, input_(graph_, filename_, loop, start, length)\r
, fps_(read_fps(*input_.context(), format_desc_.fps))\r
+ , muxer_(fps_, frame_factory, format_desc_, filter)\r
, start_(start)\r
, frame_number_(0)\r
, last_frame_(core::draw_frame::empty())\r
\r
try\r
{\r
- video_decoder_.reset(new video_decoder(input_.context()));\r
- video_decoder_->subscribe(event_subject_);\r
- CASPAR_LOG(info) << print() << L" " << video_decoder_->print();\r
+ video_decoder_ = video_decoder(input_.context());\r
+ video_decoder_.subscribe(event_subject_);\r
+ CASPAR_LOG(info) << print() << L" " << video_decoder_.print();\r
}\r
catch(averror_stream_not_found&)\r
{\r
\r
try\r
{\r
- audio_decoder_.reset(new audio_decoder(input_.context(), format_desc_));\r
- audio_decoder_->subscribe(event_subject_);\r
- CASPAR_LOG(info) << print() << L" " << audio_decoder_->print();\r
+ audio_decoder_ = audio_decoder(input_.context(), format_desc_);\r
+ audio_decoder_.subscribe(event_subject_);\r
+ CASPAR_LOG(info) << print() << L" " << audio_decoder_.print();\r
}\r
catch(averror_stream_not_found&)\r
{\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
CASPAR_LOG(warning) << print() << " Failed to open audio-stream. Running without audio."; \r
} \r
-\r
- if(!video_decoder_ && !audio_decoder_)\r
- BOOST_THROW_EXCEPTION(averror_stream_not_found() << msg_info("No streams found"));\r
-\r
- muxer_.reset(new frame_muxer(fps_, frame_factory, format_desc_, filter));\r
-\r
+ \r
CASPAR_LOG(info) << print() << L" Initialized";\r
}\r
\r
boost::timer frame_timer;\r
\r
auto frame = core::draw_frame::late(); \r
+ \r
if(!try_decode_frame(frame))\r
{\r
if(!input_.eof()) \r
uint32_t nb_frames = file_nb_frames();\r
\r
nb_frames = std::min(input_.length(), nb_frames);\r
- nb_frames = muxer_->calc_nb_frames(nb_frames);\r
+ nb_frames = muxer_.calc_nb_frames(nb_frames);\r
\r
return nb_frames > start_ ? nb_frames - start_ : 0;\r
}\r
uint32_t file_nb_frames() const\r
{\r
uint32_t file_nb_frames = 0;\r
- file_nb_frames = std::max(file_nb_frames, video_decoder_ ? video_decoder_->nb_frames() : 0);\r
- file_nb_frames = std::max(file_nb_frames, audio_decoder_ ? audio_decoder_->nb_frames() : 0);\r
+ file_nb_frames = std::max(file_nb_frames, video_decoder_.nb_frames());\r
+ file_nb_frames = std::max(file_nb_frames, audio_decoder_.nb_frames());\r
return file_nb_frames;\r
}\r
\r
uint32_t file_frame_number() const\r
{\r
- return video_decoder_ ? video_decoder_->file_frame_number() : 0;\r
+ return video_decoder_.file_frame_number();\r
}\r
- \r
+ \r
boost::unique_future<std::wstring> call(const std::wstring& param) override\r
{\r
static const boost::wregex loop_exp(L"LOOP\\s*(?<VALUE>\\d?)?", boost::regex::icase);\r
auto value = what["VALUE"].str();\r
if(!value.empty())\r
input_.loop(boost::lexical_cast<bool>(value));\r
- result = boost::lexical_cast<std::wstring>(input_.loop());\r
+ result = boost::lexical_cast<std::wstring>(loop());\r
}\r
else if(boost::regex_match(param, what, seek_exp))\r
{\r
auto value = what["VALUE"].str();\r
- input_.seek(boost::lexical_cast<uint32_t>(value));\r
+ seek(boost::lexical_cast<uint32_t>(value));\r
}\r
else if(boost::regex_match(param, what, length_exp))\r
{\r
auto value = what["VALUE"].str();\r
if(!value.empty())\r
- {\r
- if(boost::iequals(value, "NaN") || boost::iequals(value, "-1"))\r
- input_.length(std::numeric_limits<uint32_t>::max());\r
- else\r
- input_.length(boost::lexical_cast<uint32_t>(value));\r
- }\r
- result = boost::lexical_cast<std::wstring>(input_.length());\r
+ length(boost::lexical_cast<uint32_t>(value)); \r
+ result = boost::lexical_cast<std::wstring>(length());\r
}\r
else if(boost::regex_match(param, what, start_exp))\r
{\r
auto value = what["VALUE"].str();\r
if(!value.empty())\r
- input_.start(boost::lexical_cast<uint32_t>(value));\r
- result = boost::lexical_cast<std::wstring>(input_.start());\r
+ start(boost::lexical_cast<uint32_t>(value));\r
+ result = boost::lexical_cast<std::wstring>(start());\r
}\r
else\r
BOOST_THROW_EXCEPTION(invalid_argument());\r
boost::property_tree::wptree info;\r
info.add(L"type", L"ffmpeg");\r
info.add(L"filename", filename_);\r
- info.add(L"width", video_decoder_ ? video_decoder_->width() : 0);\r
- info.add(L"height", video_decoder_ ? video_decoder_->height() : 0);\r
- info.add(L"progressive", video_decoder_ ? video_decoder_->is_progressive() : false);\r
+ info.add(L"width", video_decoder_.width());\r
+ info.add(L"height", video_decoder_.height());\r
+ info.add(L"progressive", video_decoder_.is_progressive());\r
info.add(L"fps", fps_);\r
info.add(L"loop", input_.loop());\r
info.add(L"frame-number", frame_number_);\r
}\r
\r
// ffmpeg_producer\r
+ \r
+ void loop(bool value)\r
+ {\r
+ input_.loop(value);\r
+ }\r
+\r
+ bool loop() const\r
+ {\r
+ return input_.loop();\r
+ }\r
+\r
+ void length(uint32_t value)\r
+ {\r
+ input_.length(value);\r
+ }\r
+\r
+ uint32_t length()\r
+ {\r
+ return input_.length();\r
+ }\r
+ \r
+ void start(uint32_t value)\r
+ {\r
+ input_.start(value);\r
+ }\r
+\r
+ uint32_t start()\r
+ {\r
+ return input_.start();\r
+ }\r
+\r
+ void seek(uint32_t target)\r
+ {\r
+ muxer_.clear();\r
+ video_decoder_.clear();\r
+ audio_decoder_.clear();\r
+ \r
+ input_.seek(target);\r
+ \r
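+		// Pump the pipeline a few times so the seek flush propagates through the decoders and muxer.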
+ receive();\r
+ receive();\r
+ receive();\r
+ receive();\r
+ }\r
\r
std::wstring print_mode() const\r
{\r
- return video_decoder_ ? ffmpeg::print_mode(video_decoder_->width(), video_decoder_->height(), fps_, !video_decoder_->is_progressive()) : L"n/a";\r
+ return ffmpeg::print_mode(video_decoder_.width(), video_decoder_.height(), fps_, !video_decoder_.is_progressive());\r
}\r
\r
bool try_decode_frame(core::draw_frame& result)\r
{\r
for(int n = 0; n < 32; ++n)\r
{\r
- if(muxer_->try_pop(result)) \r
+ if(muxer_.try_pop(result)) \r
return true; \r
\r
std::shared_ptr<AVPacket> pkt;\r
-\r
- for(int n = 0; n < 32 && ((video_decoder_ && !video_decoder_->ready() && !muxer_->video_ready()) || \r
- (audio_decoder_ && !audio_decoder_->ready() && !muxer_->audio_ready())) && \r
- input_.try_pop(pkt); ++n)\r
+ for(int n = 0; n < 32 && (!video_decoder_.ready() || !audio_decoder_.ready()) && input_.try_pop(pkt); ++n)\r
{\r
- if(video_decoder_)\r
- video_decoder_->push(pkt);\r
- if(audio_decoder_)\r
- audio_decoder_->push(pkt);\r
+ video_decoder_.push(pkt);\r
+ audio_decoder_.push(pkt);\r
}\r
\r
std::shared_ptr<AVFrame> video;\r
(\r
[&]\r
{\r
- video = !muxer_->video_ready() && video_decoder_ ? video_decoder_->poll() : nullptr; \r
+ if(!muxer_.video_ready())\r
+ video = video_decoder_.poll(); \r
},\r
[&]\r
{ \r
- audio = !muxer_->audio_ready() && audio_decoder_ ? audio_decoder_->poll() : nullptr; \r
+ if(!muxer_.audio_ready())\r
+ audio = audio_decoder_.poll(); \r
}\r
);\r
\r
- muxer_->push(video);\r
- muxer_->push(audio);\r
-\r
- if(!audio_decoder_)\r
- {\r
- if(video == flush_video())\r
- muxer_->push(flush_audio());\r
- else if(!muxer_->audio_ready())\r
- muxer_->push(empty_audio());\r
- }\r
-\r
- if(!video_decoder_)\r
- {\r
- if(audio == flush_audio())\r
- muxer_->push(flush_video());\r
- else if(!muxer_->video_ready())\r
- muxer_->push(empty_video());\r
- }\r
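+			// With missing streams now handled inside the decoders (empty/flush frames), the muxer can simply be fed both results unconditionally.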
+ muxer_.push(video);\r
+ muxer_.push(audio);\r
}\r
\r
return false;\r
buffer_size_ = 0;\r
\r
if(start_ > 0) \r
- queued_seek(start_);\r
+ seek(start_, false);\r
\r
graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f)); \r
graph_->set_color("buffer-count", diagnostics::color(0.7f, 0.4f, 0.4f));\r
return result;\r
}\r
\r
- void seek(uint32_t target)\r
+ void seek(uint32_t target, bool clear)\r
{\r
- executor_.begin_invoke([=]\r
+ executor_.invoke([=]\r
{\r
- std::shared_ptr<AVPacket> packet;\r
- while(buffer_.try_pop(packet) && packet)\r
- buffer_size_ -= packet->size;\r
+ if(clear)\r
+ {\r
+ std::shared_ptr<AVPacket> packet;\r
+ while(buffer_.try_pop(packet) && packet)\r
+ buffer_size_ -= packet->size;\r
+ }\r
+ \r
+ CASPAR_LOG(debug) << print() << " Seeking: " << target;\r
\r
- queued_seek(target);\r
+ int flags = AVSEEK_FLAG_FRAME;\r
+ if(target == 0)\r
+ {\r
+ // Fix VP6 seeking\r
+ int vid_stream_index = av_find_best_stream(format_context_.get(), AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);\r
+ if(vid_stream_index >= 0)\r
+ {\r
+ auto codec_id = format_context_->streams[vid_stream_index]->codec->codec_id;\r
+ if(codec_id == CODEC_ID_VP6A || codec_id == CODEC_ID_VP6F || codec_id == CODEC_ID_VP6)\r
+ flags = AVSEEK_FLAG_BYTE;\r
+ }\r
+ }\r
+ \r
+ auto stream = format_context_->streams[default_stream_index_];\r
+ auto codec = stream->codec;\r
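+			// Convert the frame-number target into a timestamp in the stream's time_base (frame duration = codec time_base * ticks_per_frame).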
+ auto fixed_target = (target*stream->time_base.den*codec->time_base.num)/(stream->time_base.num*codec->time_base.den)*codec->ticks_per_frame;\r
+ \r
+ THROW_ON_ERROR2(avformat_seek_file(format_context_.get(), default_stream_index_, std::numeric_limits<int64_t>::min(), fixed_target, std::numeric_limits<int64_t>::max(), 0), print()); \r
+ \r
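+			// Queue a null-data packet as a flush marker; pos carries the seek target so the decoders can reset their frame counters.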
+ auto flush_packet = create_packet();\r
+ flush_packet->data = nullptr;\r
+ flush_packet->size = 0;\r
+ flush_packet->pos = target;\r
\r
+ buffer_.push(flush_packet);\r
+ \r
tick();\r
}, task_priority::high_priority);\r
}\r
\r
if(is_eof(ret)) \r
{\r
- frame_number_ = 0;\r
+ frame_number_ = 0;\r
\r
if(loop_)\r
{\r
- queued_seek(start_);\r
+ seek(start_, false);\r
graph_->set_tag("seek"); \r
CASPAR_LOG(trace) << print() << " Looping."; \r
}\r
});\r
} \r
\r
- void queued_seek(const uint32_t target)\r
- { \r
- CASPAR_LOG(debug) << print() << " Seeking: " << target;\r
-\r
- int flags = AVSEEK_FLAG_FRAME;\r
- if(target == 0)\r
- {\r
- // Fix VP6 seeking\r
- int vid_stream_index = av_find_best_stream(format_context_.get(), AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);\r
- if(vid_stream_index >= 0)\r
- {\r
- auto codec_id = format_context_->streams[vid_stream_index]->codec->codec_id;\r
- if(codec_id == CODEC_ID_VP6A || codec_id == CODEC_ID_VP6F || codec_id == CODEC_ID_VP6)\r
- flags = AVSEEK_FLAG_BYTE;\r
- }\r
- }\r
- \r
- auto stream = format_context_->streams[default_stream_index_];\r
- auto codec = stream->codec;\r
- auto fixed_target = (target*stream->time_base.den*codec->time_base.num)/(stream->time_base.num*codec->time_base.den)*codec->ticks_per_frame;\r
- \r
- THROW_ON_ERROR2(avformat_seek_file(format_context_.get(), default_stream_index_, std::numeric_limits<int64_t>::min(), fixed_target, std::numeric_limits<int64_t>::max(), 0), print()); \r
- \r
- auto flush_packet = create_packet();\r
- flush_packet->data = nullptr;\r
- flush_packet->size = 0;\r
- flush_packet->pos = target;\r
-\r
- buffer_.push(flush_packet);\r
- } \r
-\r
bool is_eof(int ret)\r
{\r
#pragma warning (disable : 4146)\r
spl::shared_ptr<AVFormatContext> input::context(){return impl_->format_context_;}\r
void input::loop(bool value){impl_->loop_ = value;}\r
bool input::loop() const{return impl_->loop_;}\r
-void input::seek(uint32_t target){impl_->seek(target);}\r
+void input::seek(uint32_t target){impl_->seek(target, true);}\r
void input::start(uint32_t value){impl_->start_ = value;}\r
uint32_t input::start() const{return impl_->start_;}\r
void input::length(uint32_t value){impl_->length_ = value;}\r
public:\r
explicit input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, uint32_t start, uint32_t length);\r
\r
- bool try_pop(std::shared_ptr<AVPacket>& packet);\r
- bool eof() const;\r
-\r
- void loop(bool value);\r
- bool loop() const;\r
- void start(uint32_t value);\r
- uint32_t start() const;\r
- void length(uint32_t value);\r
- uint32_t length() const;\r
-\r
- void seek(uint32_t target);\r
+ bool try_pop(std::shared_ptr<AVPacket>& packet);\r
+ bool eof() const;\r
+ void loop(bool value);\r
+ bool loop() const;\r
+ void start(uint32_t value);\r
+ uint32_t start() const;\r
+ void length(uint32_t value);\r
+ uint32_t length() const;\r
+ void seek(uint32_t target);\r
\r
spl::shared_ptr<AVFormatContext> context();\r
private:\r
\r
return static_cast<uint32_t>(nb_frames2);\r
}\r
+\r
+ void clear()\r
+ {\r
+ while(!video_streams_.empty())\r
+ video_streams_.pop(); \r
+ while(!audio_streams_.empty())\r
+ audio_streams_.pop();\r
+ while(!frame_buffer_.empty())\r
+ frame_buffer_.pop();\r
+\r
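+		// Keep one empty stream of each type so subsequent pushes have an active stream to append to.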
+ video_streams_.push(std::queue<core::mutable_frame>());\r
+ audio_streams_.push(core::audio_buffer());\r
+ }\r
};\r
\r
frame_muxer::frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter)\r
void frame_muxer::push(const std::shared_ptr<AVFrame>& video_frame){impl_->push(video_frame);}\r
void frame_muxer::push(const std::shared_ptr<core::audio_buffer>& audio_samples){return impl_->push(audio_samples);}\r
bool frame_muxer::try_pop(core::draw_frame& result){return impl_->try_pop(result);}\r
+void frame_muxer::clear(){impl_->clear();}\r
uint32_t frame_muxer::calc_nb_frames(uint32_t nb_frames) const {return impl_->calc_nb_frames(nb_frames);}\r
bool frame_muxer::video_ready() const{return impl_->video_ready();}\r
bool frame_muxer::audio_ready() const{return impl_->audio_ready();}\r
bool video_ready() const;\r
bool audio_ready() const;\r
\r
+ void clear();\r
bool try_pop(core::draw_frame& result);\r
\r
uint32_t calc_nb_frames(uint32_t nb_frames) const;\r
{\r
monitor::basic_subject event_subject_;\r
int index_;\r
- const spl::shared_ptr<AVCodecContext> codec_context_;\r
+ const std::shared_ptr<AVCodecContext> codec_context_;\r
\r
std::queue<spl::shared_ptr<AVPacket>> packets_;\r
\r
tbb::atomic<uint32_t> file_frame_number_;\r
\r
public:\r
+ explicit impl() \r
+ : nb_frames_(0)\r
+ , width_(0)\r
+ , height_(0)\r
+ , is_progressive_(true)\r
+ {\r
+ file_frame_number_ = 0;\r
+ }\r
+\r
explicit impl(const spl::shared_ptr<AVFormatContext>& context) \r
: codec_context_(open_codec(*context, AVMEDIA_TYPE_VIDEO, index_))\r
, nb_frames_(static_cast<uint32_t>(context->streams[index_]->nb_frames))\r
return nullptr;\r
\r
auto packet = packets_.front();\r
+ \r
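+		// A null codec context means the file has no video stream: return a flush marker or empty video so playback can continue with audio only.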
+ if(!codec_context_) \r
+ {\r
+ packets_.pop();\r
+ return packet->data == nullptr ? flush_video() : empty_video();\r
+ }\r
+ else\r
+ {\r
+ if(packet->data == nullptr)\r
+ { \r
+ if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)\r
+ {\r
+ auto video = decode(*packet);\r
+ if(video)\r
+ return video;\r
+ }\r
\r
- if(packet->data == nullptr)\r
- { \r
- if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)\r
- {\r
- auto video = decode(*packet);\r
- if(video)\r
- return video;\r
+ packets_.pop();\r
+ file_frame_number_ = static_cast<uint32_t>(packet->pos);\r
+ avcodec_flush_buffers(codec_context_.get());\r
+ return flush_video(); \r
}\r
- \r
+ \r
packets_.pop();\r
- file_frame_number_ = static_cast<uint32_t>(packet->pos);\r
- avcodec_flush_buffers(codec_context_.get());\r
- return flush_video(); \r
+ return decode(*packet);\r
}\r
- \r
- packets_.pop();\r
- return decode(*packet);\r
+ \r
}\r
\r
std::shared_ptr<AVFrame> decode(AVPacket& pkt)\r
\r
bool ready() const\r
{\r
- return !packets_.empty();\r
+ return !codec_context_ || !packets_.empty();\r
+ }\r
+\r
+ void clear()\r
+ {\r
+ while(!packets_.empty())\r
+ packets_.pop();\r
}\r
\r
uint32_t nb_frames() const\r
}\r
};\r
\r
+video_decoder::video_decoder() : impl_(new impl()){}\r
video_decoder::video_decoder(const spl::shared_ptr<AVFormatContext>& context) : impl_(new impl(context)){}\r
+video_decoder::video_decoder(video_decoder&& other) : impl_(std::move(other.impl_)){}\r
+video_decoder& video_decoder::operator=(video_decoder&& other){impl_ = std::move(other.impl_); return *this;}\r
void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
std::shared_ptr<AVFrame> video_decoder::poll(){return impl_->poll();}\r
bool video_decoder::ready() const{return impl_->ready();}\r
uint32_t video_decoder::file_frame_number() const{return impl_->file_frame_number_;}\r
bool video_decoder::is_progressive() const{return impl_->is_progressive_;}\r
std::wstring video_decoder::print() const{return impl_->print();}\r
+void video_decoder::clear(){impl_->clear();}\r
void video_decoder::subscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.subscribe(o);}\r
void video_decoder::unsubscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.unsubscribe(o);}\r
\r
, boost::noncopyable\r
{\r
public:\r
+ explicit video_decoder();\r
explicit video_decoder(const spl::shared_ptr<AVFormatContext>& context);\r
\r
+ video_decoder(video_decoder&& other);\r
+ video_decoder& operator=(video_decoder&& other);\r
+\r
bool ready() const;\r
void push(const std::shared_ptr<AVPacket>& packet);\r
std::shared_ptr<AVFrame> poll();\r
\r
- int width() const;\r
- int height() const;\r
+ int width() const;\r
+ int height() const;\r
\r
uint32_t nb_frames() const;\r
uint32_t file_frame_number() const;\r
\r
- bool is_progressive() const;\r
+ bool is_progressive() const;\r
\r
std::wstring print() const;\r
+\r
+ void clear();\r
\r
// monitor::observable\r
\r
for(auto it = std::begin(_parameters2); it != std::end(_parameters2); ++it, param += L" ")\r
param += *it;\r
\r
- auto producer = GetChannel()->stage().foreground(GetLayerIndex()).get();\r
-\r
- auto result = producer->call(boost::trim_copy(param));\r
-\r
+ auto result = GetChannel()->stage().call(GetLayerIndex(), boost::trim_copy(param));\r
+ \r
if(!result.timed_wait(boost::posix_time::seconds(2)))\r
BOOST_THROW_EXCEPTION(timed_out());\r
\r