}\r
\r
std::shared_ptr<core::audio_buffer> poll()\r
- {\r
+ { \r
+ auto result = std::make_shared<core::audio_buffer>();\r
+\r
if(!codec_context_)\r
- return empty_audio();\r
+ {\r
+ result = empty_audio();\r
+ return result;\r
+ }\r
\r
std::shared_ptr<AVPacket> packet;\r
if(!input_->try_pop_audio(packet))\r
- return nullptr;\r
+ return result;\r
\r
if(packet == flush_packet())\r
{\r
avcodec_flush_buffers(codec_context_.get());\r
- return nullptr;\r
+ return result;\r
}\r
- else if(packet == null_packet())\r
+ \r
+ if(packet == null_packet())\r
{\r
if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)\r
{\r
AVPacket pkt; \r
av_init_packet(&pkt);\r
- pkt.data = nullptr;\r
- pkt.size = 0;\r
- return decode(pkt);\r
+ pkt.data = nullptr;\r
+ pkt.size = 0;\r
+ \r
+ core::audio_buffer audio;\r
+ while(decode(pkt, audio))\r
+ boost::range::push_back(*result, audio);\r
}\r
- return nullptr;\r
+ return result;\r
+ }\r
+\r
+ while(packet->size > 0)\r
+ {\r
+ core::audio_buffer audio;\r
+ if(decode(*packet, audio))\r
+ boost::range::push_back(*result, audio); \r
}\r
\r
- return decode(*packet);\r
+ event_subject_ << monitor::event("file/audio/sample-rate") % codec_context_->sample_rate\r
+ << monitor::event("file/audio/channels") % codec_context_->channels\r
+ << monitor::event("file/audio/format") % u8(av_get_sample_fmt_name(codec_context_->sample_fmt))\r
+ << monitor::event("file/audio/codec") % u8(codec_context_->codec->long_name); \r
+ \r
+ return result;\r
}\r
\r
- std::shared_ptr<core::audio_buffer> decode(AVPacket& pkt)\r
+ bool decode(AVPacket& pkt, core::audio_buffer& result)\r
{ \r
- auto audio = std::make_shared<core::audio_buffer>();\r
- \r
- while(pkt.size > 0)\r
- { \r
- std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
+ std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
\r
- int got_frame = 0;\r
- auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), decoded_frame.get(), &got_frame, &pkt), "[audio_decoder]");\r
+ int got_frame = 0;\r
+ auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), decoded_frame.get(), &got_frame, &pkt), "[audio_decoder]");\r
\r
- if(len == 0)\r
- {\r
- pkt.size = 0;\r
- continue;\r
- }\r
+ if(len == 0)\r
+ {\r
+ pkt.size = 0;\r
+ return false;\r
+ }\r
\r
- pkt.data += len;\r
- pkt.size -= len;\r
+ pkt.data += len;\r
+ pkt.size -= len;\r
\r
- if(!got_frame)\r
- continue;\r
+ if(!got_frame)\r
+ return false;\r
\r
- const uint8_t *in[] = {decoded_frame->data[0]};\r
- uint8_t* out[] = {buffer_.data()};\r
+ const uint8_t *in[] = {decoded_frame->data[0]};\r
+ uint8_t* out[] = {buffer_.data()};\r
\r
- auto channel_samples = swr_convert(swr_.get(), \r
- out, static_cast<int>(buffer_.size()) / format_desc_.audio_channels / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32), \r
- in, decoded_frame->nb_samples);\r
+ auto channel_samples = swr_convert(swr_.get(), \r
+ out, static_cast<int>(buffer_.size()) / format_desc_.audio_channels / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32), \r
+ in, decoded_frame->nb_samples);\r
\r
- auto ptr = reinterpret_cast<int32_t*>(buffer_.data());\r
- audio->insert(audio->end(), ptr, ptr + channel_samples * format_desc_.audio_channels);\r
- }\r
- \r
- event_subject_ << monitor::event("file/audio/sample-rate") % codec_context_->sample_rate\r
- << monitor::event("file/audio/channels") % codec_context_->channels\r
- << monitor::event("file/audio/format") % u8(av_get_sample_fmt_name(codec_context_->sample_fmt))\r
- << monitor::event("file/audio/codec") % u8(codec_context_->codec->long_name); \r
+ auto ptr = reinterpret_cast<int32_t*>(buffer_.data());\r
+ result = core::audio_buffer(ptr, ptr + channel_samples * format_desc_.audio_channels);\r
\r
- return audio;\r
+ return true;\r
}\r
\r
uint32_t nb_frames() const\r
\r
if(is_eof(ret)) \r
{\r
- for(int n = 0; n < 3; ++n)\r
- {\r
- video_stream_.push(null_packet());\r
- audio_stream_.push(null_packet());\r
- }\r
+ video_stream_.push(null_packet());\r
+ audio_stream_.push(null_packet());\r
\r
if(loop_)\r
{\r
merge();\r
}\r
\r
+	void push(const std::vector<std::shared_ptr<AVFrame>>& video_frames)
+ {\r
+ BOOST_FOREACH(auto& frame, video_frames)\r
+ push(frame);\r
+ }\r
+\r
void push(const std::shared_ptr<core::audio_buffer>& audio)\r
{\r
if(audio == empty_audio()) \r
frame_muxer::frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter)\r
: impl_(new impl(in_fps, frame_factory, format_desc, filter)){}\r
void frame_muxer::push(const std::shared_ptr<AVFrame>& video_frame){impl_->push(video_frame);}\r
+void frame_muxer::push(const std::vector<std::shared_ptr<AVFrame>>& video_frames){impl_->push(video_frames);}\r
void frame_muxer::push(const std::shared_ptr<core::audio_buffer>& audio_samples){return impl_->push(audio_samples);}\r
bool frame_muxer::empty() const{return impl_->empty();}\r
core::draw_frame frame_muxer::front() const{return impl_->front();}\r
frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter = L"");\r
\r
void push(const std::shared_ptr<AVFrame>& video_frame);\r
+ void push(const std::vector<std::shared_ptr<AVFrame>>& video_frame);\r
void push(const std::shared_ptr<core::audio_buffer>& audio_samples);\r
\r
bool video_ready() const;\r
{\r
}\r
\r
- std::shared_ptr<AVFrame> poll()\r
+ std::vector<std::shared_ptr<AVFrame>> poll()\r
{ \r
+ std::vector<std::shared_ptr<AVFrame>> result;\r
+\r
if(!codec_context_)\r
- return empty_video();\r
+ {\r
+ result.push_back(empty_video());\r
+ return result;\r
+ }\r
\r
std::shared_ptr<AVPacket> packet;\r
if(!input_->try_pop_video(packet))\r
- return nullptr;\r
+ return result;\r
\r
if(packet == flush_packet())\r
{\r
avcodec_flush_buffers(codec_context_.get());\r
- return nullptr;\r
+ return result;\r
}\r
- else if(packet == null_packet())\r
+ \r
+ if(packet == null_packet())\r
{\r
if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)\r
{\r
AVPacket pkt; \r
av_init_packet(&pkt);\r
- pkt.data = nullptr;\r
- pkt.size = 0;\r
- return decode(pkt);\r
+ pkt.data = nullptr;\r
+ pkt.size = 0;\r
+ \r
+ std::shared_ptr<AVFrame> frame;\r
+ while(decode(pkt, frame))\r
+ result.push_back(frame); \r
}\r
- return nullptr;\r
+ return result;\r
+ }\r
+\r
+ while(packet->size > 0)\r
+ {\r
+ std::shared_ptr<AVFrame> frame;\r
+ if(decode(*packet, frame))\r
+ result.push_back(frame); \r
}\r
\r
- return decode(*packet);\r
+ return result;\r
}\r
\r
- std::shared_ptr<AVFrame> decode(AVPacket& pkt)\r
+ bool decode(AVPacket& pkt, std::shared_ptr<AVFrame>& result)\r
{\r
- std::shared_ptr<AVFrame> frame(avcodec_alloc_frame(), av_free);\r
+ result = std::shared_ptr<AVFrame>(avcodec_alloc_frame(), av_free);\r
\r
int got_frame = 0;\r
- THROW_ON_ERROR2(avcodec_decode_video2(codec_context_.get(), frame.get(), &got_frame, &pkt), "[video_decocer]");\r
- \r
- // If a decoder consumes less then the whole packet then something is wrong\r
- // that might be just harmless padding at the end, or a problem with the\r
- // AVParser or demuxer which puted more then one frame in a AVPacket.\r
+		auto len = THROW_ON_ERROR2(avcodec_decode_video2(codec_context_.get(), result.get(), &got_frame, &pkt), "[video_decoder]");
+ \r
+ if(len == 0)\r
+ {\r
+ pkt.size = 0;\r
+ return false;\r
+ }\r
+\r
+ pkt.data += len;\r
+ pkt.size -= len;\r
\r
if(got_frame == 0) \r
- return nullptr;\r
+ return false;\r
\r
auto stream_time_base = stream_->time_base;\r
auto packet_frame_number = static_cast<uint32_t>((static_cast<double>(pkt.pts * stream_time_base.num)/stream_time_base.den)*fps_);\r
\r
file_frame_number_ = packet_frame_number;\r
\r
- is_progressive_ = !frame->interlaced_frame;\r
-\r
- if(frame->repeat_pict > 0)\r
- CASPAR_LOG(warning) << "[video_decoder] Field repeat_pict not implemented.";\r
+ is_progressive_ = !result->interlaced_frame;\r
+ \r
+ if(result->repeat_pict > 0)\r
+ CASPAR_LOG(warning) << "[video_decoder] repeat_pict not implemented.";\r
\r
event_subject_ << monitor::event("file/video/width") % width_\r
<< monitor::event("file/video/height") % height_\r
- << monitor::event("file/video/field") % u8(!frame->interlaced_frame ? "progressive" : (frame->top_field_first ? "upper" : "lower"))\r
+ << monitor::event("file/video/field") % u8(!result->interlaced_frame ? "progressive" : (result->top_field_first ? "upper" : "lower"))\r
<< monitor::event("file/video/codec") % u8(codec_context_->codec->long_name);\r
\r
- return frame;\r
+ return true;\r
}\r
\r
uint32_t nb_frames() const\r
video_decoder::video_decoder(input& in) : impl_(new impl(in)){}\r
video_decoder::video_decoder(video_decoder&& other) : impl_(std::move(other.impl_)){}\r
video_decoder& video_decoder::operator=(video_decoder&& other){impl_ = std::move(other.impl_); return *this;}\r
-std::shared_ptr<AVFrame> video_decoder::operator()(){return impl_->poll();}\r
+std::vector<std::shared_ptr<AVFrame>> video_decoder::operator()(){return impl_->poll();}\r
int video_decoder::width() const{return impl_->width_;}\r
int video_decoder::height() const{return impl_->height_;}\r
uint32_t video_decoder::nb_frames() const{return impl_->nb_frames();}\r
video_decoder(video_decoder&& other);\r
video_decoder& operator=(video_decoder&& other);\r
\r
- std::shared_ptr<AVFrame> operator()();\r
+ std::vector<std::shared_ptr<AVFrame>> operator()();\r
\r
int width() const;\r
int height() const;\r