boost::unique_future<array<const std::uint8_t>> operator()(std::vector<item> items, const core::video_format_desc& format_desc)\r
{ \r
convert(items, format_desc.width, format_desc.height); \r
+ \r
+ // Remove first field stills.\r
+ boost::range::remove_erase_if(items, [&](const item& item)\r
+ {\r
+ return item.transform.is_still && item.transform.field_mode == format_desc.field_mode; // only us last field for stills.\r
+ });\r
\r
+ // Stills are progressive\r
+ BOOST_FOREACH(auto item, items)\r
+ {\r
+ if(item.transform.is_still)\r
+ item.transform.field_mode = core::field_mode::progressive;\r
+ }\r
+\r
auto result = spl::make_shared<buffer>(format_desc.size, 0);\r
if(format_desc.field_mode != core::field_mode::progressive)\r
{ \r
{\r
return item.transform.field_mode == core::field_mode::empty;\r
});\r
- \r
- // Remove first field stills.\r
- boost::range::remove_erase_if(items, [&](const item& item)\r
- {\r
- return item.transform.is_still && item.transform.field_mode == field_mode; // only us last field for stills.\r
- });\r
- \r
- // Stills are progressive, TODO: deinterlace.\r
- BOOST_FOREACH(auto item, items)\r
- {\r
- if(item.transform.is_still)\r
- item.transform.field_mode = core::field_mode::progressive;\r
- }\r
\r
if(items.empty())\r
return;\r
});\r
} \r
\r
+ BOOST_FOREACH(auto& layer, layers)\r
+ { \r
+ // Remove first field stills.\r
+ boost::range::remove_erase_if(layer.items, [&](const item& item)\r
+ {\r
+ return item.transform.is_still && item.transform.field_mode == format_desc.field_mode; // only us last field for stills.\r
+ });\r
+ \r
+ // Stills are progressive\r
+ BOOST_FOREACH(auto& item, layer.items)\r
+ {\r
+ if(item.transform.is_still)\r
+ item.transform.field_mode = core::field_mode::progressive;\r
+ }\r
+ }\r
+\r
return flatten(ogl_->begin_invoke([=]() mutable -> boost::shared_future<array<const std::uint8_t>>\r
{\r
auto target_texture = ogl_->create_texture(format_desc.width, format_desc.height, 4);\r
draw(target_texture, std::move(layer), layer_key_texture, format_desc, field_mode);\r
}\r
\r
- void draw(spl::shared_ptr<texture>& target_texture,\r
- layer layer, \r
- std::shared_ptr<texture>& layer_key_texture,\r
+ void draw(spl::shared_ptr<texture>& target_texture,\r
+ layer layer, \r
+ std::shared_ptr<texture>& layer_key_texture,\r
const core::video_format_desc& format_desc,\r
- core::field_mode field_mode)\r
+ core::field_mode field_mode)\r
{ \r
// Fix frames \r
BOOST_FOREACH(auto& item, layer.items) \r
return item.transform.field_mode == core::field_mode::empty;\r
});\r
\r
- // Remove first field stills.\r
- if(format_desc.field_mode != core::field_mode::progressive)\r
- {\r
- boost::range::remove_erase_if(layer.items, [&](const item& item)\r
- {\r
- return item.transform.is_still && item.transform.field_mode == format_desc.field_mode; // only use last field for stills.\r
- });\r
- }\r
-\r
- // Stills are progressive, TODO: deinterlace.\r
- BOOST_FOREACH(auto& item, layer.items)\r
- {\r
- if(item.transform.is_still)\r
- item.transform.field_mode = core::field_mode::progressive;\r
- }\r
-\r
if(layer.items.empty())\r
return;\r
\r
<GenerateDebugInformation>true</GenerateDebugInformation>\r
</Link>\r
<PreBuildEvent>\r
- <Command>"SubWCRev.exe" "$(SolutionDir)." "$(SolutionDir)\version.tmpl" "$(SolutionDir)\version.h"</Command>\r
+ <Command>\r
+ </Command>\r
</PreBuildEvent>\r
<Lib />\r
<PostBuildEvent>\r
<OptimizeReferences>true</OptimizeReferences>\r
</Link>\r
<PreBuildEvent>\r
- <Command>"SubWCRev.exe" "$(SolutionDir)." "$(SolutionDir)\version.tmpl" "$(SolutionDir)\version.h"</Command>\r
+ <Command>\r
+ </Command>\r
</PreBuildEvent>\r
<Lib>\r
<LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
draw_frame draw_frame::still(draw_frame frame)\r
{\r
frame.transform().image_transform.is_still = true; \r
- frame.transform().audio_transform.volume = 0.0; \r
+ frame.transform().audio_transform.is_still = true; \r
return frame;\r
}\r
\r
\r
audio_transform::audio_transform() \r
: volume(1.0)\r
+ , is_still(false)\r
{\r
}\r
\r
audio_transform& audio_transform::operator*=(const audio_transform &other)\r
{\r
- volume *= other.volume; \r
+ volume *= other.volume; \r
+ is_still |= other.is_still;\r
return *this;\r
}\r
\r
};\r
\r
audio_transform result; \r
+ result.is_still = source.is_still | dest.is_still;\r
result.volume = do_tween(time, source.volume, dest.volume, duration, tween);\r
\r
return result;\r
return std::abs(lhs - rhs) < 5e-8;\r
};\r
\r
- return eq(lhs.volume, rhs.volume);\r
+ return eq(lhs.volume, rhs.volume) && lhs.is_still == rhs.is_still;\r
}\r
\r
bool operator!=(const audio_transform& lhs, const audio_transform& rhs)\r
public:\r
audio_transform();\r
\r
- double volume;\r
+ double volume;\r
+ bool is_still;\r
\r
audio_transform& operator*=(const audio_transform &other);\r
audio_transform operator*(const audio_transform &other) const;\r
{\r
audio_transform prev_transform;\r
audio_buffer_ps audio_data;\r
+ bool is_still;\r
+\r
+ audio_stream() \r
+ : is_still(false)\r
+ {\r
+ }\r
};\r
\r
struct audio_mixer::impl : boost::noncopyable\r
item.tag = frame.stream_tag();\r
item.transform = transform_stack_.top();\r
item.audio_data = frame.audio_data();\r
+\r
+ if(item.transform.is_still)\r
+ item.transform.volume = 0.0;\r
\r
items_.push_back(std::move(item)); \r
}\r
next_audio.push_back(item.audio_data[n] * (prev_volume + (n/format_desc_.audio_channels) * alpha));\r
\r
next_audio_streams[tag].prev_transform = std::move(next_transform); // Store all active tags, inactive tags will be removed at the end.\r
- next_audio_streams[tag].audio_data = std::move(next_audio); \r
+ next_audio_streams[tag].audio_data = std::move(next_audio); \r
+ next_audio_streams[tag].is_still = item.transform.is_still;\r
} \r
\r
items_.clear();\r
audio_streams_[nullptr].audio_data = audio_buffer_ps(audio_cadence_.front(), 0.0f);\r
\r
{ // sanity check\r
-\r
- auto nb_invalid_streams = boost::count_if(audio_streams_ | boost::adaptors::map_values, [&](const audio_stream& x)\r
+ BOOST_FOREACH(auto& stream, audio_streams_ | boost::adaptors::map_values)\r
{\r
- return x.audio_data.size() < audio_cadence_.front();\r
- });\r
-\r
- if(nb_invalid_streams > 0) \r
- CASPAR_LOG(trace) << "[audio_mixer] Incorrect frame audio cadence detected. Appending empty samples."; \r
+ if(stream.audio_data.size() < audio_cadence_.front() && !stream.is_still)\r
+ {\r
+ stream.audio_data.resize(audio_cadence_.front(), 0.0f);\r
+ CASPAR_LOG(trace) << "[audio_mixer] Incorrect frame audio cadence detected. Appending empty samples."; \r
+ }\r
+ } \r
}\r
\r
std::vector<float> result_ps(audio_cadence_.front(), 0.0f);\r
BOOST_FOREACH(auto& stream, audio_streams_ | boost::adaptors::map_values)\r
{\r
- //CASPAR_LOG(debug) << stream.audio_data.size() << L" : " << result_ps.size();\r
-\r
- if(stream.audio_data.size() < result_ps.size())\r
- stream.audio_data.resize(result_ps.size(), 0.0f);\r
-\r
+ CASPAR_ASSERT(stream.audio_data.size() == result_ps.size());\r
auto out = boost::range::transform(result_ps, stream.audio_data, std::begin(result_ps), std::plus<float>());\r
stream.audio_data.erase(std::begin(stream.audio_data), std::begin(stream.audio_data) + std::distance(std::begin(result_ps), out));\r
} \r
{\r
auto& layer = layers_[index];\r
auto& tween = tweens_[index];\r
- auto transform = tween.fetch_and_tick(1);\r
\r
auto frame = layer.receive(format_desc); \r
auto frame1 = frame;\r
- frame1.transform() = transform;\r
+ frame1.transform() *= tween.fetch_and_tick(1);\r
\r
if(format_desc.field_mode != core::field_mode::progressive)\r
{ \r
auto frame2 = frame;\r
- frame2.transform() = tween.fetch_and_tick(1);\r
+ frame2.transform() *= tween.fetch_and_tick(1);\r
frame1 = core::draw_frame::interlace(frame1, frame2, format_desc.field_mode);\r
}\r
\r
\r
#include "ffmpeg_consumer.h"\r
\r
-#include "../producer/audio/audio_resampler.h"\r
#include "../producer/tbb_avcodec.h"\r
\r
#include <core/frame/frame.h>\r
#include <libavutil/pixdesc.h>\r
#include <libavutil/parseutils.h>\r
#include <libavutil/samplefmt.h>\r
+ #include <libswresample/swresample.h>\r
}\r
#if defined(_MSC_VER)\r
#pragma warning (pop)\r
byte_vector audio_buf_;\r
byte_vector video_outbuf_;\r
byte_vector picture_buf_;\r
- std::shared_ptr<audio_resampler> swr_;\r
+ std::shared_ptr<SwrContext> swr_;\r
std::shared_ptr<SwsContext> sws_;\r
\r
int64_t in_frame_number_;\r
} \r
return nullptr;\r
}\r
- \r
+
+ uint64_t get_channel_layout(AVCodecContext* dec)\r
+ {\r
+ auto layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);\r
+ return layout;\r
+ }\r
+\r
byte_vector convert_audio(core::const_frame& frame, AVCodecContext* c)\r
{\r
- if(!swr_) \r
- swr_.reset(new audio_resampler(c->channels, format_desc_.audio_channels, \r
- c->sample_rate, format_desc_.audio_sample_rate,\r
- c->sample_fmt, AV_SAMPLE_FMT_S32));\r
- \r
+ if(!swr_) \r
+ {\r
+ swr_ = std::shared_ptr<SwrContext>(swr_alloc_set_opts(nullptr,
+ get_channel_layout(c), c->sample_fmt, c->sample_rate,
+ av_get_default_channel_layout(format_desc_.audio_channels), AV_SAMPLE_FMT_S32, format_desc_.audio_sample_rate,
+ 0, nullptr), [](SwrContext* p){swr_free(&p);});\r
\r
- auto audio_data = frame.audio_data();\r
+ if(!swr_)\r
+ BOOST_THROW_EXCEPTION(std::bad_alloc("swr_"));\r
\r
- std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> audio_resample_buffer;\r
- std::copy(reinterpret_cast<const uint8_t*>(audio_data.data()), \r
- reinterpret_cast<const uint8_t*>(audio_data.data()) + audio_data.size()*4, \r
- std::back_inserter(audio_resample_buffer));\r
- \r
- audio_resample_buffer = swr_->resample(std::move(audio_resample_buffer));\r
- \r
- return byte_vector(audio_resample_buffer.begin(), audio_resample_buffer.end());\r
+ THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");\r
+ }\r
+
+ byte_vector buffer(48000);
+
+ const uint8_t *in[] = {reinterpret_cast<const uint8_t*>(frame.audio_data().data())};
+ uint8_t* out[] = {buffer.data()};
+
+ auto channel_samples = swr_convert(swr_.get(), \r
+ out, static_cast<int>(buffer.size()) / c->channels / av_get_bytes_per_sample(c->sample_fmt), \r
+ in, static_cast<int>(frame.audio_data().size()/format_desc_.audio_channels));\r
+\r
+ buffer.resize(channel_samples * c->channels * av_get_bytes_per_sample(c->sample_fmt)); \r
+\r
+ return buffer;\r
}\r
\r
std::shared_ptr<AVPacket> encode_audio_frame(core::const_frame frame)\r
{ \r
+ // TODO: Sometimes audio is missing towards end of resulting file.\r
+\r
auto c = audio_st_->codec;\r
\r
boost::range::push_back(audio_buf_, convert_audio(frame, c));\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">../../stdafx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="producer\audio\audio_resampler.cpp">\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">../../stdafx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">../../StdAfx.h</PrecompiledHeaderFile>\r
- </ClCompile>\r
<ClCompile Include="producer\ffmpeg_producer.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">../StdAfx.h</PrecompiledHeaderFile>\r
<ClInclude Include="ffmpeg.h" />\r
<ClInclude Include="ffmpeg_error.h" />\r
<ClInclude Include="producer\audio\audio_decoder.h" />\r
- <ClInclude Include="producer\audio\audio_resampler.h" />\r
<ClInclude Include="producer\ffmpeg_producer.h" />\r
<ClInclude Include="producer\filter\filter.h" />\r
<ClInclude Include="producer\filter\parallel_yadif.h" />\r
<ClCompile Include="producer\filter\parallel_yadif.cpp">\r
<Filter>source\producer\filter</Filter>\r
</ClCompile>\r
- <ClCompile Include="producer\audio\audio_resampler.cpp">\r
- <Filter>source\producer\audio</Filter>\r
- </ClCompile>\r
<ClCompile Include="producer\util\util.cpp">\r
<Filter>source\producer\util</Filter>\r
</ClCompile>\r
<ClInclude Include="producer\filter\parallel_yadif.h">\r
<Filter>source\producer\filter</Filter>\r
</ClInclude>\r
- <ClInclude Include="producer\audio\audio_resampler.h">\r
- <Filter>source\producer\audio</Filter>\r
- </ClInclude>\r
<ClInclude Include="producer\util\flv.h">\r
<Filter>source\producer\util</Filter>\r
</ClInclude>\r
\r
#include "audio_decoder.h"\r
\r
-#include "audio_resampler.h"\r
-\r
#include "../util/util.h"\r
+#include "../input/input.h"\r
#include "../../ffmpeg_error.h"\r
\r
#include <core/video_format.h>\r
{\r
#include <libavformat/avformat.h>\r
#include <libavcodec/avcodec.h>\r
+ #include <libswresample/swresample.h>\r
}\r
#if defined(_MSC_VER)\r
#pragma warning (pop)\r
\r
namespace caspar { namespace ffmpeg {\r
\r
+uint64_t get_channel_layout(AVCodecContext* dec)\r
+{\r
+ auto layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);\r
+ return layout;\r
+}\r
+\r
struct audio_decoder::impl : boost::noncopyable\r
{ \r
monitor::basic_subject event_subject_;\r
+ input* input_;\r
int index_;\r
const std::shared_ptr<AVCodecContext> codec_context_; \r
const core::video_format_desc format_desc_;\r
\r
- boost::optional<audio_resampler> resampler_;\r
+ std::shared_ptr<SwrContext> swr_;\r
\r
- std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> buffer1_;\r
+ std::vector<uint8_t, tbb::cache_aligned_allocator<int8_t>> buffer_;\r
\r
std::queue<spl::shared_ptr<AVPacket>> packets_;\r
-\r
- const int64_t nb_frames_;\r
- uint32_t file_frame_number_;\r
+ \r
public:\r
- explicit impl() \r
- : index_(0)\r
- , nb_frames_(0)//context->streams[index_]->nb_frames)\r
- , file_frame_number_(0)\r
- { \r
+ impl()\r
+ : input_(nullptr)\r
+ {\r
}\r
\r
- explicit impl(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) \r
- : format_desc_(format_desc) \r
- , codec_context_(open_codec(*context, AVMEDIA_TYPE_AUDIO, index_))\r
- , resampler_(audio_resampler(format_desc.audio_channels, codec_context_->channels,\r
- format_desc.audio_sample_rate, codec_context_->sample_rate,\r
- AV_SAMPLE_FMT_S32, codec_context_->sample_fmt))\r
- , buffer1_(AVCODEC_MAX_AUDIO_FRAME_SIZE*2)\r
- , nb_frames_(0)//context->streams[index_]->nb_frames)\r
+ explicit impl(input& in, const core::video_format_desc& format_desc) \r
+ : input_(&in)\r
+ , format_desc_(format_desc) \r
+ , codec_context_(open_codec(*input_->context(), AVMEDIA_TYPE_AUDIO, index_))\r
+ , swr_(swr_alloc_set_opts(nullptr,\r
+ av_get_default_channel_layout(format_desc_.audio_channels), AV_SAMPLE_FMT_S32, format_desc_.audio_sample_rate,\r
+ get_channel_layout(codec_context_.get()), codec_context_->sample_fmt, codec_context_->sample_rate,\r
+ 0, nullptr), [](SwrContext* p){swr_free(&p);})\r
+ , buffer_(AVCODEC_MAX_AUDIO_FRAME_SIZE*4)\r
{ \r
- file_frame_number_ = 0; \r
- }\r
+ if(!swr_)\r
+ BOOST_THROW_EXCEPTION(std::bad_alloc("swr_"));\r
\r
- void push(const std::shared_ptr<AVPacket>& packet)\r
- { \r
- if(!packet || !codec_context_)\r
- return;\r
-\r
- if(packet->stream_index == index_ || packet->data == nullptr)\r
- packets_.push(spl::make_shared_ptr(packet));\r
- } \r
- \r
+ THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");\r
+ }\r
+ \r
std::shared_ptr<core::audio_buffer> poll()\r
{\r
if(!codec_context_)\r
return empty_audio();\r
\r
- if(packets_.empty())\r
+ std::shared_ptr<AVPacket> packet;\r
+ if(!input_->try_pop_audio(packet))\r
return nullptr;\r
- \r
- auto packet = packets_.front();\r
- \r
- if(packet->data == nullptr)\r
+\r
+ if(packet == flush_packet())\r
{\r
- packets_.pop();\r
avcodec_flush_buffers(codec_context_.get());\r
return nullptr;\r
}\r
-\r
- auto audio = decode(*packet);\r
-\r
- if(packet->size == 0) \r
- packets_.pop();\r
-\r
- return audio;\r
+ else if(packet == eof_packet())\r
+ {\r
+ if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)\r
+ {\r
+ AVPacket pkt; \r
+ av_init_packet(&pkt);\r
+ pkt.data = nullptr;\r
+ pkt.size = 0;\r
+ return decode(pkt);\r
+ }\r
+ return nullptr;\r
+ }\r
+ \r
+ return decode(*packet);\r
}\r
\r
std::shared_ptr<core::audio_buffer> decode(AVPacket& pkt)\r
{ \r
- buffer1_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);\r
- int written_bytes = static_cast<int>(buffer1_.size()) - FF_INPUT_BUFFER_PADDING_SIZE;\r
+ auto audio = std::make_shared<core::audio_buffer>();\r
\r
- int ret = THROW_ON_ERROR2(avcodec_decode_audio3(codec_context_.get(), reinterpret_cast<int16_t*>(buffer1_.data()), &written_bytes, &pkt), "[audio_decoder]");\r
-\r
- // There might be several frames in one packet.\r
- pkt.size -= ret;\r
- pkt.data += ret;\r
+ while(pkt.size > 0)\r
+ { \r
+ std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
+\r
+ int frame_finished = 0;\r
+ auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), decoded_frame.get(), &frame_finished, &pkt), "[audio_decoder]");\r
+ \r
+ if(len == 0)\r
+ {\r
+ pkt.size = 0;\r
+ continue;\r
+ }\r
+\r
+ pkt.data += len;\r
+ pkt.size -= len;\r
+\r
+ if(!frame_finished)\r
+ return audio;\r
+ \r
+ const uint8_t *in[] = {decoded_frame->data[0]};\r
+ uint8_t* out[] = {buffer_.data()};\r
+\r
+ auto channel_samples = swr_convert(swr_.get(), \r
+ out, static_cast<int>(buffer_.size()) / format_desc_.audio_channels / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32), \r
+ in, decoded_frame->nb_samples);\r
\r
- buffer1_.resize(written_bytes);\r
-\r
- buffer1_ = resampler_->resample(std::move(buffer1_));\r
- \r
- const auto n_samples = buffer1_.size() / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);\r
- const auto samples = reinterpret_cast<int32_t*>(buffer1_.data());\r
+ auto ptr = reinterpret_cast<int32_t*>(buffer_.data());\r
+ audio->insert(audio->end(), ptr, ptr + channel_samples * format_desc_.audio_channels);\r
+ }\r
\r
event_subject_ << monitor::event("file/audio/sample-rate") % codec_context_->sample_rate\r
<< monitor::event("file/audio/channels") % codec_context_->channels\r
<< monitor::event("file/audio/format") % u8(av_get_sample_fmt_name(codec_context_->sample_fmt))\r
- << monitor::event("file/audio/codec") % u8(codec_context_->codec->long_name);\r
+ << monitor::event("file/audio/codec") % u8(codec_context_->codec->long_name); \r
\r
- file_frame_number_ = static_cast<uint32_t>(pkt.pts);\r
- \r
- return std::make_shared<core::audio_buffer>(samples, samples + n_samples);\r
- }\r
-\r
- bool ready() const\r
- {\r
- return !codec_context_ || !packets_.empty();\r
+ return audio;\r
}\r
\r
- void clear()\r
- {\r
- while(!packets_.empty())\r
- packets_.pop();\r
- }\r
-\r
uint32_t nb_frames() const\r
{\r
- return 0;//std::max<int64_t>(nb_frames_, file_frame_number_);\r
+ return 0;\r
}\r
\r
std::wstring print() const\r
};\r
\r
audio_decoder::audio_decoder() : impl_(new impl()){}\r
-audio_decoder::audio_decoder(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) : impl_(new impl(context, format_desc)){}\r
+audio_decoder::audio_decoder(input& input, const core::video_format_desc& format_desc) : impl_(new impl(input, format_desc)){}\r
audio_decoder::audio_decoder(audio_decoder&& other) : impl_(std::move(other.impl_)){}\r
audio_decoder& audio_decoder::operator=(audio_decoder&& other){impl_ = std::move(other.impl_); return *this;}\r
-void audio_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
-bool audio_decoder::ready() const{return impl_->ready();}\r
-std::shared_ptr<core::audio_buffer> audio_decoder::poll(){return impl_->poll();}\r
+std::shared_ptr<core::audio_buffer> audio_decoder::operator()(){return impl_->poll();}\r
uint32_t audio_decoder::nb_frames() const{return impl_->nb_frames();}\r
-uint32_t audio_decoder::file_frame_number() const{return impl_->file_frame_number_;}\r
std::wstring audio_decoder::print() const{return impl_->print();}\r
-void audio_decoder::clear(){impl_->clear();}\r
void audio_decoder::subscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.subscribe(o);}\r
void audio_decoder::unsubscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.unsubscribe(o);}\r
\r
{\r
public:\r
explicit audio_decoder();\r
- explicit audio_decoder(const std::shared_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc);\r
+ explicit audio_decoder(class input& input, const core::video_format_desc& format_desc);\r
\r
audio_decoder(audio_decoder&& other);\r
audio_decoder& operator=(audio_decoder&& other);\r
\r
- bool ready() const;\r
- void push(const std::shared_ptr<AVPacket>& packet);\r
- std::shared_ptr<core::audio_buffer> poll();\r
+ std::shared_ptr<core::audio_buffer> operator()();\r
\r
uint32_t nb_frames() const;\r
\r
- uint32_t file_frame_number() const;\r
-\r
std::wstring print() const;\r
\r
- void clear();\r
-\r
// monitor::observable\r
\r
void subscribe(const monitor::observable::observer_ptr& o) override;\r
+++ /dev/null
-/*\r
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>\r
-*\r
-* This file is part of CasparCG (www.casparcg.com).\r
-*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
-*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-*\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
-*\r
-* Author: Robert Nagy, ronag89@gmail.com\r
-*/\r
-\r
-#include "../../StdAfx.h"\r
-\r
-#include "audio_resampler.h"\r
-\r
-#include <common/except.h>\r
-#include <common/log.h>\r
-\r
-#if defined(_MSC_VER)\r
-#pragma warning (push)\r
-#pragma warning (disable : 4244)\r
-#endif\r
-extern "C" \r
-{\r
- #include <libavcodec/avcodec.h>\r
-}\r
-#if defined(_MSC_VER)\r
-#pragma warning (pop)\r
-#endif\r
-\r
-namespace caspar { namespace ffmpeg {\r
-\r
-struct audio_resampler::impl\r
-{ \r
- std::shared_ptr<ReSampleContext> resampler_;\r
- \r
- std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> copy_buffer_;\r
- std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> buffer2_;\r
-\r
- const int output_channels_;\r
- const AVSampleFormat output_sample_format_;\r
-\r
- const int input_channels_;\r
- const AVSampleFormat input_sample_format_;\r
-\r
- impl(int output_channels, int input_channels, int output_sample_rate, int input_sample_rate, AVSampleFormat output_sample_format, AVSampleFormat input_sample_format)\r
- : output_channels_(output_channels)\r
- , output_sample_format_(output_sample_format)\r
- , input_channels_(input_channels)\r
- , input_sample_format_(input_sample_format)\r
- {\r
- if(input_channels != output_channels || \r
- input_sample_rate != output_sample_rate ||\r
- input_sample_format != output_sample_format)\r
- { \r
- auto resampler = av_audio_resample_init(output_channels, input_channels,\r
- output_sample_rate, input_sample_rate,\r
- output_sample_format, input_sample_format,\r
- 16, 10, 0, 0.8);\r
-\r
- buffer2_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);\r
-\r
- char sample_fmt_string[200];\r
- av_get_sample_fmt_string(sample_fmt_string, 200, input_sample_format);\r
- \r
- if(resampler)\r
- resampler_.reset(resampler, audio_resample_close);\r
- else\r
- BOOST_THROW_EXCEPTION(caspar_exception());\r
- } \r
- }\r
-\r
- std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> resample(const std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>>& data)\r
- {\r
- if(resampler_ && !data.empty())\r
- {\r
- buffer2_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);\r
- auto ret = audio_resample(resampler_.get(),\r
- reinterpret_cast<short*>(buffer2_.data()), \r
- const_cast<short*>(reinterpret_cast<const short*>(data.data())), \r
- static_cast<int>(data.size()) / (av_get_bytes_per_sample(input_sample_format_) * input_channels_)); \r
- buffer2_.resize(ret * av_get_bytes_per_sample(output_sample_format_) * output_channels_);\r
- }\r
-\r
- return buffer2_;\r
- }\r
-};\r
-\r
-\r
-audio_resampler::audio_resampler(int output_channels, int input_channels, int output_sample_rate, int input_sample_rate, AVSampleFormat output_sample_format, AVSampleFormat input_sample_format)\r
- : impl_(new impl(output_channels, input_channels, output_sample_rate, input_sample_rate, output_sample_format, input_sample_format)){}\r
-std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> audio_resampler::resample(const std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>>& data){return impl_->resample(std::move(data));}\r
-\r
-}}
\ No newline at end of file
+++ /dev/null
-/*\r
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>\r
-*\r
-* This file is part of CasparCG (www.casparcg.com).\r
-*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
-*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-*\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
-*\r
-* Author: Robert Nagy, ronag89@gmail.com\r
-*/\r
-\r
-#pragma once\r
-\r
-#include <memory>\r
-\r
-#include <libavutil/samplefmt.h>\r
-\r
-namespace caspar { namespace ffmpeg {\r
-\r
-class audio_resampler\r
-{\r
-public:\r
- audio_resampler(int output_channels, int input_channels, \r
- int output_sample_rate, int input_sample_rate, \r
- AVSampleFormat output_sample_format, AVSampleFormat input_sample_format);\r
- \r
- std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> resample(const std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>>& data);\r
-private:\r
- struct impl;\r
- std::shared_ptr<impl> impl_;\r
-};\r
-\r
-}}
\ No newline at end of file
graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f)); \r
diagnostics::register_graph(graph_);\r
\r
+ video_decoder_.subscribe(event_subject_);\r
+ audio_decoder_.subscribe(event_subject_);\r
+\r
try\r
{\r
- video_decoder_ = video_decoder(input_.context());\r
- video_decoder_.subscribe(event_subject_);\r
+ video_decoder_ = video_decoder(input_);\r
+ \r
CASPAR_LOG(info) << print() << L" " << video_decoder_.print();\r
}\r
catch(averror_stream_not_found&)\r
\r
try\r
{\r
- audio_decoder_ = audio_decoder(input_.context(), format_desc_);\r
- audio_decoder_.subscribe(event_subject_);\r
+ audio_decoder_ = audio_decoder(input_, format_desc_);\r
+ \r
CASPAR_LOG(info) << print() << L" " << audio_decoder_.print();\r
}\r
catch(averror_stream_not_found&)\r
auto frame = core::draw_frame::late(); \r
if(!muxer_.empty())\r
{\r
- frame = std::move(muxer_.front());\r
+ last_frame_ = frame = std::move(muxer_.front());\r
muxer_.pop();\r
+\r
++frame_number_; \r
}\r
- else if(!input_.eof()) \r
+ else \r
graph_->set_tag("underflow"); \r
\r
graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);\r
<< monitor::event("file/fps") % fps_\r
<< monitor::event("file/path") % filename_\r
<< monitor::event("loop") % input_.loop();\r
- \r
- if(frame == core::draw_frame::late())\r
- return input_.eof() ? last_frame() : core::draw_frame::late();\r
- \r
- return last_frame_ = frame;\r
+ \r
+ return frame;\r
}\r
\r
core::draw_frame last_frame() const override\r
void seek(uint32_t target)\r
{\r
muxer_.clear();\r
- video_decoder_.clear();\r
- audio_decoder_.clear();\r
- \r
- target = std::min(target, file_nb_frames()-25);\r
+ \r
+ // TODO: Fix HACK.\r
+\r
+ // BEGIN HACK: There is no way to flush yadif. Need to poll 2 frames.\r
+ if(target > 0)\r
+ target -= 1; \r
+ // END HACK\r
+\r
+ target = std::min(target, file_nb_frames());\r
\r
input_.seek(target);\r
- \r
- decode_next_frame();\r
- decode_next_frame();\r
- decode_next_frame();\r
- decode_next_frame();\r
- decode_next_frame();\r
- decode_next_frame();\r
muxer_.clear();\r
+ \r
+ // BEGIN HACK: There is no way to flush yadif. Need to poll 2 frames.\r
decode_next_frame();\r
+ if(!muxer_.empty())\r
+ muxer_.pop();\r
+ // END HACK\r
+\r
decode_next_frame();\r
\r
last_frame_ = !muxer_.empty() ? muxer_.front() : last_frame_; \r
void decode_next_frame()\r
{\r
for(int n = 0; n < 64 && muxer_.empty(); ++n)\r
- {\r
- std::shared_ptr<AVPacket> pkt;\r
- for(int n = 0; n < 32 && (!video_decoder_.ready() || !audio_decoder_.ready()) && input_.try_pop(pkt); ++n)\r
- {\r
- video_decoder_.push(pkt);\r
- audio_decoder_.push(pkt);\r
- }\r
- \r
+ { \r
std::shared_ptr<AVFrame> video;\r
std::shared_ptr<core::audio_buffer> audio;\r
\r
[&]\r
{\r
if(!muxer_.video_ready())\r
- video = video_decoder_.poll(); \r
+ video = video_decoder_();\r
},\r
[&]\r
{ \r
if(!muxer_.audio_ready())\r
- audio = audio_decoder_.poll(); \r
+ audio = audio_decoder_(); \r
}\r
);\r
\r
#pragma warning (pop)\r
#endif\r
\r
-static const size_t MAX_BUFFER_COUNT = 100;\r
-static const size_t MIN_BUFFER_COUNT = 32;\r
-static const size_t MAX_BUFFER_SIZE = 32 * 1000000;\r
-\r
namespace caspar { namespace ffmpeg {\r
+\r
+static const int MIN_FRAMES = 25;\r
+\r
+class stream\r
+{\r
+ stream(const stream&);\r
+ stream& operator=(const stream&);\r
+\r
+ typedef tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>>::size_type size_type;\r
+\r
+ int index_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>> packets_;\r
+public:\r
+\r
+ stream(int index) \r
+ : index_(index)\r
+ {\r
+ }\r
+ \r
+ void push(const std::shared_ptr<AVPacket>& packet)\r
+ {\r
+ if(packet->stream_index != index_ && packet != flush_packet() && packet != eof_packet())\r
+ return;\r
+\r
+ packets_.push(packet);\r
+ }\r
+\r
+ bool try_pop(std::shared_ptr<AVPacket>& packet)\r
+ {\r
+ return packets_.try_pop(packet);\r
+ }\r
+\r
+ void clear()\r
+ {\r
+ std::shared_ptr<AVPacket> packet;\r
+ while(packets_.try_pop(packet));\r
+ }\r
+\r
+ size_type size() const\r
+ {\r
+ return index_ != -1 ? packets_.size() : std::numeric_limits<size_type>::max();\r
+ }\r
+};\r
\r
struct input::impl : boost::noncopyable\r
{ \r
- const spl::shared_ptr<diagnostics::graph> graph_;\r
+ const spl::shared_ptr<diagnostics::graph> graph_;\r
\r
- const spl::shared_ptr<AVFormatContext> format_context_; // Destroy this last\r
+ const spl::shared_ptr<AVFormatContext> format_context_; // Destroy this last\r
const int default_stream_index_;\r
\r
const std::wstring filename_;\r
tbb::atomic<uint32_t> start_; \r
tbb::atomic<uint32_t> length_;\r
tbb::atomic<bool> loop_;\r
- tbb::atomic<bool> eof_;\r
- uint32_t frame_number_;\r
double fps_;\r
+ uint32_t frame_number_;\r
\r
- tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>> buffer_;\r
- tbb::atomic<size_t> buffer_size_;\r
- \r
+ stream video_stream_;\r
+ stream audio_stream_;\r
+\r
executor executor_;\r
\r
impl(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& filename, const bool loop, const uint32_t start, const uint32_t length) \r
, default_stream_index_(av_find_default_stream_index(format_context_.get()))\r
, filename_(filename)\r
, frame_number_(0)\r
- , fps_(read_fps(*format_context_, 25))\r
+ , fps_(read_fps(*format_context_, 0.0))\r
+ , video_stream_(av_find_best_stream(format_context_.get(), AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0))\r
+ , audio_stream_(av_find_best_stream(format_context_.get(), AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0))\r
, executor_(print())\r
{ \r
start_ = start;\r
length_ = length;\r
loop_ = loop;\r
- eof_ = false;\r
- buffer_size_ = 0;\r
\r
if(start_ > 0) \r
seek(start_, false);\r
\r
graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f)); \r
- graph_->set_color("buffer-count", diagnostics::color(0.7f, 0.4f, 0.4f));\r
- graph_->set_color("buffer-size", diagnostics::color(1.0f, 1.0f, 0.0f)); \r
-\r
+ graph_->set_color("audio-buffer", diagnostics::color(0.7f, 0.4f, 0.4f));\r
+ graph_->set_color("video-buffer", diagnostics::color(1.0f, 1.0f, 0.0f)); \r
+ \r
tick();\r
}\r
\r
- bool try_pop(std::shared_ptr<AVPacket>& packet)\r
- {\r
- auto result = buffer_.try_pop(packet);\r
+	bool try_pop_video(std::shared_ptr<AVPacket>& packet)\r
+	{	\r
+		bool result = video_stream_.try_pop(packet);\r
+		if(result)\r
+			tick();\r
\r
+		// Cast before dividing: size()/MIN_FRAMES is integer division and\r
+		// truncates to 0 until a full MIN_FRAMES packets are buffered, which\r
+		// made the diagnostics graph jump 0 -> 1 instead of showing a fraction.\r
+		graph_->set_value("video-buffer", std::min(1.0, static_cast<double>(video_stream_.size())/MIN_FRAMES));\r
+	\r
+		return result;\r
+	}\r
+ \r
+	bool try_pop_audio(std::shared_ptr<AVPacket>& packet)\r
+	{	\r
+		bool result = audio_stream_.try_pop(packet);\r
		if(result)\r
-		{\r
-			if(packet)\r
-				buffer_size_ -= packet->size;\r
			tick();\r
-		}\r
+		\r
+		// Cast before dividing: integer division clamps the graph value to 0\r
+		// until MIN_FRAMES packets are buffered (same fix as try_pop_video).\r
+		graph_->set_value("audio-buffer", std::min(1.0, static_cast<double>(audio_stream_.size())/MIN_FRAMES));\r
\r
-		graph_->set_value("buffer-size", (static_cast<double>(buffer_size_)+0.001)/MAX_BUFFER_SIZE);\r
-		graph_->set_value("buffer-count", (static_cast<double>(buffer_.size()+0.001)/MAX_BUFFER_COUNT));\r
-	\r
		return result;\r
	}\r
\r
void seek(uint32_t target, bool clear)\r
{\r
executor_.invoke([=]\r
- {\r
- if(clear)\r
- {\r
- std::shared_ptr<AVPacket> packet;\r
- while(buffer_.try_pop(packet) && packet)\r
- buffer_size_ -= packet->size;\r
- }\r
- \r
+ { \r
CASPAR_LOG(debug) << print() << " Seeking: " << target;\r
\r
int flags = AVSEEK_FLAG_FRAME;\r
\r
THROW_ON_ERROR2(avformat_seek_file(format_context_.get(), default_stream_index_, std::numeric_limits<int64_t>::min(), fixed_target, fixed_target, 0), print()); \r
\r
- auto flush_packet = create_packet();\r
- flush_packet->data = nullptr;\r
- flush_packet->size = 0;\r
- buffer_.push(flush_packet);\r
+ if(clear)\r
+ {\r
+ video_stream_.clear();\r
+ audio_stream_.clear();\r
+ }\r
+\r
+ video_stream_.push(flush_packet());\r
+ audio_stream_.push(flush_packet());\r
\r
tick();\r
}, task_priority::high_priority);\r
\r
	bool full() const\r
	{\r
-		return (buffer_size_ > MAX_BUFFER_SIZE || buffer_.size() > MAX_BUFFER_COUNT) && buffer_.size() > MIN_BUFFER_COUNT;\r
+		// Buffered enough once BOTH queues exceed MIN_FRAMES packets; a\r
+		// missing stream (index -1) reports max size, so it never blocks.\r
+		return video_stream_.size() > MIN_FRAMES && audio_stream_.size() > MIN_FRAMES;\r
	}\r
\r
void tick()\r
\r
if(is_eof(ret)) \r
{\r
- frame_number_ = 0;\r
+ video_stream_.push(eof_packet());\r
+ audio_stream_.push(eof_packet());\r
\r
if(loop_)\r
{\r
seek(start_, false);\r
graph_->set_tag("seek"); \r
- CASPAR_LOG(trace) << print() << " Looping."; \r
- }\r
- else\r
- {\r
- auto flush_packet = create_packet();\r
- flush_packet->data = nullptr;\r
- flush_packet->size = 0;\r
- buffer_.push(flush_packet);\r
-\r
- eof_ = true;\r
}\r
}\r
else\r
{ \r
- eof_ = false;\r
-\r
THROW_ON_ERROR(ret, "av_read_frame", print());\r
\r
THROW_ON_ERROR2(av_dup_packet(packet.get()), print());\r
packet->size = size;\r
packet->data = data; \r
});\r
+ \r
+ auto stream_time_base = format_context_->streams[packet->stream_index]->time_base;\r
+ auto packet_frame_number = static_cast<uint32_t>((static_cast<double>(packet->pts * stream_time_base.num)/stream_time_base.den)*fps_);\r
\r
- auto time_base = format_context_->streams[packet->stream_index]->time_base;\r
- packet->pts = static_cast<uint64_t>((static_cast<double>(packet->pts * time_base.num)/time_base.den)*fps_);\r
- frame_number_ = static_cast<uint32_t>(packet->pts);\r
-\r
- if(frame_number_ <= frame_number_)\r
+ if(packet->stream_index == default_stream_index_)\r
+ frame_number_ = packet_frame_number;\r
+ \r
+ if(packet_frame_number >= start_ && packet_frame_number < length_)\r
{\r
- buffer_.try_push(packet);\r
- buffer_size_ += packet->size;\r
- \r
- graph_->set_value("buffer-size", (static_cast<double>(buffer_size_)+0.001)/MAX_BUFFER_SIZE);\r
- graph_->set_value("buffer-count", (static_cast<double>(buffer_.size()+0.001)/MAX_BUFFER_COUNT));\r
+ video_stream_.push(packet);\r
+ audio_stream_.push(packet);\r
+ \r
+ graph_->set_value("video-buffer", std::min(1.0, static_cast<double>(video_stream_.size()/MIN_FRAMES)));\r
+ graph_->set_value("audio-buffer", std::min(1.0, static_cast<double>(audio_stream_.size()/MIN_FRAMES)));\r
}\r
- } \r
\r
- if(!eof_)\r
tick(); \r
+ } \r
}\r
catch(...)\r
{\r
\r
input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, uint32_t start, uint32_t length) \r
: impl_(new impl(graph, filename, loop, start, length)){}\r
-bool input::eof() const {return impl_->eof_;}\r
-bool input::try_pop(std::shared_ptr<AVPacket>& packet){return impl_->try_pop(packet);}\r
+bool input::try_pop_video(std::shared_ptr<AVPacket>& packet){return impl_->try_pop_video(packet);}\r
+bool input::try_pop_audio(std::shared_ptr<AVPacket>& packet){return impl_->try_pop_audio(packet);}\r
spl::shared_ptr<AVFormatContext> input::context(){return impl_->format_context_;}\r
void input::loop(bool value){impl_->loop_ = value;}\r
bool input::loop() const{return impl_->loop_;}\r
public:\r
explicit input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, uint32_t start, uint32_t length);\r
\r
- bool try_pop(std::shared_ptr<AVPacket>& packet);\r
- bool eof() const;\r
+ bool try_pop_video(std::shared_ptr<AVPacket>& packet);\r
+ bool try_pop_audio(std::shared_ptr<AVPacket>& packet);\r
+\r
void loop(bool value);\r
bool loop() const;\r
+\r
void start(uint32_t value);\r
uint32_t start() const;\r
+\r
void length(uint32_t value);\r
uint32_t length() const;\r
+\r
void seek(uint32_t target);\r
\r
spl::shared_ptr<AVFormatContext> context();\r
\r
while(!frame_buffer_.empty())\r
frame_buffer_.pop();\r
+\r
+ filter_ = filter(filter_.filter_str());\r
}\r
};\r
\r
return video;\r
}\r
\r
+// Shared sentinel pushed into the packet queues after a seek. Consumers\r
+// compare by pointer identity (packet == flush_packet()); the AVPacket's\r
+// fields are value-initialized (zeroed) and carry no meaning of their own.\r
+spl::shared_ptr<AVPacket> flush_packet()\r
+{\r
+	static spl::shared_ptr<AVPacket> pkt(new AVPacket());\r
+	return pkt;\r
+}\r
+\r
+// Shared sentinel pushed when the demuxer hits end-of-file; identity-compared\r
+// like flush_packet() and never handed to a decoder as a real packet.\r
+spl::shared_ptr<AVPacket> eof_packet()\r
+{\r
+	static spl::shared_ptr<AVPacket> pkt(new AVPacket());\r
+	return pkt;\r
+}\r
+\r
+\r
core::field_mode get_mode(const AVFrame& frame)\r
{\r
if(!frame.interlaced_frame)\r
std::shared_ptr<AVFrame> flush_video();\r
std::shared_ptr<AVFrame> empty_video();\r
\r
+spl::shared_ptr<AVPacket> flush_packet(); \r
+spl::shared_ptr<AVPacket> eof_packet(); \r
+\r
// Utils\r
\r
core::field_mode get_mode(const AVFrame& frame);\r
#include "video_decoder.h"\r
\r
#include "../util/util.h"\r
+#include "../input/input.h"\r
\r
#include "../../ffmpeg_error.h"\r
\r
struct video_decoder::impl : boost::noncopyable\r
{\r
monitor::basic_subject event_subject_;\r
+ input* input_;\r
int index_;\r
const std::shared_ptr<AVCodecContext> codec_context_;\r
\r
const int width_;\r
const int height_;\r
bool is_progressive_;\r
-\r
uint32_t file_frame_number_;\r
+ double fps_;\r
\r
public:\r
explicit impl() \r
- : index_(0)\r
+ : input_(nullptr)\r
, nb_frames_(0)\r
, width_(0)\r
, height_(0)\r
- , is_progressive_(true)\r
, file_frame_number_(0)\r
+ , fps_(0.0)\r
{\r
}\r
\r
- explicit impl(const spl::shared_ptr<AVFormatContext>& context) \r
- : codec_context_(open_codec(*context, AVMEDIA_TYPE_VIDEO, index_))\r
- , nb_frames_(static_cast<uint32_t>(context->streams[index_]->nb_frames))\r
+ explicit impl(input& in) \r
+ : input_(&in)\r
+ , codec_context_(open_codec(*input_->context(), AVMEDIA_TYPE_VIDEO, index_))\r
+ , nb_frames_(static_cast<uint32_t>(input_->context()->streams[index_]->nb_frames))\r
, width_(codec_context_->width)\r
, height_(codec_context_->height)\r
+ , file_frame_number_(0)\r
+ , fps_(read_fps(*input_->context(), 0.0))\r
{\r
- file_frame_number_ = 0;\r
- }\r
-\r
- void push(const std::shared_ptr<AVPacket>& packet)\r
- {\r
- if(!packet || !codec_context_)\r
- return;\r
-\r
- if(packet->stream_index == index_ || packet->data == nullptr)\r
- packets_.push(spl::make_shared_ptr(packet));\r
}\r
-\r
+ \r
	std::shared_ptr<AVFrame> poll()\r
-	{		\r
+	{	\r
+		// No video stream in this file: hand back the shared "empty video" frame.\r
		if(!codec_context_)\r
			return empty_video();\r
\r
-		if(packets_.empty())\r
+		// Pull the next demuxed packet directly from the input (pull model);\r
+		// nullptr means the input is currently starved.\r
+		std::shared_ptr<AVPacket> packet;\r
+		if(!input_->try_pop_video(packet))\r
			return nullptr;\r
-		\r
-		auto packet = packets_.front();\r
-		\r
-		if(packet->data == nullptr)\r
-		{	\r
+\r
+		// Seek sentinel: discard the codec's buffered state, output nothing.\r
+		if(packet == flush_packet())\r
+		{\r
+			avcodec_flush_buffers(codec_context_.get());\r
+			return nullptr;\r
+		}\r
+		// End-of-file sentinel: codecs that buffer output (CODEC_CAP_DELAY)\r
+		// are drained by feeding an empty packet.\r
+		else if(packet == eof_packet())\r
+		{\r
			if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)\r
			{\r
-				auto video = decode(*packet);\r
-				if(video)\r
-					return video;\r
+				AVPacket pkt;	\r
+				av_init_packet(&pkt);\r
+				pkt.data = nullptr;\r
+				pkt.size = 0;\r
+				return decode(pkt);\r
			}\r
-			\r
-			packets_.pop();\r
-			avcodec_flush_buffers(codec_context_.get());	\r
-			return nullptr;	\r
+			return nullptr;\r
		}\r
-		\r
-		packets_.pop();\r
-		return decode(*packet);	\r
+		\r
+		return decode(*packet);\r
	}\r
\r
std::shared_ptr<AVFrame> decode(AVPacket& pkt)\r
\r
if(frame_finished == 0) \r
return nullptr;\r
+ \r
+ auto stream_time_base = input_->context()->streams[pkt.stream_index]->time_base;\r
+ auto packet_frame_number = static_cast<uint32_t>((static_cast<double>(pkt.pts * stream_time_base.num)/stream_time_base.den)*fps_);\r
+\r
+ file_frame_number_ = packet_frame_number;\r
\r
is_progressive_ = !decoded_frame->interlaced_frame;\r
\r
<< monitor::event("file/video/field") % u8(!decoded_frame->interlaced_frame ? "progressive" : (decoded_frame->top_field_first ? "upper" : "lower"))\r
<< monitor::event("file/video/codec") % u8(codec_context_->codec->long_name);\r
\r
- file_frame_number_ = static_cast<uint32_t>(pkt.pts);\r
-\r
- decoded_frame->pts = file_frame_number_;\r
-\r
return decoded_frame;\r
}\r
\r
- bool ready() const\r
- {\r
- return !codec_context_ || !packets_.empty();\r
- }\r
-\r
- void clear()\r
- {\r
- while(!packets_.empty())\r
- packets_.pop();\r
- }\r
-\r
uint32_t nb_frames() const\r
{\r
- return std::max<uint32_t>(nb_frames_, file_frame_number_);\r
+ return std::max(nb_frames_, file_frame_number_);\r
}\r
\r
std::wstring print() const\r
};\r
\r
video_decoder::video_decoder() : impl_(new impl()){}\r
-video_decoder::video_decoder(const spl::shared_ptr<AVFormatContext>& context) : impl_(new impl(context)){}\r
+video_decoder::video_decoder(input& in) : impl_(new impl(in)){}\r
video_decoder::video_decoder(video_decoder&& other) : impl_(std::move(other.impl_)){}\r
video_decoder& video_decoder::operator=(video_decoder&& other){impl_ = std::move(other.impl_); return *this;}\r
-void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
-std::shared_ptr<AVFrame> video_decoder::poll(){return impl_->poll();}\r
-bool video_decoder::ready() const{return impl_->ready();}\r
+// push/poll/ready replaced by operator(): the decoder now pulls packets\r
+// from the input itself instead of having them pushed in.\r
+std::shared_ptr<AVFrame> video_decoder::operator()(){return impl_->poll();}\r
int video_decoder::width() const{return impl_->width_;}\r
int video_decoder::height() const{return impl_->height_;}\r
uint32_t video_decoder::nb_frames() const{return impl_->nb_frames();}\r
uint32_t video_decoder::file_frame_number() const{return impl_->file_frame_number_;}\r
-bool video_decoder::is_progressive() const{return impl_->is_progressive_;}\r
+bool video_decoder::is_progressive() const{return impl_->is_progressive_;}\r
std::wstring video_decoder::print() const{return impl_->print();}\r
-void video_decoder::clear(){impl_->clear();}\r
void video_decoder::subscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.subscribe(o);}\r
void video_decoder::unsubscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.unsubscribe(o);}\r
\r
{\r
public:\r
explicit video_decoder();\r
- explicit video_decoder(const spl::shared_ptr<AVFormatContext>& context);\r
+ explicit video_decoder(class input& input);\r
\r
video_decoder(video_decoder&& other);\r
video_decoder& operator=(video_decoder&& other);\r
\r
- bool ready() const;\r
- void push(const std::shared_ptr<AVPacket>& packet);\r
- std::shared_ptr<AVFrame> poll();\r
+ std::shared_ptr<AVFrame> operator()();\r
\r
int width() const;\r
int height() const;\r
bool is_progressive() const;\r
\r
std::wstring print() const;\r
-\r
- void clear();\r
- \r
+ \r
// monitor::observable\r
\r
void subscribe(const monitor::observable::observer_ptr& o) override;\r
<ForcedIncludeFiles>common/compiler/vs/disable_silly_warnings.h</ForcedIncludeFiles>\r
</ClCompile>\r
<Link>\r
- <AdditionalDependencies>alibcof64.lib;sfml-system-d.lib;sfml-window-d.lib;sfml-graphics-d.lib;sfml-audio-d.lib;Winmm.lib;Ws2_32.lib;avformat.lib;avcodec.lib;avutil.lib;avfilter.lib;swscale.lib;tbb.lib;OpenGL32.lib;FreeImaged.lib;glew32.lib;freetype248_D.lib;openal32.lib</AdditionalDependencies>\r
+ <AdditionalDependencies>alibcof64.lib;sfml-system-d.lib;sfml-window-d.lib;sfml-graphics-d.lib;sfml-audio-d.lib;Winmm.lib;Ws2_32.lib;avformat.lib;avcodec.lib;avutil.lib;avfilter.lib;swscale.lib;swresample.lib;tbb.lib;OpenGL32.lib;FreeImaged.lib;glew32.lib;freetype248_D.lib;openal32.lib</AdditionalDependencies>\r
<Version>\r
</Version>\r
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r
</ClCompile>\r
<Link>\r
<OptimizeReferences>true</OptimizeReferences>\r
- <AdditionalDependencies>alibcof64.lib;sfml-system.lib;sfml-window.lib;sfml-graphics.lib;Winmm.lib;Ws2_32.lib;avformat.lib;avcodec.lib;avutil.lib;avfilter.lib;swscale.lib;tbb.lib;OpenGL32.lib;glew32.lib;freetype248.lib;openal32.lib;freeimage.lib</AdditionalDependencies>\r
+ <AdditionalDependencies>alibcof64.lib;sfml-system.lib;sfml-window.lib;sfml-graphics.lib;Winmm.lib;Ws2_32.lib;avformat.lib;avcodec.lib;avutil.lib;avfilter.lib;swscale.lib;swresample.lib;tbb.lib;OpenGL32.lib;glew32.lib;freetype248.lib;openal32.lib;freeimage.lib</AdditionalDependencies>\r
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\r
<GenerateDebugInformation>true</GenerateDebugInformation>\r
<ProgramDatabaseFile>$(TargetDir)$(TargetName).pdb</ProgramDatabaseFile>\r