tick_timer_.restart();\r
\r
boost::timer frame_timer; \r
+ \r
+ // Video\r
\r
- // PUSH\r
-\r
- void* bytes = nullptr;\r
- if(FAILED(video->GetBytes(&bytes)) || !bytes)\r
+ void* video_bytes = nullptr;\r
+ if(FAILED(video->GetBytes(&video_bytes)) || !video_bytes)\r
return S_OK;\r
\r
- std::shared_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); \r
- avcodec_get_frame_defaults(av_frame.get());\r
+ auto video_frame = ffmpeg::create_frame();\r
\r
- av_frame->data[0] = reinterpret_cast<uint8_t*>(bytes);\r
- av_frame->linesize[0] = video->GetRowBytes(); \r
- av_frame->format = PIX_FMT_UYVY422;\r
- av_frame->width = video->GetWidth();\r
- av_frame->height = video->GetHeight();\r
- av_frame->interlaced_frame = in_format_desc_.field_mode != core::field_mode::progressive;\r
- av_frame->top_field_first = in_format_desc_.field_mode == core::field_mode::upper ? 1 : 0;\r
+ video_frame->data[0] = reinterpret_cast<uint8_t*>(video_bytes);\r
+ video_frame->linesize[0] = video->GetRowBytes(); \r
+ video_frame->format = PIX_FMT_UYVY422;\r
+ video_frame->width = video->GetWidth();\r
+ video_frame->height = video->GetHeight();\r
+ video_frame->interlaced_frame = in_format_desc_.field_mode != core::field_mode::progressive;\r
+ video_frame->top_field_first = in_format_desc_.field_mode == core::field_mode::upper ? 1 : 0;\r
\r
event_subject_ << monitor::event("file/name") % model_name_\r
<< monitor::event("file/path") % device_index_\r
<< monitor::event("file/video/width") % video->GetWidth()\r
<< monitor::event("file/video/height") % video->GetHeight()\r
- << monitor::event("file/video/field") % u8(!av_frame->interlaced_frame ? "progressive" : (av_frame->top_field_first ? "upper" : "lower"))\r
+ << monitor::event("file/video/field") % u8(!video_frame->interlaced_frame ? "progressive" : (video_frame->top_field_first ? "upper" : "lower"))\r
<< monitor::event("file/audio/sample-rate") % 48000\r
<< monitor::event("file/audio/channels") % 2\r
<< monitor::event("file/audio/format") % u8(av_get_sample_fmt_name(AV_SAMPLE_FMT_S32))\r
<< monitor::event("file/fps") % in_format_desc_.fps;\r
\r
-	std::shared_ptr<core::audio_buffer> audio_buffer;
+	// Audio

-	// It is assumed that audio is always equal or ahead of video.
-	if(audio && SUCCEEDED(audio->GetBytes(&bytes)) && bytes)
-	{
-		auto sample_frame_count = audio->GetSampleFrameCount();
-		auto audio_data = reinterpret_cast<int32_t*>(bytes);
-		audio_buffer = std::make_shared<core::audio_buffer>(audio_data, audio_data + sample_frame_count*out_format_desc_.audio_channels);
-	}
-	else	
-		audio_buffer = std::make_shared<core::audio_buffer>(audio_cadence_.front(), 0);

+	auto audio_frame = ffmpeg::create_frame();
+
+	// It is assumed that audio is always equal or ahead of video.
+	// DeckLink may deliver a frame with no audio packet (audio == nullptr) or
+	// an empty one; in that case the frame is left empty (data[0] == nullptr)
+	// which signals cadence silence downstream instead of dropping the video.
+	void* audio_bytes = nullptr;
+	if(audio && SUCCEEDED(audio->GetBytes(&audio_bytes)) && audio_bytes)
+	{
+		audio_frame->data[0]		= reinterpret_cast<uint8_t*>(audio_bytes);
+		audio_frame->linesize[0]	= audio->GetSampleFrameCount()*out_format_desc_.audio_channels*sizeof(int32_t);
+		audio_frame->nb_samples		= audio->GetSampleFrameCount();
+		audio_frame->format		= AV_SAMPLE_FMT_S32;
+	}
+	
	// Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
	// This cadence fills the audio mixer most optimally.

-	sync_buffer_.push_back(audio_buffer->size());	
+	// Silence frames contribute the front cadence value, matching the size of
+	// the silence buffer the removed code used to push.
+	sync_buffer_.push_back(audio_frame->nb_samples > 0 ? audio_frame->nb_samples*out_format_desc_.audio_channels : audio_cadence_.front());	
	if(!boost::range::equal(sync_buffer_, audio_cadence_))
	{
		CASPAR_LOG(trace) << print() << L" Syncing audio.";
		return S_OK;
	}
- \r
- muxer_.push(av_frame); \r
- muxer_.push(audio_buffer);\r
- \r
boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);\r
\r
+ // PUSH\r
+\r
+ muxer_.push(video_frame); \r
+ muxer_.push(audio_frame); \r
+ \r
// POLL\r
\r
auto frame = core::draw_frame::late();\r
THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");\r
}\r
\r
- std::shared_ptr<core::audio_buffer> poll()\r
+ std::shared_ptr<AVFrame> poll()\r
{ \r
if(!codec_context_)\r
- return empty_audio();\r
+ return create_frame();\r
\r
if(!current_packet_ && !input_->try_pop_audio(current_packet_))\r
return nullptr;\r
\r
- std::shared_ptr<core::audio_buffer> audio;\r
+ std::shared_ptr<AVFrame> audio;\r
\r
if(!current_packet_) \r
{\r
return audio ? audio : poll();\r
}\r
\r
- std::shared_ptr<core::audio_buffer> decode(AVPacket& pkt)\r
+ std::shared_ptr<AVFrame> decode(AVPacket& pkt)\r
{ \r
- std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
+ auto frame = create_frame();\r
\r
int got_frame = 0;\r
- auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), decoded_frame.get(), &got_frame, &pkt), "[audio_decoder]");\r
+ auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), frame.get(), &got_frame, &pkt), "[audio_decoder]");\r
\r
if(len == 0)\r
{\r
if(!got_frame)\r
return nullptr;\r
\r
- const uint8_t *in[] = {decoded_frame->data[0]};\r
+ const uint8_t *in[] = {frame->data[0]};\r
uint8_t* out[] = {buffer_.data()};\r
\r
auto channel_samples = swr_convert(swr_.get(), \r
out, static_cast<int>(buffer_.size()) / format_desc_.audio_channels / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32), \r
- in, decoded_frame->nb_samples);\r
- \r
- auto ptr = reinterpret_cast<int32_t*>(buffer_.data());\r
- \r
+ in, frame->nb_samples); \r
+
+		// NOTE(review): the frame now aliases the shared buffer_ member; the
+		// next decode() call overwrites buffer_, so consumers must copy or
+		// finish with the frame before polling again — confirm all callers do.
+		frame->data[0]		= buffer_.data();
+		frame->linesize[0]	= channel_samples * format_desc_.audio_channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);
+		frame->nb_samples	= channel_samples;
+		frame->format		= AV_SAMPLE_FMT_S32;
+	
event_subject_ << monitor::event("file/audio/sample-rate") % codec_context_->sample_rate\r
<< monitor::event("file/audio/channels") % codec_context_->channels\r
<< monitor::event("file/audio/format") % u8(av_get_sample_fmt_name(codec_context_->sample_fmt))\r
- << monitor::event("file/audio/codec") % u8(codec_context_->codec->long_name); \r
+ << monitor::event("file/audio/codec") % u8(codec_context_->codec->long_name); \r
\r
- return std::make_shared<core::audio_buffer>(ptr, ptr + channel_samples * format_desc_.audio_channels);\r
+ return frame;\r
}\r
\r
uint32_t nb_frames() const\r
audio_decoder::audio_decoder(input& input, const core::video_format_desc& format_desc) : impl_(new impl(input, format_desc)){}\r
audio_decoder::audio_decoder(audio_decoder&& other) : impl_(std::move(other.impl_)){}\r
audio_decoder& audio_decoder::operator=(audio_decoder&& other){impl_ = std::move(other.impl_); return *this;}\r
-std::shared_ptr<core::audio_buffer> audio_decoder::operator()(){return impl_->poll();}\r
+std::shared_ptr<AVFrame> audio_decoder::operator()(){return impl_->poll();}\r
uint32_t audio_decoder::nb_frames() const{return impl_->nb_frames();}\r
std::wstring audio_decoder::print() const{return impl_->print();}\r
void audio_decoder::subscribe(const monitor::observable::observer_ptr& o){impl_->event_subject_.subscribe(o);}\r
audio_decoder(audio_decoder&& other);\r
audio_decoder& operator=(audio_decoder&& other);\r
\r
- std::shared_ptr<core::audio_buffer> operator()();\r
+ std::shared_ptr<AVFrame> operator()();\r
\r
uint32_t nb_frames() const;\r
\r
// This cadence fills the audio mixer most optimally.\r
boost::range::rotate(audio_cadence_, std::end(audio_cadence_)-1);\r
}\r
+	
+	// Dispatches a frame to the audio or video stream based on nb_samples.
+	// NOTE(review): empty frames from create_frame() have nb_samples == 0 and
+	// are routed to push_video() even when the audio decoder produced them to
+	// signal silence — confirm this is the intended contract.
+	// NOTE(review): frame is dereferenced unconditionally; callers must not
+	// pass nullptr.
+	void push(const std::shared_ptr<AVFrame>& frame)
+	{
+		if(frame->nb_samples > 0)
+			push_audio(frame);
+		else
+			push_video(frame);
+	}
\r
-	void push(const std::shared_ptr<AVFrame>& video)
+	void push_video(const std::shared_ptr<AVFrame>& video)
	{		
-		if(video == empty_video())
+		// AVFrame::data is an array member, so `!video->data` is always false;
+		// test the first plane to detect an empty frame from create_frame().
+		if(!video->data[0])
		{
			auto empty_frame = frame_factory_->create_frame(this, core::pixel_format_desc(core::pixel_format::invalid));
			video_stream_.push(std::move(empty_frame));
			merge();
		}
\r
-	void push(const std::shared_ptr<core::audio_buffer>& audio)
+	void push_audio(const std::shared_ptr<AVFrame>& audio)
	{
-		if(audio == empty_audio())	
+		// AVFrame::data is an array member, so `!audio->data` is always false
+		// and the silence branch would be dead code. An empty frame (no first
+		// plane) signals cadence silence. The removed empty_audio() comparison
+		// was null-safe, so guard against a null frame before dereferencing.
+		if(audio && !audio->data[0])	
			boost::range::push_back(audio_stream_, core::audio_buffer(audio_cadence_.front(), 0));	
		else if(audio)	
-			boost::range::push_back(audio_stream_, *audio);	
+		{
+			// linesize[0] holds the payload size in bytes (set by the decoder).
+			auto ptr = reinterpret_cast<int32_t*>(audio->data[0]);
+			audio_stream_.insert(audio_stream_.end(), ptr, ptr + audio->linesize[0]/sizeof(int32_t));
+		}

		merge();
	}
\r
frame_muxer::frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter)\r
: impl_(new impl(in_fps, frame_factory, format_desc, filter)){}\r
-void frame_muxer::push(const std::shared_ptr<AVFrame>& video_frame){impl_->push(video_frame);}\r
-void frame_muxer::push(const std::shared_ptr<core::audio_buffer>& audio_samples){return impl_->push(audio_samples);}\r
+void frame_muxer::push(const std::shared_ptr<AVFrame>& frame){impl_->push(frame);}\r
bool frame_muxer::empty() const{return impl_->empty();}\r
core::draw_frame frame_muxer::front() const{return impl_->front();}\r
void frame_muxer::pop(){return impl_->pop();}\r
public:\r
frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter = L"");\r
\r
- void push(const std::shared_ptr<AVFrame>& video_frame);\r
- void push(const std::shared_ptr<core::audio_buffer>& audio_samples);\r
+ void push(const std::shared_ptr<AVFrame>& frame);\r
\r
bool video_ready() const;\r
bool audio_ready() const;\r
\r
namespace caspar { namespace ffmpeg {\r
\r
-std::shared_ptr<core::audio_buffer> empty_audio()\r
-{\r
- static std::shared_ptr<core::audio_buffer> audio(new core::audio_buffer());\r
- return audio;\r
-}\r
-\r
-std::shared_ptr<AVFrame> empty_video()\r
-{\r
- static std::shared_ptr<AVFrame> video(avcodec_alloc_frame(), av_free);\r
- return video;\r
-}\r
-\r
core::field_mode get_mode(const AVFrame& frame)\r
{\r
if(!frame.interlaced_frame)\r
return packet;\r
}\r
\r
+// Allocates an AVFrame with default field values. The av_free deleter only
+// releases the frame struct itself — it does not free any buffers that are
+// later attached to data[], so whoever attaches them retains ownership.
+spl::shared_ptr<AVFrame> create_frame()
+{	
+	spl::shared_ptr<AVFrame> frame(avcodec_alloc_frame(), av_free);
+	avcodec_get_frame_defaults(frame.get());
+	return frame;
+}
+\r
spl::shared_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index)\r
{ \r
AVCodec* decoder;\r
\r
namespace caspar { namespace ffmpeg {\r
\r
-std::shared_ptr<core::audio_buffer> empty_audio();\r
-std::shared_ptr<AVFrame> empty_video(); \r
-\r
// Utils\r
\r
core::field_mode get_mode(const AVFrame& frame);\r
-core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory);\r
+core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory);\r
spl::shared_ptr<AVFrame> make_av_frame(core::mutable_frame& frame);\r
spl::shared_ptr<AVFrame> make_av_frame(core::const_frame& frame);\r
spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core::pixel_format_desc& pix_desc);\r
core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int height);\r
\r
spl::shared_ptr<AVPacket> create_packet();\r
+spl::shared_ptr<AVFrame> create_frame();\r
\r
spl::shared_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index);\r
spl::shared_ptr<AVFormatContext> open_input(const std::wstring& filename);\r
std::shared_ptr<AVFrame> poll()\r
{ \r
if(!codec_context_)\r
- return empty_video();\r
+ return create_frame();\r
\r
if(!current_packet_ && !input_->try_pop_video(current_packet_))\r
return nullptr;\r
\r
std::shared_ptr<AVFrame> decode(AVPacket& pkt)\r
{\r
- auto frame = std::shared_ptr<AVFrame>(avcodec_alloc_frame(), av_free);\r
+ auto frame = create_frame();\r
\r
int got_frame = 0;\r
auto len = THROW_ON_ERROR2(avcodec_decode_video2(codec_context_.get(), frame.get(), &got_frame, &pkt), "[video_decocer]");\r
<screen>\r
<device>1</device>\r
</screen>\r
+ <system-audio></system-audio>\r
+ </consumers>\r
+ </channel>\r
+ <channel>\r
+ <video-mode>720p5000</video-mode>\r
+ <consumers>\r
+ <decklink>\r
+ <device>1</device>\r
+ </decklink>\r
</consumers>\r
</channel>\r
</channels>\r