\r
LOADBG [channel:int]{-[layer:int]|-0} [clip:string] {[loop:LOOP]}\r
{[transition:CUT,MIX,PUSH,WIPE] [duration:uint] {[tween:string]|linear} \r
- {[direction:LEFT,RIGHT]|RIGHT}|CUT 0} {([start_frame:uint]{,[end_frame:uint]})|(0)}\r
+ {[direction:LEFT,RIGHT]|RIGHT}|CUT 0} {([start:uint]{,[length:uint]})|(0)}\r
\r
Description: \r
\r
<ClInclude Include="producer\audio\audio_decoder.h" />\r
<ClInclude Include="producer\ffmpeg_producer.h" />\r
<ClInclude Include="producer\input.h" />\r
+ <ClInclude Include="producer\packet.h" />\r
<ClInclude Include="producer\video\video_decoder.h" />\r
<ClInclude Include="StdAfx.h" />\r
</ItemGroup>\r
<ClInclude Include="StdAfx.h" />\r
<ClInclude Include="ffmpeg.h" />\r
<ClInclude Include="ffmpeg_error.h" />\r
+ <ClInclude Include="producer\packet.h">\r
+ <Filter>producer</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
}\r
}\r
\r
- std::vector<std::vector<short>> execute(const std::shared_ptr<aligned_buffer>& audio_packet)\r
- { \r
- if(!audio_packet)\r
- return std::vector<std::vector<short>>();\r
+ std::vector<std::vector<short>> execute(const packet& audio_packet)\r
+ { \r
+ std::vector<std::vector<short>> result;\r
\r
- if(audio_packet->empty()) // Need to flush\r
+ switch(audio_packet.type)\r
{\r
+ case flush_packet:\r
avcodec_flush_buffers(codec_context_);\r
- return std::vector<std::vector<short>>();\r
- }\r
-\r
- auto s = current_chunk_.size();\r
- current_chunk_.resize(s + 4*format_desc_.audio_sample_rate*2+FF_INPUT_BUFFER_PADDING_SIZE/2);\r
+ break;\r
+ case data_packet:\r
+ auto s = current_chunk_.size();\r
+ current_chunk_.resize(s + 4*format_desc_.audio_sample_rate*2+FF_INPUT_BUFFER_PADDING_SIZE/2);\r
\r
- int written_bytes = (current_chunk_.size() - s)*2 - FF_INPUT_BUFFER_PADDING_SIZE;\r
-		const int errn = avcodec_decode_audio2(codec_context_, &current_chunk_[s], &written_bytes, audio_packet->data(), audio_packet->size());
- if(errn < 0)\r
- { \r
- BOOST_THROW_EXCEPTION(\r
- invalid_operation() <<\r
- boost::errinfo_api_function("avcodec_decode_audio2") <<\r
- boost::errinfo_errno(AVUNERROR(errn)));\r
- }\r
+ int written_bytes = (current_chunk_.size() - s)*2 - FF_INPUT_BUFFER_PADDING_SIZE;\r
+			const int errn = avcodec_decode_audio2(codec_context_, &current_chunk_[s], &written_bytes, audio_packet.data->data(), audio_packet.data->size());
+ if(errn < 0)\r
+ { \r
+ BOOST_THROW_EXCEPTION(\r
+ invalid_operation() <<\r
+ boost::errinfo_api_function("avcodec_decode_audio2") <<\r
+ boost::errinfo_errno(AVUNERROR(errn)));\r
+ }\r
\r
- current_chunk_.resize(s + written_bytes/2);\r
+ current_chunk_.resize(s + written_bytes/2);\r
\r
- const auto last = current_chunk_.end() - current_chunk_.size() % format_desc_.audio_samples_per_frame;\r
+ const auto last = current_chunk_.end() - current_chunk_.size() % format_desc_.audio_samples_per_frame;\r
\r
- std::vector<std::vector<short>> chunks;\r
- for(auto it = current_chunk_.begin(); it != last; it += format_desc_.audio_samples_per_frame) \r
- chunks.push_back(std::vector<short>(it, it + format_desc_.audio_samples_per_frame)); \r
+ for(auto it = current_chunk_.begin(); it != last; it += format_desc_.audio_samples_per_frame) \r
+ result.push_back(std::vector<short>(it, it + format_desc_.audio_samples_per_frame)); \r
\r
- current_chunk_.erase(current_chunk_.begin(), last);\r
- \r
- return chunks;\r
+ current_chunk_.erase(current_chunk_.begin(), last);\r
+ }\r
+ \r
+ return result;\r
}\r
};\r
\r
audio_decoder::audio_decoder(AVCodecContext* codec_context, const core::video_format_desc& format_desc) : impl_(new implementation(codec_context, format_desc)){}\r
-std::vector<std::vector<short>> audio_decoder::execute(const std::shared_ptr<aligned_buffer>& audio_packet){return impl_->execute(audio_packet);}\r
+std::vector<std::vector<short>> audio_decoder::execute(const packet& audio_packet){return impl_->execute(audio_packet);}\r
}
\ No newline at end of file
*/\r
#pragma once\r
\r
+#include "../packet.h"\r
+\r
#include <core/video_format.h>\r
\r
#include <tbb/cache_aligned_allocator.h>\r
\r
namespace caspar {\r
\r
-typedef std::vector<unsigned char, tbb::cache_aligned_allocator<unsigned char>> aligned_buffer;\r
-\r
class audio_decoder : boost::noncopyable\r
{\r
public:\r
explicit audio_decoder(AVCodecContext* codec_context, const core::video_format_desc& format_desc);\r
- std::vector<std::vector<short>> execute(const std::shared_ptr<aligned_buffer>& audio_packet);\r
+ std::vector<std::vector<short>> execute(const packet& audio_packet);\r
private:\r
struct implementation;\r
std::shared_ptr<implementation> impl_;\r
#include <core/video_format.h>\r
\r
#include <common/env.h>\r
-#include <common/utility/timer.h>\r
-#include <common/utility/assert.h>\r
\r
#include <tbb/parallel_invoke.h>\r
\r
#include <boost/timer.hpp>\r
\r
#include <deque>\r
-#include <functional>\r
\r
namespace caspar {\r
\r
struct ffmpeg_producer : public core::frame_producer\r
{\r
- const std::wstring filename_;\r
- const bool loop_;\r
+ const std::wstring filename_;\r
+ const bool loop_;\r
\r
- std::shared_ptr<diagnostics::graph> graph_;\r
- boost::timer perf_timer_;\r
+ std::shared_ptr<diagnostics::graph> graph_;\r
+ boost::timer frame_timer_;\r
\r
std::deque<safe_ptr<core::write_frame>> video_frame_buffer_; \r
std::deque<std::vector<short>> audio_chunk_buffer_;\r
-\r
- std::queue<safe_ptr<core::basic_frame>> ouput_channel_;\r
- \r
+ \r
std::shared_ptr<core::frame_factory> frame_factory_;\r
\r
- input input_; \r
- std::unique_ptr<video_decoder> video_decoder_;\r
- std::unique_ptr<audio_decoder> audio_decoder_;\r
+ input input_; \r
+ std::unique_ptr<video_decoder> video_decoder_;\r
+ std::unique_ptr<audio_decoder> audio_decoder_;\r
public:\r
- explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, bool loop, int start_frame, int end_frame) \r
+ explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, bool loop, int start, int length) \r
: filename_(filename)\r
, loop_(loop) \r
, graph_(diagnostics::create_graph(narrow(print())))\r
, frame_factory_(frame_factory) \r
- , input_(safe_ptr<diagnostics::graph>(graph_), filename_, loop_, start_frame, end_frame)\r
+ , input_(safe_ptr<diagnostics::graph>(graph_), filename_, loop_, start, length)\r
{\r
graph_->add_guide("frame-time", 0.5);\r
graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
\r
try\r
{ \r
- video_decoder_.reset(input_.get_video_codec_context().get() ? \r
+ video_decoder_.reset(input_.get_video_codec_context() ? \r
new video_decoder(input_.get_video_codec_context().get(), frame_factory) : nullptr);\r
}\r
catch(...)\r
\r
try\r
{ \r
- audio_decoder_.reset(input_.get_audio_codec_context().get() ? \r
+ audio_decoder_.reset(input_.get_audio_codec_context() ? \r
new audio_decoder(input_.get_audio_codec_context().get(), frame_factory->get_video_format_desc()) : nullptr);\r
}\r
catch(...)\r
{\r
BOOST_THROW_EXCEPTION(\r
caspar_exception() <<\r
+ source_info(narrow(print())) << \r
msg_info("Failed to initialize any decoder"));\r
}\r
}\r
\r
virtual safe_ptr<core::basic_frame> receive()\r
{\r
- perf_timer_.restart();\r
+ frame_timer_.restart();\r
\r
- for(size_t n = 0; ouput_channel_.size() < 2 && input_.has_packet() && n < 32; ++n) // 32 packets should be enough. Otherwise there probably was an error and we want to avoid infinite looping.\r
+ std::shared_ptr<core::basic_frame> frame; \r
+ for(size_t n = 0; !frame && input_.has_packet() && n < 64; ++n) // 64 packets should be enough. Otherwise there probably was an error and we want to avoid infinite looping.\r
{ \r
tbb::parallel_invoke\r
(\r
}\r
); \r
\r
- merge_audio_and_video(); \r
+ frame = try_merge_audio_and_video(); \r
}\r
\r
- graph_->update_value("frame-time", static_cast<float>(perf_timer_.elapsed()*frame_factory_->get_video_format_desc().fps*0.5));\r
+ graph_->update_value("frame-time", static_cast<float>(frame_timer_.elapsed()*frame_factory_->get_video_format_desc().fps*0.5));\r
\r
- return get_next_frame();\r
+ if(frame)\r
+ return make_safe(frame);\r
+ \r
+ if(!input_.is_running())\r
+ return core::basic_frame::eof();\r
+\r
+ if(!video_decoder_ && !audio_decoder_)\r
+ return core::basic_frame::eof();\r
+ \r
+ graph_->add_tag("underflow");\r
+ return core::basic_frame::late(); \r
}\r
\r
virtual std::wstring print() const\r
return L"ffmpeg[" + boost::filesystem::wpath(filename_).filename() + L"]";\r
}\r
\r
- void try_decode_video_packet(const std::shared_ptr<aligned_buffer>& video_packet)\r
+ void try_decode_video_packet(const packet& video_packet)\r
{\r
- if(video_decoder_) // Video Decoding.\r
+ if(!video_decoder_) // Video Decoding.\r
+ return;\r
+\r
+ try\r
{\r
- try\r
- {\r
- auto frames = video_decoder_->execute(this, video_packet);\r
- video_frame_buffer_.insert(video_frame_buffer_.end(), frames.begin(), frames.end());\r
- }\r
- catch(...)\r
- {\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- video_decoder_.reset();\r
- CASPAR_LOG(warning) << print() << " removed video-stream.";\r
- }\r
+ auto frames = video_decoder_->execute(this, video_packet);\r
+ video_frame_buffer_.insert(video_frame_buffer_.end(), frames.begin(), frames.end());\r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ video_decoder_.reset();\r
+ CASPAR_LOG(warning) << print() << " removed video-stream.";\r
}\r
}\r
\r
- void try_decode_audio_packet(const std::shared_ptr<aligned_buffer>& audio_packet)\r
+ void try_decode_audio_packet(const packet& audio_packet)\r
{\r
- if(audio_decoder_) // Audio Decoding.\r
+ if(!audio_decoder_) // Audio Decoding.\r
+ return;\r
+\r
+ try\r
{\r
- try\r
- {\r
- auto chunks = audio_decoder_->execute(audio_packet);\r
- audio_chunk_buffer_.insert(audio_chunk_buffer_.end(), chunks.begin(), chunks.end());\r
- }\r
- catch(...)\r
- {\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
- audio_decoder_.reset();\r
- CASPAR_LOG(warning) << print() << " removed audio-stream.";\r
- }\r
+ auto chunks = audio_decoder_->execute(audio_packet);\r
+ audio_chunk_buffer_.insert(audio_chunk_buffer_.end(), chunks.begin(), chunks.end());\r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ audio_decoder_.reset();\r
+ CASPAR_LOG(warning) << print() << " removed audio-stream.";\r
}\r
}\r
\r
- void merge_audio_and_video()\r
+ std::shared_ptr<core::basic_frame> try_merge_audio_and_video()\r
{ \r
std::shared_ptr<core::write_frame> frame; \r
\r
audio_chunk_buffer_.pop_front();\r
}\r
\r
- if(frame)\r
- ouput_channel_.push(make_safe(frame)); \r
- }\r
- \r
- safe_ptr<core::basic_frame> get_next_frame()\r
- {\r
- if(is_eof())\r
- return core::basic_frame::eof();\r
-\r
- if(ouput_channel_.empty())\r
- {\r
- graph_->add_tag("underflow");\r
- return core::basic_frame::late(); \r
- }\r
-\r
- auto frame = std::move(ouput_channel_.front());\r
- ouput_channel_.pop(); \r
- return frame;\r
- }\r
-\r
- bool is_eof() const\r
- {\r
- return ouput_channel_.empty() && ((!video_decoder_ && !audio_decoder_) || !input_.is_running());\r
+ return frame; \r
}\r
};\r
\r
std::wstring path = filename + L"." + *ext;\r
bool loop = std::find(params.begin(), params.end(), L"LOOP") != params.end();\r
\r
- static const boost::wregex expr(L"\\((?<START>\\d+)(,(?<END>\\d+)?)?\\)");//(,(?<END>\\d+))?\\]"); // boost::regex has no repeated captures?\r
+ static const boost::wregex expr(L"\\((?<START>\\d+)(,(?<LENGTH>\\d+)?)?\\)");//(,(?<END>\\d+))?\\]"); // boost::regex has no repeated captures?\r
boost::wsmatch what;\r
auto it = std::find_if(params.begin(), params.end(), [&](const std::wstring& str)\r
{\r
});\r
\r
int start = -1;\r
- int end = -1;\r
+ int length = -1;\r
\r
if(it != params.end())\r
{\r
start = lexical_cast_or_default(what["START"].str(), -1);\r
- if(what["END"].matched)\r
- end = lexical_cast_or_default(what["END"].str(), -1);\r
+ if(what["LENGTH"].matched)\r
+ length = lexical_cast_or_default(what["LENGTH"].str(), -1);\r
}\r
\r
- return make_safe<ffmpeg_producer>(frame_factory, path, loop, start, end);\r
+ return make_safe<ffmpeg_producer>(frame_factory, path, loop, start, length);\r
}\r
\r
}
\ No newline at end of file
const bool loop_;\r
int video_s_index_;\r
int audio_s_index_;\r
- const int start_frame_;\r
- const int end_frame_;\r
+ const int start_;\r
+ const int length_;\r
int eof_count_;\r
\r
- tbb::concurrent_bounded_queue<std::shared_ptr<aligned_buffer>> video_packet_buffer_;\r
- tbb::concurrent_bounded_queue<std::shared_ptr<aligned_buffer>> audio_packet_buffer_;\r
+ tbb::concurrent_bounded_queue<packet> video_packet_buffer_;\r
+ tbb::concurrent_bounded_queue<packet> audio_packet_buffer_;\r
\r
boost::condition_variable cond_;\r
boost::mutex mutex_;\r
std::exception_ptr exception_;\r
executor executor_;\r
public:\r
- explicit implementation(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start_frame, int end_frame) \r
+ explicit implementation(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start, int length) \r
: graph_(graph)\r
, loop_(loop)\r
, video_s_index_(-1)\r
, audio_s_index_(-1)\r
, filename_(filename)\r
, executor_(print())\r
- , start_frame_(std::max(start_frame, 0))\r
- , end_frame_(end_frame)\r
- , eof_count_(end_frame-start_frame)\r
+ , start_(std::max(start, 0))\r
+ , length_(length)\r
+ , eof_count_(length)\r
{ \r
- if(end_frame_ > 0 && end_frame <= start_frame_)\r
- { \r
- BOOST_THROW_EXCEPTION(\r
- invalid_argument() << \r
- source_info(narrow(print())) << \r
- msg_info("End-frame cannot be lower than start-frame.")); \r
- }\r
-\r
graph_->set_color("input-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));\r
graph_->set_color("seek", diagnostics::color(0.5f, 1.0f, 0.5f)); \r
\r
msg_info("No video or audio codec context found.")); \r
}\r
\r
- if(start_frame_ != 0) \r
- seek_frame(start_frame_);\r
+ if(start_ != 0) \r
+ seek_frame(start_);\r
\r
executor_.start();\r
executor_.begin_invoke([this]{read_file();});\r
cond_.notify_all();\r
}\r
\r
- std::shared_ptr<aligned_buffer> get_video_packet()\r
+ packet get_video_packet()\r
{\r
return get_packet(video_packet_buffer_);\r
}\r
\r
- std::shared_ptr<aligned_buffer> get_audio_packet()\r
+ packet get_audio_packet()\r
{\r
return get_packet(audio_packet_buffer_);\r
}\r
{\r
if(loop_)\r
{\r
- seek_frame(start_frame_, AVSEEK_FLAG_BACKWARD);\r
+ seek_frame(start_, AVSEEK_FLAG_BACKWARD);\r
// AVCodecContext.frame_number is not reset. Increase the target frame_number.\r
- eof_count_ += end_frame_ - start_frame_; \r
+ eof_count_ += length_; \r
graph_->add_tag("seek"); \r
} \r
else\r
else\r
{\r
if(read_packet->stream_index == video_s_index_) \r
- video_packet_buffer_.try_push(std::make_shared<aligned_buffer>(read_packet->data, read_packet->data + read_packet->size)); \r
- else if(read_packet->stream_index == audio_s_index_) \r
- audio_packet_buffer_.try_push(std::make_shared<aligned_buffer>(read_packet->data, read_packet->data + read_packet->size)); \r
+ video_packet_buffer_.try_push(packet(read_packet->data, read_packet->data + read_packet->size)); \r
+		else if(read_packet->stream_index == audio_s_index_)
+			audio_packet_buffer_.try_push(packet(read_packet->data, read_packet->data + read_packet->size)); 
}\r
\r
graph_->update_value("input-buffer", static_cast<float>(video_packet_buffer_.size())/static_cast<float>(PACKET_BUFFER_COUNT)); \r
}\r
\r
// Notify decoders to flush buffers.\r
- video_packet_buffer_.try_push(std::make_shared<aligned_buffer>()); \r
- audio_packet_buffer_.try_push(std::make_shared<aligned_buffer>());\r
- }\r
- \r
+ video_packet_buffer_.try_push(flush_packet); \r
+ audio_packet_buffer_.try_push(flush_packet);\r
+ } \r
\r
bool is_eof(int errn)\r
{\r
- if(end_frame_ != -1)\r
+ if(length_ != -1)\r
return get_default_context()->frame_number > eof_count_; \r
\r
return errn == AVERROR_EOF || errn == AVERROR_IO;\r
}\r
\r
- std::shared_ptr<aligned_buffer> get_packet(tbb::concurrent_bounded_queue<std::shared_ptr<aligned_buffer>>& buffer)\r
+ packet get_packet(tbb::concurrent_bounded_queue<packet>& buffer)\r
{\r
+ packet packet;\r
+ buffer.try_pop(packet);\r
cond_.notify_all();\r
- std::shared_ptr<aligned_buffer> packet;\r
- return buffer.try_pop(packet) ? packet : nullptr;\r
+ return packet;\r
}\r
\r
std::wstring print() const\r
}\r
};\r
\r
-input::input(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start_frame, int end_frame) : impl_(new implementation(graph, filename, loop, start_frame, end_frame)){}\r
+input::input(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start, int length) \r
+ : impl_(new implementation(graph, filename, loop, start, length)){}\r
const std::shared_ptr<AVCodecContext>& input::get_video_codec_context() const{return impl_->video_codec_context_;}\r
const std::shared_ptr<AVCodecContext>& input::get_audio_codec_context() const{return impl_->audio_codex_context_;}\r
bool input::has_packet() const{return impl_->has_packet();}\r
bool input::is_running() const {return impl_->executor_.is_running();}\r
-std::shared_ptr<aligned_buffer> input::get_video_packet(){return impl_->get_video_packet();}\r
-std::shared_ptr<aligned_buffer> input::get_audio_packet(){return impl_->get_audio_packet();}\r
+packet input::get_video_packet(){return impl_->get_video_packet();}\r
+packet input::get_audio_packet(){return impl_->get_audio_packet();}\r
double input::fps() const { return impl_->fps(); }\r
}
\ No newline at end of file
\r
#include <common/diagnostics/graph.h>\r
\r
-#include <tbb/cache_aligned_allocator.h>\r
+#include "packet.h"\r
\r
#include <memory>\r
#include <string>\r
\r
namespace caspar {\r
\r
-typedef std::vector<unsigned char, tbb::cache_aligned_allocator<unsigned char>> aligned_buffer;\r
-\r
class input : boost::noncopyable\r
{\r
public:\r
- explicit input(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start_frame, int end_frame);\r
+ explicit input(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, int start, int length);\r
const std::shared_ptr<AVCodecContext>& get_video_codec_context() const;\r
const std::shared_ptr<AVCodecContext>& get_audio_codec_context() const;\r
\r
- std::shared_ptr<aligned_buffer> get_video_packet();\r
- std::shared_ptr<aligned_buffer> get_audio_packet();\r
+ packet get_video_packet();\r
+ packet get_audio_packet();\r
\r
bool has_packet() const;\r
bool is_running() const;\r
#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/frame/write_frame.h>\r
#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/pixel_format.h>\r
+#include <core/producer/frame/frame_factory.h>\r
\r
#include <tbb/parallel_for.h>\r
\r
}\r
}\r
\r
- std::vector<safe_ptr<core::write_frame>> execute(void* tag, const std::shared_ptr<aligned_buffer>& video_packet)\r
+ std::vector<safe_ptr<core::write_frame>> execute(void* tag, const packet& video_packet)\r
{ \r
std::vector<safe_ptr<core::write_frame>> result;\r
\r
- if(!video_packet)\r
- return result;\r
-\r
- if(video_packet->empty()) // Need to flush\r
+ switch(video_packet.type)\r
{\r
+ case flush_packet:\r
avcodec_flush_buffers(codec_context_);\r
- return result;\r
- }\r
+ break;\r
+ case data_packet: \r
+ safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
\r
- safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
-\r
- int frame_finished = 0;\r
- const int errn = avcodec_decode_video(codec_context_, decoded_frame.get(), &frame_finished, video_packet->data(), video_packet->size());\r
- \r
- if(errn < 0)\r
- {\r
- BOOST_THROW_EXCEPTION(\r
- invalid_operation() <<\r
- msg_info(av_error_str(errn)) <<\r
- boost::errinfo_api_function("avcodec_decode_video") <<\r
- boost::errinfo_errno(AVUNERROR(errn)));\r
- }\r
+ int frame_finished = 0;\r
+ const int errn = avcodec_decode_video(codec_context_, decoded_frame.get(), &frame_finished, video_packet.data->data(), video_packet.data->size());\r
\r
- if(frame_finished == 0)\r
- return result;\r
+ if(errn < 0)\r
+ {\r
+ BOOST_THROW_EXCEPTION(\r
+ invalid_operation() <<\r
+ msg_info(av_error_str(errn)) <<\r
+ boost::errinfo_api_function("avcodec_decode_video") <<\r
+ boost::errinfo_errno(AVUNERROR(errn)));\r
+ }\r
\r
- result.push_back(make_write_frame(tag, decoded_frame));\r
+ if(frame_finished != 0) \r
+ result.push_back(make_write_frame(tag, decoded_frame));\r
+ }\r
\r
return result;\r
}\r
}\r
else\r
{\r
- // Uses sws_scale when we don't support the provided colorspace.\r
+ // Use sws_scale when provided colorspace has no hw-accel.\r
safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); \r
avcodec_get_frame_defaults(av_frame.get()); \r
avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width_, height_);\r
};\r
\r
video_decoder::video_decoder(AVCodecContext* codec_context, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(codec_context, frame_factory)){}\r
-std::vector<safe_ptr<core::write_frame>> video_decoder::execute(void* tag, const std::shared_ptr<aligned_buffer>& video_packet){return impl_->execute(tag, video_packet);}\r
+std::vector<safe_ptr<core::write_frame>> video_decoder::execute(void* tag, const packet& video_packet){return impl_->execute(tag, video_packet);}\r
\r
}
\ No newline at end of file
*/\r
#pragma once\r
\r
-#include <common/memory/safe_ptr.h>\r
+#include "../packet.h"\r
\r
-#include <core/mixer/frame_mixer_device.h>\r
+#include <common/memory/safe_ptr.h>\r
\r
struct AVCodecContext;\r
\r
namespace caspar {\r
\r
-typedef std::vector<unsigned char, tbb::cache_aligned_allocator<unsigned char>> aligned_buffer;\r
+namespace core {\r
+ struct frame_factory;\r
+ class write_frame;\r
+}\r
\r
class video_decoder : boost::noncopyable\r
{\r
public:\r
explicit video_decoder(AVCodecContext* codec_context, const safe_ptr<core::frame_factory>& frame_factory);\r
- std::vector<safe_ptr<core::write_frame>> execute(void* tag, const std::shared_ptr<aligned_buffer>& video_packet); \r
+ std::vector<safe_ptr<core::write_frame>> execute(void* tag, const packet& video_packet); \r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r