\r
safe_ptr<basic_frame> basic_frame::interlace(const safe_ptr<basic_frame>& frame1, const safe_ptr<basic_frame>& frame2, video_mode::type mode)\r
{ \r
+ if(frame1 == basic_frame::empty() && frame2 == basic_frame::empty())\r
+ return basic_frame::empty();\r
+\r
if(frame1 == frame2 || mode == video_mode::progressive)\r
return frame2;\r
\r
\r
safe_ptr<basic_frame> basic_frame::fill_and_key(const safe_ptr<basic_frame>& fill, const safe_ptr<basic_frame>& key)\r
{\r
+ if(key == basic_frame::empty())\r
+ return fill;\r
+\r
std::vector<safe_ptr<basic_frame>> frames;\r
key->get_image_transform().set_is_key(true);\r
frames.push_back(key);\r
frames.push_back(fill);\r
- return make_safe<basic_frame>(frames);\r
+ return make_safe<basic_frame>(std::move(frames));\r
}\r
\r
}}
\ No newline at end of file
static safe_ptr<basic_frame> frame = make_safe<basic_frame>();\r
return frame;\r
}\r
+\r
+	// Sentinel returned when a producer had no frame ready this tick (distinct
+	// from empty()/eof()); a shared singleton, compared by pointer identity.
+	static const safe_ptr<basic_frame>& late()
+	{
+		static safe_ptr<basic_frame> frame = make_safe<basic_frame>();
+		return frame;
+	}
\r
virtual void accept(frame_visitor& visitor);\r
private:\r
std::shared_ptr<implementation> impl_;\r
};\r
\r
+// True when |frame| is a real renderable frame, i.e. not one of the sentinel
+// singletons empty()/eof()/late(). Comparison is by pointer identity.
+inline bool is_concrete_frame(const safe_ptr<basic_frame>& frame)
+{
+	return frame != basic_frame::empty() && frame != basic_frame::eof() && frame != basic_frame::late();
+}
+
+// Overload for possibly-null shared_ptr frames: null is not concrete either.
+inline bool is_concrete_frame(const std::shared_ptr<basic_frame>& frame)
+{
+	return frame != nullptr && frame.get() != basic_frame::empty().get() && frame.get() != basic_frame::eof().get() && frame.get() != basic_frame::late().get();
+}
+\r
}}
\ No newline at end of file
\r
if(frame == basic_frame::eof())\r
{\r
+ CASPAR_LOG(info) << producer->print() << " End Of File.";\r
auto following = producer->get_following_producer();\r
following->set_leading_producer(producer);\r
producer = std::move(following); \r
-\r
+ \r
return receive_and_follow(producer);\r
}\r
return frame;\r
if(output_.empty())\r
return; \r
\r
+ auto frame = draw();\r
output_timer_.restart();\r
- output_(draw());\r
+ output_(frame);\r
diag_->update_value("output-time", static_cast<float>(output_timer_.elapsed()*format_desc_.fps*0.5));\r
\r
executor_.begin_invoke([=]{tick();});\r
tbb::parallel_for_each(layers_.begin(), layers_.end(), [&](decltype(*layers_.begin())& pair)\r
{\r
auto frame = pair.second.receive();\r
- if(frame != basic_frame::empty() && frame != basic_frame::eof())\r
+ if(is_concrete_frame(frame))\r
frames.local()[pair.first] = frame; \r
});\r
\r
\r
	safe_ptr<basic_frame> receive()
	{		
-		if(is_paused_)	
-			last_frame_->get_audio_transform().set_has_audio(false);	
-		else
-			last_frame_ = receive_and_follow(foreground_);
+		// While paused, keep repeating the previously delivered frame.
+		if(is_paused_)
+			return last_frame_;

-		return last_frame_;
+		auto next_frame = receive_and_follow(foreground_);
+		// Producer not ready this tick: repeat the previous frame instead of stalling.
+		if(next_frame == core::basic_frame::late())
+			return last_frame_;
+		
+		// Keep a muted copy so a repeated frame never repeats its audio.
+		last_frame_ = basic_frame(next_frame);
+		last_frame_->get_audio_transform().set_has_audio(false);
+
+		return next_frame;
	}
};\r
\r
{ \r
safe_ptr<frame_producer> fill_producer_;\r
safe_ptr<frame_producer> key_producer;\r
+ safe_ptr<basic_frame> last_fill_;\r
+ safe_ptr<basic_frame> last_key_;\r
+ safe_ptr<basic_frame> last_frame_;\r
\r
explicit separated_producer(const safe_ptr<frame_producer>& fill, const safe_ptr<frame_producer>& key) \r
: fill_producer_(fill)\r
- , key_producer(key){}\r
+ , key_producer(key)\r
+ , last_fill_(core::basic_frame::empty())\r
+ , last_key_(core::basic_frame::empty())\r
+ , last_frame_(core::basic_frame::empty()){}\r
\r
// frame_producer\r
\r
virtual safe_ptr<basic_frame> receive()\r
{\r
- auto fill = basic_frame::empty();\r
- auto key = basic_frame::empty();\r
tbb::parallel_invoke\r
(\r
- [&]{fill = receive_and_follow(fill_producer_);},\r
- [&]{key = receive_and_follow(key_producer);}\r
+ [&]\r
+ {\r
+ if(last_fill_ == core::basic_frame::empty())\r
+ last_fill_ = receive_and_follow(fill_producer_);\r
+ },\r
+ [&]\r
+ {\r
+ if(last_key_ == core::basic_frame::empty())\r
+ last_key_ = receive_and_follow(key_producer);\r
+ }\r
);\r
\r
- if(fill == basic_frame::eof())\r
+ if(last_fill_ == basic_frame::eof())\r
return basic_frame::eof();\r
\r
- if(key != basic_frame::empty() || key != basic_frame::eof())\r
- return basic_frame::fill_and_key(fill, key);\r
+ if(last_fill_ == core::basic_frame::late() || last_key_ == core::basic_frame::late()) // One of the producers is lagging, keep them in sync.\r
+ return last_frame_;\r
+ \r
+ if(last_key_ == basic_frame::eof())\r
+ {\r
+ last_frame_ = last_fill_;\r
+ last_fill_ = basic_frame::empty();\r
+ }\r
+ else\r
+ {\r
+ last_frame_= basic_frame::fill_and_key(last_fill_, last_key_);\r
+ last_fill_ = basic_frame::empty();\r
+ last_key_ = basic_frame::empty();\r
+ }\r
\r
- return fill;\r
+ return last_frame_;\r
}\r
\r
virtual std::wstring print() const\r
\r
safe_ptr<frame_producer> dest_producer_;\r
safe_ptr<frame_producer> source_producer_;\r
+ safe_ptr<basic_frame> last_dest_;\r
+ safe_ptr<basic_frame> last_source_;\r
\r
explicit transition_producer(const video_mode::type& mode, const safe_ptr<frame_producer>& dest, const transition_info& info) \r
: mode_(mode)\r
, current_frame_(0)\r
, info_(info)\r
, dest_producer_(dest)\r
- , source_producer_(frame_producer::empty()){}\r
+ , source_producer_(frame_producer::empty())\r
+ , last_dest_(core::basic_frame::empty())\r
+ , last_source_(core::basic_frame::empty()){}\r
\r
// frame_producer\r
\r
{\r
if(current_frame_++ >= info_.duration)\r
return basic_frame::eof();\r
-\r
- auto source = basic_frame::empty();\r
- auto dest = basic_frame::empty();\r
-\r
+ \r
tbb::parallel_invoke\r
(\r
- [&]{dest = receive_and_follow(dest_producer_);},\r
- [&]{source = receive_and_follow(source_producer_);}\r
+ [&]{last_dest_ = receive_and_follow_w_last(dest_producer_, last_dest_);},\r
+ [&]{last_source_ = receive_and_follow_w_last(source_producer_, last_source_);}\r
);\r
\r
- return compose(dest, source);\r
+ return compose(last_dest_, last_source_);\r
}\r
\r
virtual std::wstring print() const\r
return L"transition";\r
}\r
\r
+	// Receives from |producer|; when it reports late(), substitutes |last_frame|
+	// with its audio muted so a repeated frame does not repeat its audio.
+	safe_ptr<basic_frame> receive_and_follow_w_last(safe_ptr<frame_producer>& producer, safe_ptr<basic_frame> last_frame)
+	{
+		auto frame = core::receive_and_follow(producer);
+		if(frame == basic_frame::late())
+		{
+			last_frame->get_audio_transform().set_has_audio(false);
+			return last_frame;
+		}
+		return frame;
+	}
+\r
// transition_producer\r
\r
safe_ptr<basic_frame> compose(const safe_ptr<basic_frame>& dest_frame, const safe_ptr<basic_frame>& src_frame) \r
d_frame1->get_image_transform().set_key_scale(delta1, 1.0); \r
d_frame2->get_image_transform().set_key_scale(delta2, 1.0); \r
}\r
- \r
+ \r
auto s_frame = s_frame1->get_image_transform() == s_frame2->get_image_transform() ? s_frame2 : basic_frame::interlace(s_frame1, s_frame2, mode_);\r
- auto d_frame = basic_frame::interlace(d_frame1, d_frame2, mode_);\r
+ auto d_frame = d_frame1->get_image_transform() == d_frame2->get_image_transform() ? d_frame2 : basic_frame::interlace(d_frame1, d_frame2, mode_);\r
+\r
+ if(dest_frame == core::basic_frame::empty())\r
+ return s_frame;\r
+\r
+ if(src_frame == core::basic_frame::empty())\r
+ return d_frame;\r
\r
return basic_frame(s_frame, d_frame);\r
}\r
#define __STDC_LIMIT_MACROS\r
#include <libavformat/avformat.h>\r
#include <libswscale/swscale.h>\r
- #include <libavcodec/avcodec.h>\r
}\r
\r
namespace caspar {\r
av_register_all();\r
avcodec_init();\r
\r
- AVHWAccel* hwaccel = av_hwaccel_next(nullptr);\r
- while(hwaccel != nullptr)\r
- {\r
- CASPAR_LOG(info) << "FFMPEG: Found hwaccel: " << hwaccel->name;\r
- hwaccel = av_hwaccel_next(hwaccel);\r
- }\r
-\r
core::register_consumer_factory(create_ffmpeg_consumer);\r
core::register_producer_factory(create_ffmpeg_producer);\r
}\r
std::vector<std::vector<short>> execute(const aligned_buffer& audio_packet)\r
{ \r
int written_bytes = audio_buffer_.size()*2 - FF_INPUT_BUFFER_PADDING_SIZE;\r
- const int result = avcodec_decode_audio2(codec_context_, audio_buffer_.data(), &written_bytes, audio_packet.data(), audio_packet.size());\r
+ const int errn = avcodec_decode_audio2(codec_context_, audio_buffer_.data(), &written_bytes, audio_packet.data(), audio_packet.size());\r
+\r
+ if(errn < 0 || codec_context_->sample_rate != SAMPLE_RATE || codec_context_->channels != 2)\r
+ { \r
+ BOOST_THROW_EXCEPTION(\r
+ invalid_operation() <<\r
+ boost::errinfo_api_function("avcodec_decode_audio2") <<\r
+ boost::errinfo_errno(AVUNERROR(errn)));\r
+ }\r
\r
- if(result <= 0 || codec_context_->sample_rate != SAMPLE_RATE || codec_context_->channels != 2)\r
- BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Invalid audio stream"));\r
- \r
current_chunk_.insert(current_chunk_.end(), audio_buffer_.data(), audio_buffer_.data() + written_bytes/2);\r
\r
std::vector<std::vector<short>> chunks;\r
const bool loop_;\r
\r
std::shared_ptr<diagnostics::graph> graph_;\r
- boost::timer perf_timer_;\r
+ boost::timer perf_timer_;\r
\r
- std::unique_ptr<audio_decoder> audio_decoder_;\r
- std::unique_ptr<video_decoder> video_decoder_;\r
-\r
- std::deque<safe_ptr<core::write_frame>> video_frame_channel_; \r
- std::deque<std::vector<short>> audio_chunk_channel_;\r
+ std::deque<safe_ptr<core::write_frame>> video_frame_buffer_; \r
+ std::deque<std::vector<short>> audio_chunk_buffer_;\r
\r
std::queue<safe_ptr<core::basic_frame>> ouput_channel_;\r
\r
- safe_ptr<core::basic_frame> last_frame_;\r
std::shared_ptr<core::frame_factory> frame_factory_;\r
\r
- std::unique_ptr<input> input_; \r
+ input input_; \r
+ std::unique_ptr<video_decoder> video_decoder_;\r
+ std::unique_ptr<audio_decoder> audio_decoder_;\r
public:\r
	explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, bool loop) 
		: filename_(filename)
		, loop_(loop) 
-		, last_frame_(core::basic_frame(core::basic_frame::empty()))
+		// NOTE(review): the initializer list relies on member declaration order —
+		// input_ must be constructed before the decoders below, which read its
+		// codec contexts. Confirm declaration order if members are reshuffled.
+		, graph_(diagnostics::create_graph(narrow(print())))
		, frame_factory_(frame_factory)	
+		, input_(safe_ptr<diagnostics::graph>(graph_), filename_, loop_)
+		, video_decoder_(input_.get_video_codec_context().get() ? new video_decoder(input_.get_video_codec_context().get(), frame_factory) : nullptr)
+		, audio_decoder_(input_.get_audio_codec_context().get() ? new audio_decoder(input_.get_audio_codec_context().get(), frame_factory->get_video_format_desc().fps) : nullptr)
	{
-		graph_ = diagnostics::create_graph(boost::bind(&ffmpeg_producer::print, this));	
		graph_->add_guide("frame-time", 0.5);
		graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
		graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f));	
-
-		input_.reset(new input(safe_ptr<diagnostics::graph>(graph_), filename_, loop_));
-		video_decoder_.reset(input_->get_video_codec_context().get() ? new video_decoder(input_->get_video_codec_context().get(), frame_factory) : nullptr);
-		audio_decoder_.reset(input_->get_audio_codec_context().get() ? new audio_decoder(input_->get_audio_codec_context().get(), frame_factory->get_video_format_desc().fps) : nullptr);
-
-		double frame_time = 1.0f/input_->fps();
+		
+		// Warn when the clip frame rate differs from the channel frame rate.
+		double frame_time = 1.0f/input_.fps();
		double format_frame_time = 1.0/frame_factory->get_video_format_desc().fps;
		if(abs(frame_time - format_frame_time) > 0.0001)
			CASPAR_LOG(warning) << print() << L" Invalid framerate detected. This may cause distorted audio during playback. frame-time: " << frame_time;
	}
- \r
+\r
	virtual safe_ptr<core::basic_frame> receive()
	{
		perf_timer_.restart();

-		while(ouput_channel_.size() < 2 && !input_->is_eof())
+		// Keep up to two merged frames queued, decoding video and audio in parallel.
+		for(size_t n = 0; ouput_channel_.size() < 2 && input_.has_packet() && n < 32; ++n) // 32 packets should be enough. Otherwise there probably was an error and we want to avoid infinite recursion.
		{	
-			aligned_buffer video_packet;
-			if(video_frame_channel_.size() < 3 && video_decoder_)	
-				video_packet = input_->get_video_packet();	
-			
-			aligned_buffer audio_packet;
-			if(audio_chunk_channel_.size() < 3 && audio_decoder_)	
-				audio_packet = input_->get_audio_packet();	
-
-			if(video_packet.empty() && audio_packet.empty()) // Skip frame if lagging.	
-				break;
-
-			tbb::parallel_invoke(
-				[&]
-				{
-					if(!video_packet.empty() && video_decoder_) // Video Decoding.
+			tbb::parallel_invoke
+			(
+				[&]
					{
-						try
-						{
-							auto frame = video_decoder_->execute(this, video_packet);
-							video_frame_channel_.push_back(std::move(frame));
-						}
-						catch(...)
-						{
-							CASPAR_LOG_CURRENT_EXCEPTION();
-							video_decoder_.reset();
-							CASPAR_LOG(warning) << print() << " removed video-stream.";
-						}
-					}
-				}, 
-				[&] 
-				{ 
-					if(!audio_packet.empty() && audio_decoder_) // Audio Decoding.
+					if(video_frame_buffer_.size() < 3)
+						try_decode_video_packet(input_.get_video_packet());
+				}, 
+				[&]
				{
-						try
-						{
-							auto chunks = audio_decoder_->execute(audio_packet);
-							audio_chunk_channel_.insert(audio_chunk_channel_.end(), chunks.begin(), chunks.end());
-						}
-						catch(...)
-						{
-							CASPAR_LOG_CURRENT_EXCEPTION();
-							audio_decoder_.reset();
-							CASPAR_LOG(warning) << print() << " removed audio-stream.";
-						}
					}
-			});
+			);	
+
+			merge_audio_and_video();	
+		}
+		
+		graph_->update_value("frame-time", static_cast<float>(perf_timer_.elapsed()*frame_factory_->get_video_format_desc().fps*0.5));
+		
+		return get_next_frame();
+	}
+\r
+	// Diagnostic name shown in logs/graphs, e.g. L"ffmpeg[clip.mov]".
+	virtual std::wstring print() const
+	{
+		return L"ffmpeg[" + boost::filesystem::wpath(filename_).filename() + L"]";
+	}
\r
- while((!video_frame_channel_.empty() || !video_decoder_) && (!audio_chunk_channel_.empty() || !audio_decoder_))\r
+	// Decodes one video packet into video_frame_buffer_. On decoder failure the
+	// video stream is dropped (playback continues) rather than aborting.
+	void try_decode_video_packet(const aligned_buffer& video_packet)
+	{
+		if(!video_packet.empty() && video_decoder_) // Video Decoding.
+		{
+			try
+			{
+				auto frame = video_decoder_->execute(this, video_packet);
+				if(frame) // null => decoder needs more packets before a full frame.
+					video_frame_buffer_.push_back(make_safe(std::move(frame)));
+			}
+			catch(...)
+			{
+				CASPAR_LOG_CURRENT_EXCEPTION();
+				video_decoder_.reset();
+				CASPAR_LOG(warning) << print() << " removed video-stream.";
+			}
+		}
+	}
\r
- if(video_decoder_)\r
- {\r
- frame = video_frame_channel_.front();\r
- video_frame_channel_.pop_front();\r
- }\r
+	// Decodes one audio packet into audio_chunk_buffer_. On decoder failure the
+	// audio stream is dropped (playback continues without audio).
+	// The buffer-size guard lives in the caller (as for the video path), so it
+	// is deliberately not repeated here.
+	void try_decode_audio_packet(const aligned_buffer& audio_packet)
+	{
+		if(!audio_packet.empty() && audio_decoder_) // Audio Decoding.
+		{
+			try
+			{
+				auto chunks = audio_decoder_->execute(audio_packet);
+				audio_chunk_buffer_.insert(audio_chunk_buffer_.end(), chunks.begin(), chunks.end());
+			}
+			catch(...)
+			{
+				CASPAR_LOG_CURRENT_EXCEPTION();
+				audio_decoder_.reset();
+				CASPAR_LOG(warning) << print() << " removed audio-stream.";
+			}
+		}
+	}
\r
- if(audio_decoder_) \r
- {\r
- if(!frame) // If there is no video create a dummy frame.\r
- {\r
- frame = frame_factory_->create_frame(this, 1, 1);\r
- std::fill(frame->image_data().begin(), frame->image_data().end(), 0);\r
- }\r
- \r
- frame->audio_data() = std::move(audio_chunk_channel_.front());\r
- audio_chunk_channel_.pop_front();\r
- }\r
- else\r
- frame->get_audio_transform().set_has_audio(false);\r
- \r
- ouput_channel_.push(make_safe(frame)); \r
- } \r
-\r
- if(ouput_channel_.empty() && video_packet.empty() && audio_packet.empty()) \r
- return last_frame_; \r
+	// Pairs one decoded video frame with one decoded audio chunk and queues the
+	// result. Also handles video-only files (muted frame) and audio-only files
+	// (audio carried on a dummy 1x1 frame).
+	void merge_audio_and_video()
+	{	
+		std::shared_ptr<core::write_frame> frame;	
+
+		if(!video_frame_buffer_.empty() && !audio_chunk_buffer_.empty())
+		{
+			frame = video_frame_buffer_.front(); 
+			video_frame_buffer_.pop_front();
+			
+			frame->audio_data() = std::move(audio_chunk_buffer_.front());
+			audio_chunk_buffer_.pop_front();	
+		}
+		else if(!video_frame_buffer_.empty() && !audio_decoder_)
+		{
+			// No audio stream: deliver the frame explicitly muted.
+			frame = video_frame_buffer_.front(); 
+			video_frame_buffer_.pop_front();
+			frame->get_audio_transform().set_has_audio(false);	
+		}
+		else if(!audio_chunk_buffer_.empty() && !video_decoder_)
+		{
+			// No video stream: attach the audio to a black 1x1 dummy frame.
+			frame = frame_factory_->create_frame(this, 1, 1);
+			std::fill(frame->image_data().begin(), frame->image_data().end(), 0);
+			
+			frame->audio_data() = std::move(audio_chunk_buffer_.front());
+			audio_chunk_buffer_.pop_front();
		}

-		graph_->update_value("frame-time", static_cast<float>(perf_timer_.elapsed()*frame_factory_->get_video_format_desc().fps*0.5));
+		// If nothing was mergeable this tick, frame stays null and nothing is queued.
+		if(frame)
+			ouput_channel_.push(make_safe(frame));		
+	}
+ \r
+	// Delivers the next queued frame; eof() once the file is exhausted, and
+	// late() (tagging "underflow") when decoding has not kept up with playback.
+	safe_ptr<core::basic_frame> get_next_frame()
+	{
+		if(is_eof())
+			return core::basic_frame::eof();

-		auto result = last_frame_;
-		if(!ouput_channel_.empty())	
-			result = get_frame(); // TODO: Support 50p	
-		else if(!input_->is_running())
-			result = core::basic_frame::eof();
-		else
+		if(ouput_channel_.empty())
+		{
			graph_->add_tag("underflow");
-			
-		return result;
-	}
+			return core::basic_frame::late();	
+		}

-	core::basic_frame get_frame()
-	{
-		CASPAR_ASSERT(!ouput_channel_.empty());
-		auto result = std::move(ouput_channel_.front());
-		last_frame_ = core::basic_frame(result);
-		last_frame_->get_audio_transform().set_gain(0.0); // last_frame should not have audio
-		ouput_channel_.pop();
-		return result;
+		auto frame = std::move(ouput_channel_.front());
+		ouput_channel_.pop();	
+		return frame;
	}
\r
- virtual std::wstring print() const\r
+	// True only when no more frames can ever be delivered: the output queue is
+	// drained and either both decoders are gone or the input has stopped.
+	bool is_eof() const
	{
-		return L"ffmpeg[" + boost::filesystem::wpath(filename_).filename() + L"]";
+		// Parenthesized explicitly: '&&' must cover both alternatives. With the
+		// un-parenthesized form (a && b) || c, a stopped input would report eof
+		// while decoded frames are still queued, dropping them.
+		return ouput_channel_.empty() && ((!video_decoder_ && !audio_decoder_) || !input_.is_running());
	}
};\r
\r
\r
struct input::implementation : boost::noncopyable\r
{ \r
- static const size_t PACKET_BUFFER_COUNT = 50;\r
+ static const size_t PACKET_BUFFER_COUNT = 25;\r
\r
safe_ptr<diagnostics::graph> graph_;\r
\r
boost::condition_variable cond_;\r
boost::mutex mutex_;\r
\r
+ std::exception_ptr exception_;\r
executor executor_;\r
public:\r
explicit implementation(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop) \r
graph_->set_color("seek", diagnostics::color(0.5f, 1.0f, 0.5f)); \r
\r
int errn;\r
- AVFormatContext* weak_format_context_;\r
- if((errn = -av_open_input_file(&weak_format_context_, narrow(filename).c_str(), nullptr, 0, nullptr)) > 0)\r
+ AVFormatContext* weak_format_context_ = nullptr;\r
+ if((errn = av_open_input_file(&weak_format_context_, narrow(filename).c_str(), nullptr, 0, nullptr)) < 0 || weak_format_context_ == nullptr)\r
BOOST_THROW_EXCEPTION(\r
file_read_error() << \r
source_info(narrow(print())) << \r
- msg_info("No format context found.") << \r
boost::errinfo_api_function("av_open_input_file") <<\r
- boost::errinfo_errno(errn) <<\r
+ boost::errinfo_errno(AVUNERROR(errn)) <<\r
boost::errinfo_file_name(narrow(filename)));\r
\r
format_context_.reset(weak_format_context_, av_close_input_file);\r
\r
- if((errn = -av_find_stream_info(format_context_.get())) > 0)\r
+ if((errn = av_find_stream_info(format_context_.get())) < 0)\r
BOOST_THROW_EXCEPTION(\r
file_read_error() << \r
source_info(narrow(print())) << \r
boost::errinfo_api_function("av_find_stream_info") <<\r
- msg_info("No stream found.") << \r
- boost::errinfo_errno(errn));\r
+ boost::errinfo_errno(AVUNERROR(errn)));\r
\r
video_codec_context_ = open_stream(CODEC_TYPE_VIDEO, video_s_index_);\r
if(!video_codec_context_)\r
source_info(narrow(print())) << \r
msg_info("No video or audio codec context found.")); \r
\r
- if(video_codec_context_->hwaccel != nullptr || video_codec_context_->hwaccel_context != nullptr)\r
- CASPAR_LOG(info) << print() << " Found hwaccel.";\r
-\r
executor_.start();\r
executor_.begin_invoke([this]{read_file();});\r
CASPAR_LOG(info) << print() << " Started.";\r
}\r
\r
	void read_file()
-	{	
-		AVPacket tmp_packet;
-		safe_ptr<AVPacket> read_packet(&tmp_packet, av_free_packet);	
-
-		if (av_read_frame(format_context_.get(), read_packet.get()) >= 0) // NOTE: read_packet is only valid until next call of av_read_frame or av_close_input_file
+	{	
+		// Reads one packet per invocation and re-schedules itself on the
+		// executor; EOF either loops (seek to start) or stops the reader.
+		try
		{
-			auto packet = std::make_shared<aligned_buffer>(read_packet->data, read_packet->data + read_packet->size);
-			if(read_packet->stream_index == video_s_index_) 	
-				video_packet_buffer_.try_push(std::move(packet));	
-			else if(read_packet->stream_index == audio_s_index_) 	
-				audio_packet_buffer_.try_push(std::move(packet));	
+			AVPacket tmp_packet;
+			// read_packet is only valid until the next av_read_frame/close call.
+			safe_ptr<AVPacket> read_packet(&tmp_packet, av_free_packet);	
+
+			auto read_frame_ret = av_read_frame(format_context_.get(), read_packet.get());
+			if(read_frame_ret == AVERROR_EOF || read_frame_ret == AVERROR_IO)
+			{
+				if(loop_)
+				{
+					auto seek_frame_ret = av_seek_frame(format_context_.get(), -1, 0, AVSEEK_FLAG_BACKWARD);
+					if(seek_frame_ret >= 0)
+						graph_->add_tag("seek");
+					else
+					{
+						BOOST_THROW_EXCEPTION(
+							invalid_operation() <<
+							boost::errinfo_api_function("av_seek_frame") <<
+							boost::errinfo_errno(AVUNERROR(seek_frame_ret)));
+					}	
+				}	
+				else
+					stop();
+			}
+			else if(read_frame_ret < 0)
+			{
+				BOOST_THROW_EXCEPTION(
+					invalid_operation() <<
+					boost::errinfo_api_function("av_read_frame") <<
+					boost::errinfo_errno(AVUNERROR(read_frame_ret)));
+			}
+			else
+			{
+				// Route the packet to the matching stream buffer; try_push drops
+				// the packet if the bounded queue is full.
+				auto packet = std::make_shared<aligned_buffer>(read_packet->data, read_packet->data + read_packet->size);
+				if(read_packet->stream_index == video_s_index_) 	
+					video_packet_buffer_.try_push(std::move(packet));	
+				else if(read_packet->stream_index == audio_s_index_) 	
+					audio_packet_buffer_.try_push(std::move(packet));	
+			}
+			
+			graph_->update_value("input-buffer", static_cast<float>(video_packet_buffer_.size())/static_cast<float>(PACKET_BUFFER_COUNT));	
		}
-		else if(!loop_ || av_seek_frame(format_context_.get(), -1, 0, AVSEEK_FLAG_BACKWARD) < 0) // TODO: av_seek_frame does not work for all formats
+		catch(...)
		{
-			executor_.stop();
-			CASPAR_LOG(info) << print() << " eof";
-		}	
-		else
-			graph_->add_tag("seek");	
-		
-		graph_->update_value("input-buffer", static_cast<float>(video_packet_buffer_.size())/static_cast<float>(PACKET_BUFFER_COUNT));	
+			// Any read/seek failure stops the reader; the error is logged, not rethrown.
+			stop();
+			CASPAR_LOG_CURRENT_EXCEPTION();
+			return;
+		}

		executor_.begin_invoke([this]{read_file();});	
		boost::unique_lock<boost::mutex> lock(mutex_);
+		// NOTE(review): back-pressure waits only while BOTH buffers exceed the
+		// limit — for a single-stream file the other buffer never fills, so the
+		// reader never waits here; confirm this is intended.
		while(executor_.is_running() && audio_packet_buffer_.size() > PACKET_BUFFER_COUNT && video_packet_buffer_.size() > PACKET_BUFFER_COUNT)
			cond_.wait(lock);	
	}
+\r
+	// Stops the read loop; called on end-of-file (non-looping) or on read error.
+	void stop()
+	{
+		executor_.stop();
+		CASPAR_LOG(info) << print() << " eof";
+	}
\r
aligned_buffer get_video_packet()\r
{\r
{\r
return get_packet(audio_packet_buffer_);\r
}\r
+\r
+	// True while any demuxed packet (video or audio) remains buffered.
+	bool has_packet() const
+	{
+		return !video_packet_buffer_.empty() || !audio_packet_buffer_.empty();
+	}
\r
aligned_buffer get_packet(tbb::concurrent_bounded_queue<std::shared_ptr<aligned_buffer>>& buffer)\r
{\r
std::shared_ptr<aligned_buffer> packet;\r
return buffer.try_pop(packet) ? std::move(*packet) : aligned_buffer();\r
}\r
-\r
- bool is_eof() const\r
- {\r
- return !executor_.is_running() && video_packet_buffer_.empty() && audio_packet_buffer_.empty();\r
- }\r
- \r
+ \r
double fps() const\r
{\r
return static_cast<double>(video_codec_context_->time_base.den) / static_cast<double>(video_codec_context_->time_base.num);\r
input::input(const safe_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop) : impl_(new implementation(graph, filename, loop)){}\r
const std::shared_ptr<AVCodecContext>& input::get_video_codec_context() const{return impl_->video_codec_context_;}\r
const std::shared_ptr<AVCodecContext>& input::get_audio_codec_context() const{return impl_->audio_codex_context_;}\r
-bool input::is_eof() const{return impl_->is_eof();}\r
+bool input::has_packet() const{return impl_->has_packet();}\r
bool input::is_running() const {return impl_->executor_.is_running();}\r
aligned_buffer input::get_video_packet(){return impl_->get_video_packet();}\r
aligned_buffer input::get_audio_packet(){return impl_->get_audio_packet();}\r
aligned_buffer get_video_packet();\r
aligned_buffer get_audio_packet();\r
\r
- bool is_eof() const;\r
+ bool has_packet() const;\r
bool is_running() const;\r
double fps() const;\r
private:\r
}\r
}\r
\r
- safe_ptr<core::write_frame> execute(void* tag, const aligned_buffer& video_packet)\r
+ std::shared_ptr<core::write_frame> execute(void* tag, const aligned_buffer& video_packet)\r
{ \r
safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
\r
int frame_finished = 0;\r
- const int result = avcodec_decode_video(codec_context_, decoded_frame.get(), &frame_finished, video_packet.data(), video_packet.size());\r
+ const int errn = avcodec_decode_video(codec_context_, decoded_frame.get(), &frame_finished, video_packet.data(), video_packet.size());\r
\r
- if(result < 0)\r
- BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("avcodec_decode_video failed"));\r
+ if(errn < 0)\r
+ {\r
+ BOOST_THROW_EXCEPTION(\r
+ invalid_operation() <<\r
+ boost::errinfo_api_function("avcodec_decode_video") <<\r
+ boost::errinfo_errno(AVUNERROR(errn)));\r
+ }\r
\r
+ if(frame_finished == 0)\r
+ return nullptr;\r
+\r
auto write = frame_factory_->create_frame(tag, desc_);\r
if(sws_context_ == nullptr)\r
{\r
};\r
\r
video_decoder::video_decoder(AVCodecContext* codec_context, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(codec_context, frame_factory)){}\r
-safe_ptr<core::write_frame> video_decoder::execute(void* tag, const aligned_buffer& video_packet){return impl_->execute(tag, video_packet);}\r
+std::shared_ptr<core::write_frame> video_decoder::execute(void* tag, const aligned_buffer& video_packet){return impl_->execute(tag, video_packet);}\r
\r
}
\ No newline at end of file
{\r
public:\r
explicit video_decoder(AVCodecContext* codec_context, const safe_ptr<core::frame_factory>& frame_factory);\r
- safe_ptr<core::write_frame> execute(void* tag, const aligned_buffer& video_packet); \r
+ std::shared_ptr<core::write_frame> execute(void* tag, const aligned_buffer& video_packet); \r
private:\r
struct implementation;\r
safe_ptr<implementation> impl_;\r
CASPAR_LOG(info) << L"FFMPEG-swscale " << caspar::get_avformat_version();\r
CASPAR_LOG(info) << L"FFMPEG-avformat " << caspar::get_swscale_version();\r
CASPAR_LOG(info) << L"OpenGL " << caspar::mixer::ogl_device::create()->invoke([]{return reinterpret_cast<const char*>(glGetString(GL_VERSION));})\r
- << L" " << caspar::mixer::ogl_device::create()->invoke([]{return reinterpret_cast<const char*>(glGetString(GL_VENDOR));});\r
- CASPAR_LOG(info) << L"\n\n";\r
+ << L" " << caspar::mixer::ogl_device::create()->invoke([]{return reinterpret_cast<const char*>(glGetString(GL_VENDOR));}) << "\n\n";\r
}\r
\r
int main(int argc, wchar_t* argv[])\r