});\r
} \r
\r
- BOOST_FOREACH(auto& layer, layers)\r
- { \r
- // Remove first field stills.\r
- boost::range::remove_erase_if(layer.items, [&](const item& item)\r
- {\r
- return item.transform.is_still && item.transform.field_mode == format_desc.field_mode; // only us last field for stills.\r
- });\r
+ if(format_desc.field_mode != core::field_mode::progressive)\r
+	{ // Remove jitter from stills.
+ BOOST_FOREACH(auto& layer, layers)\r
+ { \r
+ // Remove first field stills.\r
+ boost::range::remove_erase_if(layer.items, [&](const item& item)\r
+ {\r
+			return item.transform.is_still && item.transform.field_mode == format_desc.field_mode; // only use last field for stills.
+ });\r
\r
- // Stills are progressive\r
- BOOST_FOREACH(auto& item, layer.items)\r
- {\r
- if(item.transform.is_still)\r
- item.transform.field_mode = core::field_mode::progressive;\r
+ // Stills are progressive\r
+ BOOST_FOREACH(auto& item, layer.items)\r
+ {\r
+ if(item.transform.is_still)\r
+ item.transform.field_mode = core::field_mode::progressive;\r
+ }\r
}\r
}\r
\r
}, task_priority::high_priority);\r
\r
if(timer.elapsed() > 0.02)\r
- CASPAR_LOG(debug) << L"[device] Performance warning. Buffer allocation blocked: " << timer.elapsed();\r
+ CASPAR_LOG(debug) << L"[ogl-device] Performance warning. Buffer allocation blocked: " << timer.elapsed();\r
}\r
\r
auto self = shared_from_this(); // buffers can leave the device context, take a hold on life-time.\r
\r
auto frame = self_.receive_impl();\r
if(frame == draw_frame::late())\r
- return last_frame();\r
+ return self_.last_frame();\r
\r
++frame_number_;\r
\r
{\r
}\r
\r
-frame_producer_base::frame_producer_base(frame_producer_base& self) : impl_(new impl(self))\r
-{\r
-}\r
-\r
draw_frame frame_producer_base::receive()\r
{\r
return impl_->receive();\r
{\r
public:\r
frame_producer_base();\r
- frame_producer_base(frame_producer_base& self);\r
virtual ~frame_producer_base(){} \r
\r
// Methods \r
\r
draw_frame receive_impl() override\r
{\r
- if(++current_frame_ >= info_.duration)\r
+ if(current_frame_ >= info_.duration)\r
{\r
source_producer_ = core::frame_producer::empty();\r
return dest_producer_->receive(); \r
}\r
\r
+ current_frame_ += 1;\r
+\r
auto dest = draw_frame::empty();\r
auto source = draw_frame::empty();\r
\r
\r
std::vector<uint8_t, tbb::cache_aligned_allocator<int8_t>> buffer_;\r
\r
- std::queue<spl::shared_ptr<AVPacket>> packets_;\r
+ std::shared_ptr<AVPacket> current_packet_;\r
\r
public:\r
impl()\r
\r
std::shared_ptr<core::audio_buffer> poll()\r
{ \r
- auto result = std::make_shared<core::audio_buffer>();\r
-\r
if(!codec_context_)\r
return empty_audio();\r
\r
- std::shared_ptr<AVPacket> packet;\r
- if(!input_->try_pop_audio(packet))\r
- return result;\r
+ if(!current_packet_ && !input_->try_pop_audio(current_packet_))\r
+ return nullptr;\r
+ \r
+ std::shared_ptr<core::audio_buffer> audio;\r
\r
- if(packet)\r
+ if(!current_packet_) \r
+ avcodec_flush_buffers(codec_context_.get()); \r
+ else if(!current_packet_->data)\r
{\r
- if(!packet->data && (codec_context_->codec->capabilities & CODEC_CAP_DELAY))\r
- while(decode(*packet, *result));\r
-\r
- while(packet->size > 0) \r
- decode(*packet, *result);\r
- } \r
- else \r
- avcodec_flush_buffers(codec_context_.get());\r
-\r
- return result;\r
+ if(codec_context_->codec->capabilities & CODEC_CAP_DELAY) \r
+ audio = decode(*current_packet_);\r
+ \r
+ if(!audio)\r
+ current_packet_.reset();\r
+ }\r
+ else\r
+ {\r
+ audio = decode(*current_packet_);\r
+ \r
+ if(current_packet_->size == 0)\r
+ current_packet_.reset();\r
+ }\r
+ \r
+ return audio ? audio : poll();\r
}\r
\r
- bool decode(AVPacket& pkt, core::audio_buffer& result)\r
+ std::shared_ptr<core::audio_buffer> decode(AVPacket& pkt)\r
{ \r
std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
-\r
+ \r
int got_frame = 0;\r
auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), decoded_frame.get(), &got_frame, &pkt), "[audio_decoder]");\r
\r
if(len == 0)\r
{\r
pkt.size = 0;\r
- return false;\r
+ return nullptr;\r
}\r
\r
pkt.data += len;\r
pkt.size -= len;\r
\r
if(!got_frame)\r
- return false;\r
+ return nullptr;\r
\r
const uint8_t *in[] = {decoded_frame->data[0]};\r
uint8_t* out[] = {buffer_.data()};\r
in, decoded_frame->nb_samples);\r
\r
auto ptr = reinterpret_cast<int32_t*>(buffer_.data());\r
- result.insert(result.end(), ptr, ptr + channel_samples * format_desc_.audio_channels); \r
\r
event_subject_ << monitor::event("file/audio/sample-rate") % codec_context_->sample_rate\r
<< monitor::event("file/audio/channels") % codec_context_->channels\r
<< monitor::event("file/audio/format") % u8(av_get_sample_fmt_name(codec_context_->sample_fmt))\r
<< monitor::event("file/audio/codec") % u8(codec_context_->codec->long_name); \r
\r
- return true;\r
+ return std::make_shared<core::audio_buffer>(ptr, ptr + channel_samples * format_desc_.audio_channels);\r
}\r
\r
uint32_t nb_frames() const\r
\r
namespace caspar { namespace ffmpeg {\r
\r
-struct ffmpeg_producer : public core::frame_producer\r
+struct ffmpeg_producer : public core::frame_producer_base\r
{\r
monitor::basic_subject event_subject_;\r
const std::wstring filename_;\r
\r
input input_; \r
\r
- tbb::atomic<bool> paused_;\r
const double fps_;\r
const uint32_t start_;\r
\r
video_decoder video_decoder_;\r
audio_decoder audio_decoder_; \r
frame_muxer muxer_;\r
-\r
- tbb::atomic<uint32_t> frame_number_;\r
-\r
+ \r
core::draw_frame last_frame_;\r
\r
boost::optional<uint32_t> seek_target_;\r
, start_(start)\r
, last_frame_(core::draw_frame::empty())\r
{\r
- paused_ = false;\r
- frame_number_ = 0;\r
-\r
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));\r
graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f)); \r
diagnostics::register_graph(graph_);\r
\r
// frame_producer\r
\r
- core::draw_frame receive() override\r
+ core::draw_frame receive_impl() override\r
{ \r
auto frame = core::draw_frame::late(); \r
\r
boost::timer frame_timer;\r
\r
end_seek();\r
-\r
- if(paused_)\r
- frame = last_frame(); \r
- else\r
- { \r
- decode_next_frame();\r
+ \r
+ decode_next_frame();\r
\r
- if(!muxer_.empty())\r
- {\r
- last_frame_ = frame = std::move(muxer_.front());\r
- muxer_.pop();\r
-\r
- ++frame_number_; \r
- }\r
- else \r
- graph_->set_tag("underflow");\r
+ if(!muxer_.empty())\r
+ {\r
+ last_frame_ = frame = std::move(muxer_.front());\r
+ muxer_.pop(); \r
}\r
+ else \r
+ graph_->set_tag("underflow");\r
\r
graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);\r
event_subject_ << monitor::event("profiler/time") % frame_timer.elapsed() % (1.0/format_desc_.fps); \r
core::draw_frame last_frame() override\r
{\r
end_seek();\r
-\r
return core::draw_frame::still(last_frame_);\r
}\r
-\r
- void paused(bool value)\r
- {\r
- paused_ = value;\r
- }\r
-\r
- uint32_t frame_number() const override\r
- {\r
- return frame_number_;\r
- }\r
- \r
+ \r
uint32_t nb_frames() const override\r
{\r
if(input_.loop())\r
info.add(L"progressive", video_decoder_.is_progressive());\r
info.add(L"fps", fps_);\r
info.add(L"loop", input_.loop());\r
- info.add(L"frame-number", frame_number_);\r
+ info.add(L"frame-number", frame_number());\r
auto nb_frames2 = nb_frames();\r
info.add(L"nb-frames", nb_frames2 == std::numeric_limits<int64_t>::max() ? -1 : nb_frames2);\r
info.add(L"file-frame-number", file_frame_number());\r
\r
void end_seek()\r
{\r
- for(int n = 0; n < 8 && (last_frame_ == core::draw_frame::empty() || (seek_target_ && file_frame_number() != *seek_target_)); ++n)\r
+ for(int n = 0; n < 8 && (last_frame_ == core::draw_frame::empty() || (seek_target_ && file_frame_number() != *seek_target_+2)); ++n)\r
{\r
decode_next_frame();\r
if(!muxer_.empty())\r
\r
merge();\r
}\r
-\r
- void push(const std::vector<std::shared_ptr<AVFrame>> video_frames)\r
- {\r
- BOOST_FOREACH(auto& frame, video_frames)\r
- push(frame);\r
- }\r
-\r
+ \r
void push(const std::shared_ptr<core::audio_buffer>& audio)\r
{\r
if(audio == empty_audio()) \r
frame_muxer::frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter)\r
: impl_(new impl(in_fps, frame_factory, format_desc, filter)){}\r
void frame_muxer::push(const std::shared_ptr<AVFrame>& video_frame){impl_->push(video_frame);}\r
-void frame_muxer::push(const std::vector<std::shared_ptr<AVFrame>>& video_frames){impl_->push(video_frames);}\r
void frame_muxer::push(const std::shared_ptr<core::audio_buffer>& audio_samples){return impl_->push(audio_samples);}\r
bool frame_muxer::empty() const{return impl_->empty();}\r
core::draw_frame frame_muxer::front() const{return impl_->front();}\r
frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter = L"");\r
\r
void push(const std::shared_ptr<AVFrame>& video_frame);\r
- void push(const std::vector<std::shared_ptr<AVFrame>>& video_frame);\r
void push(const std::shared_ptr<core::audio_buffer>& audio_samples);\r
\r
bool video_ready() const;\r
bool is_progressive_;\r
uint32_t file_frame_number_;\r
double fps_;\r
+ \r
+ std::shared_ptr<AVPacket> current_packet_;\r
\r
public:\r
explicit impl() \r
{\r
}\r
\r
- std::vector<std::shared_ptr<AVFrame>> poll()\r
+ std::shared_ptr<AVFrame> poll()\r
{ \r
- std::vector<std::shared_ptr<AVFrame>> result;\r
-\r
if(!codec_context_)\r
- {\r
- result.push_back(empty_video());\r
- return result;\r
- }\r
+ return empty_video();\r
\r
- std::shared_ptr<AVPacket> packet;\r
- if(!input_->try_pop_video(packet))\r
- return result;\r
- \r
- if(packet)\r
- {\r
- if(!packet->data && (codec_context_->codec->capabilities & CODEC_CAP_DELAY)) \r
- while(decode(*packet, result)); \r
+ if(!current_packet_ && !input_->try_pop_video(current_packet_))\r
+ return nullptr;\r
+ \r
+ std::shared_ptr<AVFrame> frame;\r
\r
- while(packet->size > 0)\r
- decode(*packet, result); \r
+ if(!current_packet_) \r
+ avcodec_flush_buffers(codec_context_.get()); \r
+ else if(!current_packet_->data)\r
+ {\r
+ if(codec_context_->codec->capabilities & CODEC_CAP_DELAY) \r
+ frame = decode(*current_packet_);\r
+ \r
+ if(!frame)\r
+ current_packet_.reset();\r
}\r
else\r
- avcodec_flush_buffers(codec_context_.get());\r
- \r
- return result;\r
+ {\r
+ frame = decode(*current_packet_);\r
+ \r
+ if(current_packet_->size == 0)\r
+ current_packet_.reset();\r
+ }\r
+ \r
+ return frame ? frame : poll();\r
}\r
\r
- bool decode(AVPacket& pkt, std::vector<std::shared_ptr<AVFrame>>& result)\r
+ std::shared_ptr<AVFrame> decode(AVPacket& pkt)\r
{\r
auto frame = std::shared_ptr<AVFrame>(avcodec_alloc_frame(), av_free);\r
\r
if(len == 0)\r
{\r
pkt.size = 0;\r
- return false;\r
+ return nullptr;\r
}\r
\r
pkt.data += len;\r
pkt.size -= len;\r
\r
if(got_frame == 0) \r
- return false;\r
+ return nullptr;\r
\r
auto stream_time_base = stream_->time_base;\r
auto packet_frame_number = static_cast<uint32_t>((static_cast<double>(pkt.pts * stream_time_base.num)/stream_time_base.den)*fps_);\r
<< monitor::event("file/video/field") % u8(!frame->interlaced_frame ? "progressive" : (frame->top_field_first ? "upper" : "lower"))\r
<< monitor::event("file/video/codec") % u8(codec_context_->codec->long_name);\r
\r
- result.push_back(frame);\r
-\r
- return true;\r
+ return frame;\r
}\r
\r
uint32_t nb_frames() const\r
video_decoder::video_decoder(input& in) : impl_(new impl(in)){}\r
video_decoder::video_decoder(video_decoder&& other) : impl_(std::move(other.impl_)){}\r
video_decoder& video_decoder::operator=(video_decoder&& other){impl_ = std::move(other.impl_); return *this;}\r
-std::vector<std::shared_ptr<AVFrame>> video_decoder::operator()(){return impl_->poll();}\r
+std::shared_ptr<AVFrame> video_decoder::operator()(){return impl_->poll();}\r
int video_decoder::width() const{return impl_->width_;}\r
int video_decoder::height() const{return impl_->height_;}\r
uint32_t video_decoder::nb_frames() const{return impl_->nb_frames();}\r
video_decoder(video_decoder&& other);\r
video_decoder& operator=(video_decoder&& other);\r
\r
- std::vector<std::shared_ptr<AVFrame>> operator()();\r
+ std::shared_ptr<AVFrame> operator()();\r
\r
int width() const;\r
int height() const;\r