std::unique_ptr<video_decoder> video_decoder_;\r
std::unique_ptr<audio_decoder> audio_decoder_;\r
\r
- std::deque<std::pair<int, std::vector<short>>> audio_chunks_;\r
+ std::deque<std::pair<int, std::vector<int16_t>>> audio_chunks_;\r
std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_frames_;\r
public:\r
- explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, bool loop, int start, int length) \r
+ explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, const std::wstring& filter_str, bool loop, int start, int length) \r
: filename_(filename)\r
, graph_(diagnostics::create_graph(narrow(print())))\r
, frame_factory_(frame_factory) \r
CASPAR_LOG(warning) << print() << L" Invalid framerate detected. This may cause distorted audio during playback. frame-time: " << frame_time;\r
\r
video_decoder_.reset(input_.get_video_codec_context() ? \r
- new video_decoder(input_, frame_factory) : nullptr);\r
+ new video_decoder(input_, frame_factory, narrow(filter_str)) : nullptr);\r
\r
audio_decoder_.reset(input_.get_audio_codec_context() ? \r
new audio_decoder(input_, frame_factory->get_video_format_desc()) : nullptr); \r
(\r
[&]\r
{\r
- if(video_decoder_ && video_frames_.size() < 2)\r
+ if(video_decoder_ && video_frames_.size() < 3)\r
boost::range::push_back(video_frames_, video_decoder_->receive()); \r
}, \r
[&]\r
{\r
- if(audio_decoder_ && audio_chunks_.size() < 2)\r
+ if(audio_decoder_ && audio_chunks_.size() < 3)\r
boost::range::push_back(audio_chunks_, audio_decoder_->receive()); \r
}\r
);\r
video_frames_.front().first == audio_chunks_.front().first);\r
}\r
\r
+ // Pops the next decoded frame off video_frames_, moves the supplied audio
+ // chunk into it, and — when the next queued frame carries the SAME frame
+ // number (the decoder/filter emitted two fields for one frame) — combines
+ // the pair via basic_frame::interlace for the current video mode.
+ safe_ptr<core::basic_frame> get_video_frame(std::vector<int16_t>&& audio_chunk)
+ {
+ auto frame = std::move(video_frames_.front().second); 
+ auto frame_number = video_frames_.front().first;
+ video_frames_.pop_front();
+ 
+ // An empty chunk means "no audio for this frame" — mute the audio transform.
+ frame->audio_data() = std::move(audio_chunk);
+ if(frame->audio_data().empty())
+ frame->get_audio_transform().set_has_audio(false); 
+
+ if(!video_frames_.empty()) // interlace if we have double frames
+ {
+ if(video_frames_.front().first == frame_number)
+ {
+ auto frame2 = std::move(video_frames_.front().second); 
+ video_frames_.pop_front();
+
+ return core::basic_frame::interlace(frame, frame2, frame_factory_->get_video_format_desc().mode);
+ }
+ }
+
+ return frame;
+ }
+\r
// Produces the next output frame. Both branches now delegate frame/audio
// pairing (and field interlacing) to get_video_frame instead of duplicating
// the pop/attach logic inline.
safe_ptr<core::basic_frame> decode_frame()
{
// Refill video_frames_ / audio_chunks_ from the decoders first.
decode_packets();

if(video_decoder_ && audio_decoder_ && !video_frames_.empty() && !audio_chunks_.empty())
{
- auto frame = std::move(video_frames_.front().second); 
- video_frames_.pop_front();
- 
- frame->audio_data() = std::move(audio_chunks_.front().second);
+ auto audio_chunk = std::move(audio_chunks_.front().second);
audio_chunks_.pop_front();
- 
- return frame;
+ 
+ return get_video_frame(std::move(audio_chunk));
}
else if(video_decoder_ && !audio_decoder_ && !video_frames_.empty())
- {
- auto frame = std::move(video_frames_.front().second); 
- video_frames_.pop_front();
- frame->get_audio_transform().set_has_audio(false); 
- 
- return frame;
+ { 
+ // Video-only file: hand get_video_frame an empty chunk so it mutes audio.
+ return get_video_frame(std::vector<int16_t>());
}
else if(audio_decoder_ && !video_decoder_ && !audio_chunks_.empty())
{
// NOTE(review): diff hunk is cut off here — the audio-only branch body and
// the rest of decode_frame are not visible in this fragment.
start = boost::lexical_cast<int>(*seek_it);\r
}\r
\r
- return make_safe<ffmpeg_producer>(frame_factory, path, loop, start, length);\r
+ std::wstring filter_str = L"";\r
+\r
+ auto filter_it = std::find(params.begin(), params.end(), L"FILTER");\r
+ if(filter_it != params.end())\r
+ {\r
+ if(++filter_it != params.end())\r
+ filter_str = *filter_it;\r
+ }\r
+\r
+ return make_safe<ffmpeg_producer>(frame_factory, path, filter_str, loop, start, length);\r
}\r
\r
}
\ No newline at end of file
\r
#include "video_decoder.h"\r
#include "../../ffmpeg_error.h"\r
+#include "../../util/util.h"\r
+#include "../../util/filter.h"\r
\r
#include <common/memory/memcpy.h>\r
\r
\r
namespace caspar {\r
\r
// NOTE(review): this helper is deleted by the patch. The newly added
// includes ("../../util/util.h") presumably provide an equivalent, since
// make_write_frame below still calls get_pixel_format_desc — verify that
// the util version exists before applying.
-core::pixel_format::type get_pixel_format(PixelFormat pix_fmt)
-{
- switch(pix_fmt)
- {
- case PIX_FMT_GRAY8: return core::pixel_format::gray;
- case PIX_FMT_BGRA: return core::pixel_format::bgra;
- case PIX_FMT_ARGB: return core::pixel_format::argb;
- case PIX_FMT_RGBA: return core::pixel_format::rgba;
- case PIX_FMT_ABGR: return core::pixel_format::abgr;
- case PIX_FMT_YUV444P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUV422P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUV420P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUV411P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUV410P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUVA420P: return core::pixel_format::ycbcra;
- default: return core::pixel_format::invalid;
- }
-}
-\r
// NOTE(review): deleted by the patch (moved to util — verify). Kept here for
// reference; two observations on the removed code below.
-core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)
-{
- // Get linesizes
- AVPicture dummy_pict; 
- avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);
-
- core::pixel_format_desc desc;
- desc.pix_fmt = get_pixel_format(pix_fmt);
- 
- switch(desc.pix_fmt)
- {
- case core::pixel_format::gray:
- {
// NOTE(review): dividing linesize by 4 for an 8-bit single-channel gray
// plane looks like a copy-paste from the 4-byte RGBA case below — moot if
// the function is removed, but check the util replacement does not inherit it.
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 1)); 
- return desc;
- }
- case core::pixel_format::bgra:
- case core::pixel_format::argb:
- case core::pixel_format::rgba:
- case core::pixel_format::abgr:
- {
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4)); 
- return desc;
- }
- case core::pixel_format::ycbcr:
- case core::pixel_format::ycbcra:
- { 
- // Find chroma height
// NOTE(review): deriving chroma height from data-pointer arithmetic on a
// null-filled AVPicture works with avpicture_fill's offset layout but is
// fragile — worth confirming the replacement uses av_pix_fmt descriptors.
- size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];
- size_t h2 = size2/dummy_pict.linesize[1]; 
-
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));
-
- if(desc.pix_fmt == core::pixel_format::ycbcra) 
- desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1)); 
- return desc;
- } 
- default: 
- desc.pix_fmt = core::pixel_format::invalid;
- return desc;
- }
-}
-\r
// Pimpl for video_decoder. This change threads an optional libavfilter
// filter string through the decoder and defers all pixel-format / sws
// initialization to make_write_frame, because the filter's output format is
// unknown until the first frame arrives (see comment there).
struct video_decoder::implementation : boost::noncopyable
{
input& input_;
std::shared_ptr<SwsContext> sws_context_;
const std::shared_ptr<core::frame_factory> frame_factory_;
AVCodecContext& codec_context_;
- const int width_;
- const int height_;
- const PixelFormat pix_fmt_;
- core::pixel_format_desc desc_;
size_t frame_number_;

+ // Optional libavfilter graph; null when no filter string was supplied.
+ std::shared_ptr<filter> filter_;
+ // Number of input frames the filter has absorbed without emitting output;
+ // drained at EOF in receive().
+ size_t filter_delay_;
+
+ // Reused decode target; also re-fed to the filter when draining at EOF.
+ safe_ptr<AVFrame> last_frame_;
+
public:
- explicit implementation(input& input, const safe_ptr<core::frame_factory>& frame_factory) 
+ explicit implementation(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str) 
: input_(input)
, frame_factory_(frame_factory)
, codec_context_(*input_.get_video_codec_context())
- , width_(codec_context_.width)
- , height_(codec_context_.height)
- , pix_fmt_(codec_context_.pix_fmt)
- , desc_(get_pixel_format_desc(pix_fmt_, width_, height_))
, frame_number_(0)
+ , filter_(filter_str.empty() ? nullptr : new filter(filter_str))
+ , filter_delay_(0)
+ , last_frame_(avcodec_alloc_frame(), av_free)
{
- if(desc_.pix_fmt == core::pixel_format::invalid)
- {
- CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";
-
- desc_ = get_pixel_format_desc(PIX_FMT_BGRA, width_, height_);
- double param;
// NOTE(review): "¶m" below is an HTML-escaping artifact of "&param"
// (the address-of was eaten by an entity encoder) — harmless here since the
// line is being removed, but see the same artifact in make_write_frame.
- sws_context_.reset(sws_getContext(width_, height_, pix_fmt_, width_, height_, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);
- if(!sws_context_)
- BOOST_THROW_EXCEPTION(operation_failed() <<
- msg_info("Could not create software scaling context.") << 
- boost::errinfo_api_function("sws_getContext"));
- }
}
\r
// Decodes the next packet into (frame_number, write_frame) pairs.
// NOTE(review): the hunk is truncated — the declaration of `result` and the
// packet pop from input_ are in missing context lines.
std::deque<std::pair<int, safe_ptr<core::write_frame>>> receive()

if(!video_packet) // eof
{ 
- avcodec_flush_buffers(&codec_context_);
+ // Drain frames still buffered inside the filter graph by re-feeding the
+ // last decoded frame once per owed frame.
+ // NOTE(review): re-pushing last_frame_ (rather than a null/flush input)
+ // duplicates the final picture filter_delay_ times — confirm this is the
+ // intended drain strategy for the filters in use.
+ for(size_t n = 0; n < filter_delay_; ++n)
+ boost::range::push_back(result, get_frames(last_frame_));
+ 
frame_number_ = 0;
+ filter_delay_ = 0;
+ avcodec_flush_buffers(&codec_context_);
+
return result;
}

- safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);
-
int frame_finished = 0;
+ // Decode into the member last_frame_ so the EOF path above can reuse it.
- const int errn = avcodec_decode_video2(&codec_context_, decoded_frame.get(), &frame_finished, video_packet.get());
+ const int errn = avcodec_decode_video2(&codec_context_, last_frame_.get(), &frame_finished, video_packet.get());

if(errn < 0)
{
}

if(frame_finished != 0) 
- result.push_back(std::make_pair(frame_number_++, make_write_frame(decoded_frame)));
+ result = get_frames(last_frame_);
+
+ return result;
+ }
+\r
+ // Pushes one decoded frame through the optional filter graph and converts
+ // every frame it emits into a write_frame. A filter may emit zero frames
+ // (it buffered — count the debt in filter_delay_) or several (e.g. a
+ // field-splitting filter); all outputs of one input share the same
+ // frame_number so the producer can interlace them back together.
+ std::deque<std::pair<int, safe_ptr<core::write_frame>>> get_frames(const safe_ptr<AVFrame>& frame)
+ {
+ std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;
+ 
+ if(filter_)
+ {
+ auto frames = filter_->execute(frame);
+
+ boost::range::transform(frames, std::back_inserter(result), [this](const safe_ptr<AVFrame>& frame)
+ {
+ return std::make_pair(frame_number_, make_write_frame(frame));
+ });
+
+ if(!frames.empty())
+ ++frame_number_;
+ else
+ ++filter_delay_;
+ }
+ else
+ // No filter: pass-through, one write_frame per decoded frame.
+ result.push_back(std::make_pair(frame_number_++, make_write_frame(frame)));

return result;
}
\r
// Converts an AVFrame into a core::write_frame. Format metadata now comes
// from the frame itself (not the codec context), since a filter may change
// width/height/pix_fmt.
// NOTE(review): this hunk is truncated — the else-branch opening around the
// sws_scale path and the parallel_for closing braces are in missing context.
safe_ptr<core::write_frame> make_write_frame(safe_ptr<AVFrame> decoded_frame)
- { 
- auto write = frame_factory_->create_frame(this, desc_);
+ { 
+ // We don't know what the filter output might give until we received the first frame. Initialize everything on first frame.
+ auto width = decoded_frame->width;
+ auto height = decoded_frame->height;
+ auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);
+ auto desc = get_pixel_format_desc(pix_fmt, width, height);
+ 
+ if(desc.pix_fmt == core::pixel_format::invalid)
+ {
+ CASPAR_VERIFY(!sws_context_); // Initialize only once. Nothing should change while running;
+ CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";
+
+ desc = get_pixel_format_desc(PIX_FMT_BGRA, width, height);
+ double param;
+ // NOTE(review): "¶m" below is an HTML-escaping artifact of "&param" —
+ // restore the address-of before applying, or this will not compile.
+ sws_context_.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);
+ if(!sws_context_)
+ BOOST_THROW_EXCEPTION(operation_failed() <<
+ msg_info("Could not create software scaling context.") << 
+ boost::errinfo_api_function("sws_getContext"));
+ }
+
+ auto write = frame_factory_->create_frame(this, desc);
write->set_is_interlaced(decoded_frame->interlaced_frame != 0);

if(sws_context_ == nullptr)
{
// Hardware-accelerated path: copy planes straight into the write_frame.
- tbb::parallel_for(0, static_cast<int>(desc_.planes.size()), 1, [&](int n)
+ tbb::parallel_for(0, static_cast<int>(desc.planes.size()), 1, [&](int n)
{
- auto plane = desc_.planes[n];
+ auto plane = desc.planes[n];
auto result = write->image_data(n).begin();
auto decoded = decoded_frame->data[n];
auto decoded_linesize = decoded_frame->linesize[n];

// Copy line by line since ffmpeg sometimes pads each line.
- tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc_.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)
+ tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)
{
for(size_t y = r.begin(); y != r.end(); ++y)
memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);
// Use sws_scale when provided colorspace has no hw-accel.
safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); 
avcodec_get_frame_defaults(av_frame.get()); 
- avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width_, height_);
+ avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);

- sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height_, av_frame->data, av_frame->linesize); 
+ sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize); 

write->commit();
} 

- // DVVIDEO is in lower field. Make it upper field if needed.
- if(codec_context_.codec_id == CODEC_ID_DVVIDEO && frame_factory_->get_video_format_desc().mode == core::video_mode::upper)
- write->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(height_));
+ // Fix field-order if needed. DVVIDEO is in lower field. Make it upper field if needed.
+ // Field-order fix generalized: instead of special-casing DVVIDEO, shift any
+ // interlaced frame by half a line whenever its top_field_first flag
+ // disagrees with the channel's video mode.
+ if(decoded_frame->interlaced_frame)
+ {
+ switch(frame_factory_->get_video_format_desc().mode)
+ {
+ case core::video_mode::upper:
+ if(!decoded_frame->top_field_first)
+ write->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(height));
+ break;
+ case core::video_mode::lower:
+ if(decoded_frame->top_field_first)
+ write->get_image_transform().set_fill_translation(0.0f, -0.5/static_cast<double>(height));
+ break;
+ }
+ }

return write;
}
};\r
\r
// Public pimpl forwarders; the constructor gains a (narrow) filter string
// that is passed straight through to the implementation.
-video_decoder::video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(input, frame_factory)){}
+video_decoder::video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str) : impl_(new implementation(input, frame_factory, filter_str)){}
std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_decoder::receive(){return impl_->receive();}
\r
}
\ No newline at end of file
\r
// Pimpl for the libavfilter wrapper. This change replaces the internal
// push/try_pop buffer with a synchronous execute() that returns all frames
// the graph yields for one input frame.
struct filter::implementation
{
- const std::string filters_;
+ std::string filters_;
std::shared_ptr<AVFilterGraph> graph_;
AVFilterContext* video_in_filter_;
AVFilterContext* video_out_filter_;
- std::deque<std::shared_ptr<AVFrame>> buffer_;

implementation(const std::string& filters) 
: filters_(filters)
- {}
+ {
+ // Normalize the filter description to lower case.
+ // NOTE(review): ::tolower on plain char is UB for negative (non-ASCII)
+ // chars — consider casting through unsigned char.
+ std::transform(filters_.begin(), filters_.end(), filters_.begin(), ::tolower);
+ }

- void push(const safe_ptr<AVFrame>& frame)
+ // Feeds one frame into the graph and returns every output frame available.
+ // NOTE(review): hunk truncated — the vsrc push and avfilter_poll_frame call
+ // that set errn are in missing context lines; errn here is the number of
+ // frames ready on the sink.
+ std::vector<safe_ptr<AVFrame>> execute(const safe_ptr<AVFrame>& frame)
{ 
int errn = 0; 

boost::errinfo_api_function("avfilter_poll_frame") << boost::errinfo_errno(AVUNERROR(errn)));
}

- std::generate_n(std::back_inserter(buffer_), errn, [&]{return get_frame();});
+ std::vector<safe_ptr<AVFrame>> result;
+
+ std::generate_n(std::back_inserter(result), errn, [&]{return get_frame();});
+
+ return result;
}
\r
- std::shared_ptr<AVFrame> get_frame()
+ // Pulls one frame from the sink link and wraps it in an AVFrame, copying
+ // data pointers, linesizes and picture properties from the link's cur_buf.
+ // NOTE(review): hunk truncated — the avfilter_request_frame call and the
+ // data-pointer copy loop body are in missing context lines.
+ safe_ptr<AVFrame> get_frame()
{ 
auto link = video_out_filter_->inputs[0];

boost::errinfo_api_function("avfilter_request_frame") << boost::errinfo_errno(AVUNERROR(errn)));
}

- auto pic = reinterpret_cast<AVPicture*>(link->cur_buf->buf);
+ auto pic = reinterpret_cast<AVPicture*>(link->cur_buf->buf);

- std::shared_ptr<AVFrame> frame(avcodec_alloc_frame(), av_free);
+ safe_ptr<AVFrame> frame(avcodec_alloc_frame(), av_free);
avcodec_get_frame_defaults(frame.get()); 

for(size_t n = 0; n < 4; ++n)
frame->linesize[n] = pic->linesize[n];
}

- frame->width = link->w;
- frame->height = link->h;
- frame->format = link->format;
+ // Take geometry/format/interlacing from the buffer itself rather than the
+ // link, since a filter can change them per-frame.
+ // FIXME
+ frame->width = link->cur_buf->video->w;
+ frame->height = link->cur_buf->video->h;
+ frame->format = link->cur_buf->format;
+ frame->interlaced_frame = link->cur_buf->video->interlaced;
+ frame->top_field_first = link->cur_buf->video->top_field_first;
+ frame->key_frame = link->cur_buf->video->key_frame;

return frame;
}
- \r
// NOTE(review): try_pop is removed together with the buffer_ member — the
// new synchronous execute() makes the pull-style API redundant. Confirm no
// remaining callers use filter::push/try_pop/size.
- bool try_pop(std::shared_ptr<AVFrame>& frame)
- {
- if(buffer_.empty())
- return false;
-
- frame = buffer_.front();
- buffer_.pop_front();
-
- return true;
- }
};\r
\r
// Public pimpl forwarders: the push/try_pop/size trio is collapsed into a
// single synchronous execute().
filter::filter(const std::string& filters) : impl_(new implementation(filters)){}
-void filter::push(const safe_ptr<AVFrame>& frame) {return impl_->push(frame);}
-bool filter::try_pop(std::shared_ptr<AVFrame>& frame){return impl_->try_pop(frame);}
-size_t filter::size() const {return impl_->buffer_.size();}
+std::vector<safe_ptr<AVFrame>> filter::execute(const safe_ptr<AVFrame>& frame) {return impl_->execute(frame);}
\r
}
\ No newline at end of file