#include "../../StdAfx.h"\r
\r
#include "deinterlacer.h"\r
-/*\r
+\r
#include <core/frame/frame_factory.h>\r
\r
#include <modules/ffmpeg/producer/filter/filter.h>\r
\r
namespace caspar { namespace accelerator { namespace cpu {\r
\r
-struct deinterlacer::impl : public std::enable_shared_from_this<impl> \r
+struct deinterlacer::impl\r
{\r
- ffmpeg::filter filter_;\r
-\r
- typedef tbb::concurrent_hash_map<const void*, std::tuple<boost::signals2::scoped_connection, std::vector<core::const_frame>>> cache_t; \r
-\r
- cache_t frame_cache_;\r
+ ffmpeg::filter filter_; \r
\r
public:\r
\r
}\r
\r
std::vector<core::const_frame> operator()(const core::const_frame& frame, core::frame_factory& frame_factory)\r
- {\r
- auto tag = frame.data_tag();\r
-\r
- cache_t::const_accessor a;\r
- \r
- if(frame_cache_.find(a, tag))\r
- return std::get<1>(a->second);\r
- \r
+ { \r
std::array<uint8_t*, 4> data = {};\r
for(int n = 0; n < frame.pixel_format_desc().planes.size(); ++n)\r
data[n] = const_cast<uint8_t*>(frame.image_data(n).begin());\r
std::vector<core::const_frame> frames;\r
\r
BOOST_FOREACH(auto av_frame, av_frames)\r
- frames.push_back(ffmpeg::make_frame(tag, av_frame, frame.frame_rate(), frame_factory, 0));\r
-\r
- std::weak_ptr<impl> self = shared_from_this();\r
- auto connection = frame.on_released.connect(std::function<void()>([self, tag]() mutable\r
- {\r
- auto self2 = self.lock();\r
- if(self2)\r
- self2->frame_cache_.erase(tag);\r
- }));\r
-\r
- frame_cache_.insert(std::make_pair(&frame, std::make_tuple(connection, frames)));\r
-\r
+ frames.push_back(ffmpeg::make_frame(frame.stream_tag(), av_frame, frame.frame_rate(), frame_factory));\r
+ \r
return frames;\r
} \r
};\r
deinterlacer::~deinterlacer(){}\r
std::vector<core::const_frame> deinterlacer::operator()(const core::const_frame& frame, core::frame_factory& frame_factory){return (*impl_)(frame, frame_factory);}\r
\r
-}}}*/
\ No newline at end of file
+}}}
\ No newline at end of file
#pragma once\r
-/*\r
+\r
#include <common/forward.h>\r
\r
#include <core/frame/frame.h>\r
spl::unique_ptr<impl> impl_;\r
};\r
\r
-}}}*/
\ No newline at end of file
+}}}
\ No newline at end of file
// Fix frames \r
BOOST_FOREACH(auto& item, layer.items) \r
{\r
+ if(std::abs(item.transform.fill_scale[1]-1.0) > 1.0/target_texture->height() ||\r
+ std::abs(item.transform.fill_translation[1]) > 1.0/target_texture->height()) \r
+ CASPAR_LOG(warning) << L"[image_mixer] Frame should be deinterlaced. Send FILTER DEINTERLACE_BOB when creating producer."; \r
+\r
if(item.pix_desc.planes.at(0).height == 480) // NTSC DV\r
{\r
item.transform.fill_translation[1] += 2.0/static_cast<double>(format_desc.height);\r
std::shared_ptr<texture>& layer_key_texture, \r
std::shared_ptr<texture>& local_key_texture, \r
std::shared_ptr<texture>& local_mix_texture)\r
- { \r
+ { \r
draw_params draw_params;\r
draw_params.pix_desc = std::move(item.pix_desc);\r
draw_params.transform = std::move(item.transform);\r
boost::unique_future<spl::shared_ptr<texture>> copy_async(const array<const std::uint8_t>& source, int width, int height, int stride);\r
boost::unique_future<spl::shared_ptr<texture>> copy_async(const array<std::uint8_t>& source, int width, int height, int stride);\r
boost::unique_future<array<const std::uint8_t>> copy_async(const spl::shared_ptr<texture>& source);\r
- \r
+ \r
template<typename Func>\r
auto begin_invoke(Func&& func, task_priority priority = task_priority::normal_priority) -> boost::unique_future<decltype(func())> // noexcept\r
{ \r
\r
// frame_producer\r
\r
- draw_frame receive(int) override\r
+ draw_frame receive() override\r
{\r
event_subject_ << monitor::event("color") % color_str_;\r
\r
{\r
public:\r
empty_frame_producer(){}\r
- virtual draw_frame receive(int){return draw_frame::empty();}\r
+ virtual draw_frame receive(){return draw_frame::empty();}\r
virtual draw_frame last_frame() const{return draw_frame::empty();}\r
virtual void set_frame_factory(const spl::shared_ptr<frame_factory>&){}\r
virtual uint32_t nb_frames() const {return 0;}\r
}).detach(); \r
}\r
\r
- draw_frame receive(int flags) override {return producer_->receive(flags);}\r
- draw_frame last_frame() const override {return producer_->last_frame();}\r
+ draw_frame receive() override {return producer_->receive();}\r
+ draw_frame last_frame() const override {return producer_->last_frame();}\r
std::wstring print() const override {return producer_->print();}\r
std::wstring name() const override {return producer_->name();}\r
boost::property_tree::wptree info() const override {return producer_->info();}\r
boost::unique_future<std::wstring> call(const std::wstring& str) override {return producer_->call(str);}\r
void leading_producer(const spl::shared_ptr<frame_producer>& producer) override {return producer_->leading_producer(producer);}\r
uint32_t nb_frames() const override {return producer_->nb_frames();}\r
- virtual void subscribe(const monitor::observable::observer_ptr& o) {return producer_->subscribe(o);}\r
- virtual void unsubscribe(const monitor::observable::observer_ptr& o) {return producer_->unsubscribe(o);}\r
+ virtual void subscribe(const monitor::observable::observer_ptr& o) {return producer_->subscribe(o);}\r
+ virtual void unsubscribe(const monitor::observable::observer_ptr& o) {return producer_->unsubscribe(o);}\r
};\r
\r
spl::shared_ptr<core::frame_producer> create_destroy_proxy(spl::shared_ptr<core::frame_producer> producer)\r
\r
// Static Members\r
\r
- struct flags_def\r
- {\r
- enum type\r
- {\r
- none = 0,\r
- alpha_only = 2,\r
- deinterlace = 4,\r
- };\r
- };\r
- typedef enum_class<flags_def> flags;\r
-\r
static const spl::shared_ptr<frame_producer>& empty();\r
\r
// Constructors\r
\r
// Methods \r
\r
- virtual class draw_frame receive(int flags) = 0;\r
+ virtual class draw_frame receive() = 0;\r
virtual boost::unique_future<std::wstring> call(const std::wstring&);\r
\r
// monitor::observable\r
if(preview)\r
{\r
play();\r
- foreground_->receive(0);\r
+ foreground_->receive();\r
pause();\r
}\r
\r
pause();\r
}\r
\r
- draw_frame receive(frame_producer::flags flags, const video_format_desc& format_desc)\r
+ draw_frame receive(const video_format_desc& format_desc)\r
{ \r
try\r
{\r
if(is_paused_)\r
return foreground_->last_frame();\r
\r
- auto frame = foreground_->receive(flags.value());\r
+ auto frame = foreground_->receive();\r
\r
if(frame == core::draw_frame::late())\r
return foreground_->last_frame();\r
if(frames_left < 1)\r
{\r
play();\r
- return receive(flags, format_desc);\r
+ return receive(format_desc);\r
}\r
}\r
\r
void layer::play(){impl_->play();}\r
void layer::pause(){impl_->pause();}\r
void layer::stop(){impl_->stop();}\r
-draw_frame layer::receive(frame_producer::flags flags, const video_format_desc& format_desc) {return impl_->receive(flags, format_desc);}\r
+draw_frame layer::receive(const video_format_desc& format_desc) {return impl_->receive(format_desc);}\r
spl::shared_ptr<frame_producer> layer::foreground() const { return impl_->foreground_;}\r
spl::shared_ptr<frame_producer> layer::background() const { return impl_->background_;}\r
boost::property_tree::wptree layer::info() const{return impl_->info();}\r
void pause(); \r
void stop(); \r
\r
- class draw_frame receive(frame_producer::flags flags, const struct video_format_desc& format_desc); \r
+ class draw_frame receive(const struct video_format_desc& format_desc); \r
\r
// monitor::observable\r
\r
\r
// frame_producer\r
\r
- draw_frame receive(int flags) override\r
+ draw_frame receive() override\r
{\r
tbb::parallel_invoke(\r
[&]\r
{\r
if(fill_ == core::draw_frame::late())\r
- fill_ = fill_producer_->receive(flags);\r
+ fill_ = fill_producer_->receive();\r
},\r
[&]\r
{\r
if(key_ == core::draw_frame::late())\r
- key_ = key_producer_->receive(flags | frame_producer::flags::alpha_only);\r
+ key_ = key_producer_->receive();\r
});\r
\r
if(fill_ == draw_frame::eof() || key_ == draw_frame::eof())\r
auto& layer = layers_[index];\r
auto& tween = tweens_[index];\r
auto transform = tween.fetch_and_tick(1);\r
-\r
- frame_producer::flags flags = frame_producer::flags::none;\r
- if(format_desc.field_mode != field_mode::progressive)\r
- {\r
- flags |= std::abs(transform.image_transform.fill_scale[1] - 1.0) > 0.0001 ? frame_producer::flags::deinterlace : frame_producer::flags::none;\r
- flags |= std::abs(transform.image_transform.fill_translation[1]) > 0.0001 ? frame_producer::flags::deinterlace : frame_producer::flags::none;\r
- }\r
-\r
- if(transform.image_transform.is_key)\r
- flags |= frame_producer::flags::alpha_only;\r
- \r
- auto frame = layer.receive(flags, format_desc); \r
\r
+ auto frame = layer.receive(format_desc); \r
auto frame1 = core::draw_frame(frame);\r
frame1.transform() = transform;\r
\r
source_producer_ = create_destroy_proxy(producer);\r
}\r
\r
- draw_frame receive(int flags) override\r
+ draw_frame receive() override\r
{\r
if(current_frame_ >= info_.duration)\r
{\r
source_producer_ = core::frame_producer::empty();\r
- return dest_producer_->receive(flags);\r
+ return dest_producer_->receive();\r
}\r
\r
++current_frame_;\r
tbb::parallel_invoke(\r
[&]\r
{\r
- dest = dest_producer_->receive(flags);\r
+ dest = dest_producer_->receive();\r
if(dest == core::draw_frame::late())\r
dest = dest_producer_->last_frame();\r
},\r
[&]\r
{\r
- source = source_producer_->receive(flags);\r
+ source = source_producer_->receive();\r
if(source == core::draw_frame::late())\r
source = source_producer_->last_frame();\r
}); \r
#include <core/frame/frame_transform.h>\r
#include <core/frame/frame_factory.h>\r
#include <core/monitor/monitor.h>\r
+#include <core/mixer/audio/audio_mixer.h>\r
\r
#include <tbb/concurrent_queue.h>\r
\r
boost::circular_buffer<size_t> sync_buffer_;\r
ffmpeg::frame_muxer muxer_;\r
\r
- tbb::atomic<int> flags_;\r
spl::shared_ptr<core::frame_factory> frame_factory_;\r
core::video_format_desc in_format_desc_;\r
core::video_format_desc out_format_desc_;\r
, sync_buffer_(out_format_desc.audio_cadence.size())\r
, frame_factory_(frame_factory)\r
{ \r
- flags_ = 0;\r
frame_buffer_.set_capacity(2);\r
\r
graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f)); \r
CASPAR_LOG(trace) << print() << L" Syncing audio.";\r
return S_OK;\r
}\r
-\r
- muxer_.push(audio_buffer);\r
- muxer_.push(av_frame, flags_); \r
+ \r
+ muxer_.push_video(av_frame); \r
+ muxer_.push_audio(audio_buffer);\r
\r
boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);\r
\r
return S_OK;\r
}\r
\r
- core::draw_frame get_frame(int flags)\r
+ core::draw_frame get_frame()\r
{\r
if(exception_ != nullptr)\r
std::rethrow_exception(exception_);\r
-\r
- flags_ = flags;\r
-\r
+ \r
core::draw_frame frame = core::draw_frame::late();\r
if(!frame_buffer_.try_pop(frame))\r
graph_->set_tag("late-frame");\r
\r
// frame_producer\r
\r
- core::draw_frame receive(int flags) override\r
+ core::draw_frame receive() override\r
{\r
- auto frame = producer_->get_frame(flags);\r
+ auto frame = producer_->get_frame();\r
\r
if(frame != core::draw_frame::late())\r
last_frame_ = frame;\r
\r
struct ffmpeg_producer : public core::frame_producer\r
{\r
- monitor::basic_subject event_subject_;\r
- const std::wstring filename_;\r
+ monitor::basic_subject event_subject_;\r
+ const std::wstring filename_;\r
\r
- const spl::shared_ptr<diagnostics::graph> graph_;\r
+ const spl::shared_ptr<diagnostics::graph> graph_;\r
\r
- const spl::shared_ptr<core::frame_factory> frame_factory_;\r
- const core::video_format_desc format_desc_;\r
+ const spl::shared_ptr<core::frame_factory> frame_factory_;\r
+ const core::video_format_desc format_desc_;\r
\r
- input input_; \r
- std::unique_ptr<video_decoder> video_decoder_;\r
- std::unique_ptr<audio_decoder> audio_decoder_; \r
- std::unique_ptr<frame_muxer> muxer_;\r
+ input input_; \r
+ std::unique_ptr<video_decoder> video_decoder_;\r
+ std::unique_ptr<audio_decoder> audio_decoder_; \r
+ std::unique_ptr<frame_muxer> muxer_;\r
\r
- const double fps_;\r
- const uint32_t start_;\r
- const uint32_t length_;\r
+ const double fps_;\r
+ const uint32_t start_;\r
\r
- int64_t frame_number_;\r
+ int64_t frame_number_;\r
\r
- core::draw_frame last_frame_;\r
+ core::draw_frame last_frame_;\r
\r
public:\r
explicit ffmpeg_producer(const spl::shared_ptr<core::frame_factory>& frame_factory, \r
, input_(graph_, filename_, loop, start, length)\r
, fps_(read_fps(*input_.context(), format_desc_.fps))\r
, start_(start)\r
- , length_(length)\r
, frame_number_(0)\r
, last_frame_(core::draw_frame::empty())\r
{\r
\r
// frame_producer\r
\r
- core::draw_frame receive(int flags) override\r
+ core::draw_frame receive() override\r
{ \r
boost::timer frame_timer;\r
\r
auto frame = core::draw_frame::late(); \r
- if(!try_decode_frame(frame, flags))\r
+ if(!try_decode_frame(frame))\r
{\r
if(!input_.eof()) \r
graph_->set_tag("underflow"); \r
\r
uint32_t nb_frames = file_nb_frames();\r
\r
- nb_frames = std::min(length_, nb_frames);\r
+ nb_frames = std::min(input_.length(), nb_frames);\r
nb_frames = muxer_->calc_nb_frames(nb_frames);\r
\r
return nb_frames > start_ ? nb_frames - start_ : 0;\r
{\r
static const boost::wregex loop_exp(L"LOOP\\s*(?<VALUE>\\d?)?", boost::regex::icase);\r
static const boost::wregex seek_exp(L"SEEK\\s+(?<VALUE>\\d+)", boost::regex::icase);\r
+ static const boost::wregex length_exp(L"LENGTH\\s+(?<VALUE>\\d+)", boost::regex::icase);\r
+ static const boost::wregex start_exp(L"START\\s+(?<VALUE>\\d+)", boost::regex::icase);\r
\r
boost::wsmatch what;\r
if(boost::regex_match(param, what, loop_exp))\r
input_.seek(boost::lexical_cast<uint32_t>(what["VALUE"].str()));\r
return L"";\r
}\r
+ if(boost::regex_match(param, what, length_exp))\r
+ {\r
+		if(!what["VALUE"].str().empty())\r
+			input_.length(boost::lexical_cast<uint32_t>(what["VALUE"].str()));\r
+ return boost::lexical_cast<std::wstring>(input_.length());\r
+ }\r
+ if(boost::regex_match(param, what, start_exp))\r
+ {\r
+		if(!what["VALUE"].str().empty())\r
+			input_.start(boost::lexical_cast<uint32_t>(what["VALUE"].str()));\r
+ return boost::lexical_cast<std::wstring>(input_.start());\r
+ }\r
\r
BOOST_THROW_EXCEPTION(invalid_argument());\r
}\r
\r
- bool try_decode_frame(core::draw_frame& result, int flags)\r
+ bool try_decode_frame(core::draw_frame& result)\r
{\r
for(int n = 0; n < 32; ++n)\r
{\r
audio = audio_decoder_->poll(); \r
});\r
\r
- muxer_->push(video, flags);\r
- muxer_->push(audio);\r
+ muxer_->push_video(video);\r
+ muxer_->push_audio(audio);\r
\r
if(!audio_decoder_)\r
{\r
if(video == flush_video())\r
- muxer_->push(flush_audio());\r
+ muxer_->push_audio(flush_audio());\r
else if(!muxer_->audio_ready())\r
- muxer_->push(empty_audio());\r
+ muxer_->push_audio(empty_audio());\r
}\r
\r
if(!video_decoder_)\r
{\r
if(audio == flush_audio())\r
- muxer_->push(flush_video(), 0);\r
+ muxer_->push_video(flush_video());\r
else if(!muxer_->video_ready())\r
- muxer_->push(empty_video(), 0);\r
+ muxer_->push_video(empty_video());\r
}\r
}\r
\r
return core::frame_producer::empty();\r
\r
auto loop = boost::range::find(params, L"LOOP") != params.end();\r
- auto start = get_param(L"SEEK", params, static_cast<uint32_t>(0));\r
+ auto start = get_param(L"START", params, get_param(L"SEEK", params, static_cast<uint32_t>(0)));\r
auto length = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());\r
auto filter_str = get_param(L"FILTER", params, L""); \r
\r
const int default_stream_index_;\r
\r
const std::wstring filename_;\r
- const uint32_t start_; \r
- const uint32_t length_;\r
+ tbb::atomic<uint32_t> start_; \r
+ tbb::atomic<uint32_t> length_;\r
tbb::atomic<bool> loop_;\r
uint32_t frame_number_;\r
\r
\r
executor executor_;\r
\r
- impl(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& filename, bool loop, uint32_t start, uint32_t length) \r
+ impl(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& filename, const bool loop, const uint32_t start, const uint32_t length) \r
: graph_(graph)\r
, format_context_(open_input(filename)) \r
, default_stream_index_(av_find_default_stream_index(format_context_.get()))\r
, filename_(filename)\r
- , start_(start)\r
- , length_(length)\r
, frame_number_(0)\r
, executor_(print())\r
{ \r
+ start_ = start;\r
+ length_ = length;\r
loop_ = loop;\r
buffer_size_ = 0;\r
\r
void input::loop(bool value){impl_->loop_ = value;}\r
bool input::loop() const{return impl_->loop_;}\r
void input::seek(uint32_t target){impl_->seek(target);}\r
+void input::start(uint32_t value){impl_->start_ = value;}\r
+uint32_t input::start() const{return impl_->start_;}\r
+void input::length(uint32_t value){impl_->length_ = value;}\r
+uint32_t input::length() const{return impl_->length_;}\r
}}\r
\r
void loop(bool value);\r
bool loop() const;\r
+ void start(uint32_t value);\r
+ uint32_t start() const;\r
+ void length(uint32_t value);\r
+ uint32_t length() const;\r
\r
void seek(uint32_t target);\r
\r
display_mode display_mode_;\r
const double in_fps_;\r
const video_format_desc format_desc_;\r
- bool auto_transcode_;\r
- bool auto_deinterlace_;\r
\r
std::vector<int> audio_cadence_;\r
\r
\r
filter filter_;\r
const std::wstring filter_str_;\r
- bool force_deinterlacing_;\r
\r
impl(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter_str)\r
: display_mode_(display_mode::invalid)\r
, in_fps_(in_fps)\r
, format_desc_(format_desc)\r
- , auto_transcode_(env::properties().get(L"configuration.auto-transcode", true))\r
- , auto_deinterlace_(env::properties().get(L"configuration.auto-deinterlace", true))\r
, audio_cadence_(format_desc_.audio_cadence)\r
, frame_factory_(frame_factory)\r
, filter_str_(filter_str)\r
- , force_deinterlacing_(false)\r
{\r
video_streams_.push(std::queue<core::mutable_frame>());\r
audio_streams_.push(core::audio_buffer());\r
boost::range::rotate(audio_cadence_, std::end(audio_cadence_)-1);\r
}\r
\r
- void push(const std::shared_ptr<AVFrame>& video_frame, int flags)\r
+ void push_video(const std::shared_ptr<AVFrame>& video_frame)\r
{ \r
if(!video_frame)\r
return;\r
}\r
else\r
{\r
- bool DEINTERLACE_FLAG = (flags & core::frame_producer::flags::deinterlace) != 0;\r
- \r
- if(auto_deinterlace_ && force_deinterlacing_ != DEINTERLACE_FLAG)\r
- {\r
- force_deinterlacing_ = DEINTERLACE_FLAG;\r
- display_mode_ = display_mode::invalid;\r
- }\r
-\r
if(display_mode_ == display_mode::invalid)\r
- update_display_mode(video_frame, force_deinterlacing_);\r
+ update_display_mode(video_frame);\r
\r
- if(flags & core::frame_producer::flags::alpha_only)\r
- video_frame->format = make_alpha_format(video_frame->format);\r
- \r
- auto format = video_frame->format;\r
- if(video_frame->format == CASPAR_PIX_FMT_LUMA) // CASPAR_PIX_FMT_LUMA is not valid for filter, change it to GRAY8\r
- video_frame->format = PIX_FMT_GRAY8;\r
-\r
filter_.push(video_frame);\r
- BOOST_FOREACH(auto& av_frame, filter_.poll_all())\r
- {\r
- if(video_frame->format == PIX_FMT_GRAY8 && format == CASPAR_PIX_FMT_LUMA)\r
- av_frame->format = format;\r
-\r
- video_streams_.back().push(make_frame(this, av_frame, format_desc_.fps, *frame_factory_, flags));\r
- }\r
+ BOOST_FOREACH(auto& av_frame, filter_.poll_all()) \r
+ video_streams_.back().push(make_frame(this, av_frame, format_desc_.fps, *frame_factory_)); \r
}\r
\r
if(video_streams_.back().size() > 32)\r
BOOST_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("video-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));\r
}\r
\r
- void push(const std::shared_ptr<core::audio_buffer>& audio)\r
+ void push_audio(const std::shared_ptr<core::audio_buffer>& audio)\r
{\r
if(!audio) \r
return;\r
return samples;\r
}\r
\r
- void update_display_mode(const std::shared_ptr<AVFrame>& frame, bool force_deinterlace)\r
+ void update_display_mode(const std::shared_ptr<AVFrame>& frame)\r
{\r
std::wstring filter_str = filter_str_;\r
\r
display_mode_ = display_mode::simple;\r
- if(auto_transcode_)\r
- {\r
- auto mode = get_mode(*frame);\r
- auto fps = in_fps_;\r
\r
- if(filter::is_deinterlacing(filter_str_))\r
- mode = core::field_mode::progressive;\r
+ auto mode = get_mode(*frame);\r
+ auto fps = in_fps_;\r
+\r
+ if(filter::is_deinterlacing(filter_str_))\r
+ mode = core::field_mode::progressive;\r
\r
- if(filter::is_double_rate(filter_str_))\r
- fps *= 2;\r
+ if(filter::is_double_rate(filter_str_))\r
+ fps *= 2;\r
\r
- display_mode_ = get_display_mode(mode, fps, format_desc_.field_mode, format_desc_.fps);\r
+ display_mode_ = get_display_mode(mode, fps, format_desc_.field_mode, format_desc_.fps);\r
\r
- if((frame->height != 480 || format_desc_.height != 486) && // don't deinterlace for NTSC DV\r
- display_mode_ == display_mode::simple && mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && \r
- frame->height != format_desc_.height)\r
- {\r
- display_mode_ = display_mode::deinterlace_bob_reinterlace; // The frame will most likely be scaled, we need to deinterlace->reinterlace \r
- }\r
-\r
- if(force_deinterlace && mode != core::field_mode::progressive && \r
- display_mode_ != display_mode::deinterlace && \r
- display_mode_ != display_mode::deinterlace_bob && \r
- display_mode_ != display_mode::deinterlace_bob_reinterlace) \r
- { \r
- CASPAR_LOG(info) << L"[frame_muxer] Automatically started non bob-deinterlacing. Consider starting producer with bob-deinterlacing (FILTER DEINTERLACE_BOB) for smoother playback.";\r
- display_mode_ = display_mode::deinterlace;\r
- }\r
-\r
- if(display_mode_ == display_mode::deinterlace)\r
- filter_str = append_filter(filter_str, L"YADIF=0:-1");\r
- else if(display_mode_ == display_mode::deinterlace_bob || display_mode_ == display_mode::deinterlace_bob_reinterlace)\r
- filter_str = append_filter(filter_str, L"YADIF=1:-1");\r
+ if((frame->height != 480 || format_desc_.height != 486) && // don't deinterlace for NTSC DV\r
+ display_mode_ == display_mode::simple && mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && \r
+ frame->height != format_desc_.height)\r
+ {\r
+ display_mode_ = display_mode::deinterlace_bob_reinterlace; // The frame will most likely be scaled, we need to deinterlace->reinterlace \r
}\r
+ \r
+ if(display_mode_ == display_mode::deinterlace)\r
+ filter_str = append_filter(filter_str, L"YADIF=0:-1");\r
+ else if(display_mode_ == display_mode::deinterlace_bob || display_mode_ == display_mode::deinterlace_bob_reinterlace)\r
+ filter_str = append_filter(filter_str, L"YADIF=1:-1");\r
\r
if(display_mode_ == display_mode::invalid)\r
{\r
filter_.push(frame);\r
auto av_frame = filter_.poll();\r
if(av_frame) \r
- video_streams_.back().push(make_frame(this, spl::make_shared_ptr(av_frame), format_desc_.fps, *frame_factory_, 0));\r
+ video_streams_.back().push(make_frame(this, spl::make_shared_ptr(av_frame), format_desc_.fps, *frame_factory_));\r
}\r
filter_ = filter(filter_str);\r
CASPAR_LOG(info) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps_, frame->interlaced_frame > 0);\r
\r
frame_muxer::frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter)\r
: impl_(new impl(in_fps, frame_factory, format_desc, filter)){}\r
-void frame_muxer::push(const std::shared_ptr<AVFrame>& video_frame, int flags){impl_->push(video_frame, flags);}\r
-void frame_muxer::push(const std::shared_ptr<core::audio_buffer>& audio_samples){return impl_->push(audio_samples);}\r
+void frame_muxer::push_video(const std::shared_ptr<AVFrame>& video_frame){impl_->push_video(video_frame);}\r
+void frame_muxer::push_audio(const std::shared_ptr<core::audio_buffer>& audio_samples){return impl_->push_audio(audio_samples);}\r
bool frame_muxer::try_pop(core::draw_frame& result){return impl_->try_pop(result);}\r
uint32_t frame_muxer::calc_nb_frames(uint32_t nb_frames) const {return impl_->calc_nb_frames(nb_frames);}\r
bool frame_muxer::video_ready() const{return impl_->video_ready();}\r
public:\r
frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter = L"");\r
\r
- void push(const std::shared_ptr<AVFrame>& video_frame, int flags = 0);\r
- void push(const std::shared_ptr<core::audio_buffer>& audio_samples);\r
+ void push_video(const std::shared_ptr<AVFrame>& video_frame);\r
+ void push_audio(const std::shared_ptr<core::audio_buffer>& audio_samples);\r
\r
bool video_ready() const;\r
bool audio_ready() const;\r
{\r
switch(pix_fmt)\r
{\r
- case CASPAR_PIX_FMT_LUMA: return core::pixel_format::luma;\r
case PIX_FMT_GRAY8: return core::pixel_format::gray;\r
case PIX_FMT_BGRA: return core::pixel_format::bgra;\r
case PIX_FMT_ARGB: return core::pixel_format::argb;\r
{\r
// Get linesizes\r
AVPicture dummy_pict; \r
- avpicture_fill(&dummy_pict, nullptr, pix_fmt == CASPAR_PIX_FMT_LUMA ? PIX_FMT_GRAY8 : pix_fmt, width, height);\r
+ avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);\r
\r
core::pixel_format_desc desc = get_pixel_format(pix_fmt);\r
\r
}\r
}\r
\r
-int make_alpha_format(int format)\r
-{\r
- switch(get_pixel_format(static_cast<PixelFormat>(format)).value())\r
- {\r
- case core::pixel_format::ycbcr:\r
- case core::pixel_format::ycbcra:\r
- return CASPAR_PIX_FMT_LUMA;\r
- default:\r
- return format;\r
- }\r
-}\r
-\r
-core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory, int flags)\r
+core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory)\r
{ \r
static tbb::concurrent_unordered_map<int, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contexts_;\r
\r
const auto width = decoded_frame->width;\r
const auto height = decoded_frame->height;\r
auto desc = pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);\r
- \r
- if(flags & core::frame_producer::flags::alpha_only)\r
- desc = pixel_format_desc(static_cast<PixelFormat>(make_alpha_format(decoded_frame->format)), width, height);\r
- \r
+ \r
if(desc.format == core::pixel_format::invalid)\r
{\r
auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);\r
\r
// Utils\r
\r
-static const int CASPAR_PIX_FMT_LUMA = 10; // Just hijack some unual pixel format.\r
-\r
core::field_mode get_mode(const AVFrame& frame);\r
-int make_alpha_format(int format); // NOTE: Be careful about CASPAR_PIX_FMT_LUMA, change it to PIX_FMT_GRAY8 if you want to use the frame inside some ffmpeg function.\r
-core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory, int flags);\r
+core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory);\r
spl::shared_ptr<AVFrame> make_av_frame(core::mutable_frame& frame);\r
spl::shared_ptr<AVFrame> make_av_frame(core::const_frame& frame);\r
spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core::pixel_format_desc& pix_desc);\r
\r
// frame_producer\r
\r
- core::draw_frame receive(int) override\r
+ core::draw_frame receive() override\r
{ \r
auto frame = core::draw_frame::late();\r
\r
\r
// frame_producer\r
\r
- core::draw_frame receive(int) override\r
+ core::draw_frame receive() override\r
{\r
event_subject_ << monitor::event("file/path") % filename_;\r
\r
\r
// frame_producer\r
\r
- core::draw_frame receive(int) override\r
+ core::draw_frame receive() override\r
{ \r
delta_ += speed_;\r
\r