void draw(size_t width, \r
size_t height, \r
const core::pixel_format_desc& pix_desc, \r
- const core::image_transform& transform, \r
+ const core::image_transform& transform,\r
+ core::video_mode::type mode, \r
const std::vector<safe_ptr<device_buffer>>& planes, \r
const safe_ptr<device_buffer>& background,\r
const std::shared_ptr<device_buffer>& local_key, \r
const std::shared_ptr<device_buffer>& layer_key)\r
{\r
+ if(planes.empty())\r
+ return;\r
+\r
GL(glEnable(GL_TEXTURE_2D));\r
GL(glEnable(GL_POLYGON_STIPPLE));\r
\r
- if(transform.get_mode() == core::video_mode::upper)\r
+ if(mode == core::video_mode::upper)\r
glPolygonStipple(upper_pattern);\r
- else if(transform.get_mode() == core::video_mode::lower)\r
+ else if(mode == core::video_mode::lower)\r
glPolygonStipple(lower_pattern);\r
else\r
GL(glDisable(GL_POLYGON_STIPPLE));\r
shader_->set("has_layer_key", layer_key ? 1 : 0);\r
shader_->set("blend_mode", transform.get_is_key() ? core::image_transform::blend_mode::normal : transform.get_blend_mode());\r
shader_->set("alpha_mode", transform.get_alpha_mode());\r
- shader_->set("interlace_mode", transform.get_mode());\r
shader_->set("pixel_format", pix_desc.pix_fmt); \r
\r
auto levels = transform.get_levels();\r
\r
image_kernel::image_kernel() : impl_(new implementation()){}\r
\r
-void image_kernel::draw(size_t width, size_t height, const core::pixel_format_desc& pix_desc, const core::image_transform& transform, const std::vector<safe_ptr<device_buffer>>& planes, \r
+// Thin pimpl forwarder. 'mode' is the interlace field (progressive/upper/lower),\r
+// now passed explicitly — it was previously carried inside image_transform\r
+// (set_mode/get_mode removed from image_transform in this change).\r
+void image_kernel::draw(size_t width, size_t height, const core::pixel_format_desc& pix_desc, const core::image_transform& transform, core::video_mode::type mode, const std::vector<safe_ptr<device_buffer>>& planes, \r
		const safe_ptr<device_buffer>& background, const std::shared_ptr<device_buffer>& local_key, const std::shared_ptr<device_buffer>& layer_key)\r
{\r
-	impl_->draw(width, height, pix_desc, transform, planes, background, local_key, layer_key);\r
+	impl_->draw(width, height, pix_desc, transform, mode, planes, background, local_key, layer_key);\r
}\r
\r
}}
\ No newline at end of file
{\r
public:\r
image_kernel();\r
- void draw(size_t width, size_t height, const core::pixel_format_desc& pix_desc, const core::image_transform& transform, const std::vector<safe_ptr<device_buffer>>& planes, \r
+ void draw(size_t width, size_t height, const core::pixel_format_desc& pix_desc, const core::image_transform& transform, core::video_mode::type mode, const std::vector<safe_ptr<device_buffer>>& planes, \r
const safe_ptr<device_buffer>& background, const std::shared_ptr<device_buffer>& local_key = nullptr, const std::shared_ptr<device_buffer>& layer_key = nullptr);\r
private:\r
struct implementation;\r
#include <core/video_format.h>\r
\r
#include <boost/foreach.hpp>\r
+#include <boost/range.hpp>\r
\r
#include <algorithm>\r
#include <array>\r
\r
struct render_item\r
{\r
- pixel_format_desc desc;\r
- std::vector<safe_ptr<device_buffer>> textures;\r
- core::image_transform transform;\r
- const void* tag;\r
+ pixel_format_desc desc;\r
+ std::vector<safe_ptr<device_buffer>> textures;\r
+ image_transform transform;\r
+ video_mode::type mode;\r
+ const void* tag;\r
};\r
\r
bool operator==(const render_item& lhs, const render_item& rhs)\r
{\r
-	return lhs.textures == rhs.textures && lhs.transform == rhs.transform && lhs.tag == rhs.tag;\r
+	// 'mode' (interlace field) now participates in equality: the layer dedup in\r
+	// visit() drops items already present, so upper- and lower-field items built\r
+	// from the same frame must not compare equal to each other.\r
+	return lhs.textures == rhs.textures && lhs.transform == rhs.transform && lhs.tag == rhs.tag && lhs.mode == rhs.mode;\r
}\r
\r
struct image_mixer::implementation : boost::noncopyable\r
{ \r
- typedef std::deque<render_item> stream;\r
- typedef std::deque<stream> layer;\r
+ typedef std::deque<render_item> layer;\r
\r
video_channel_context& channel_;\r
- \r
- std::stack<core::image_transform> transform_stack_;\r
+\r
+ std::stack<image_transform> transform_stack_;\r
+ std::deque<video_mode::type> mode_stack_;\r
\r
std::queue<layer> layers_; // layer/stream/items\r
\r
std::unique_ptr<image_kernel> kernel_;\r
\r
- std::shared_ptr<device_buffer> draw_buffer_[2];\r
+ std::array<std::shared_ptr<device_buffer>, 2> draw_buffer_;\r
std::shared_ptr<device_buffer> write_buffer_;\r
\r
- std::shared_ptr<device_buffer> stream_key_buffer_[2];\r
+ std::array<std::shared_ptr<device_buffer>, 2> stream_key_buffer_;\r
std::shared_ptr<device_buffer> layer_key_buffer_;\r
\r
public:\r
: channel_(video_channel)\r
{\r
initialize_buffers();\r
- transform_stack_.push(core::image_transform());\r
+ transform_stack_.push(image_transform());\r
+ mode_stack_.push_back(video_mode::progressive);\r
\r
channel_.ogl().invoke([=]\r
{\r
void begin(core::basic_frame& frame)\r
{\r
transform_stack_.push(transform_stack_.top()*frame.get_image_transform());\r
+ mode_stack_.push_back(frame.get_mode() == video_mode::progressive ? mode_stack_.back() : frame.get_mode());\r
}\r
\r
void visit(core::write_frame& frame)\r
- { \r
- if(frame.get_textures().empty())\r
+ { \r
+ // Check if frame has been discarded by interlacing\r
+ if(boost::range::find(mode_stack_, video_mode::upper) != mode_stack_.end() && boost::range::find(mode_stack_, video_mode::lower) != mode_stack_.end())\r
return;\r
-\r
- render_item item = {frame.get_pixel_format_desc(), frame.get_textures(), transform_stack_.top()*frame.get_image_transform(), frame.tag()}; \r
+ \r
+ core::render_item item = {frame.get_pixel_format_desc(), frame.get_textures(), transform_stack_.top(), mode_stack_.back(), frame.tag()}; \r
\r
auto& layer = layers_.back();\r
\r
- auto stream_it = std::find_if(layer.begin(), layer.end(), [&](stream& stream)\r
- {\r
- return stream.front().tag == item.tag;\r
- });\r
-\r
- if(stream_it == layer.end())\r
- layer.push_back(stream(1, item));\r
- else \r
- {\r
- if(std::find(stream_it->begin(), stream_it->end(), item) == stream_it->end())\r
- stream_it->push_back(item); \r
- }\r
+ auto it = boost::range::find(layer, item);\r
+ if(it == layer.end())\r
+ layer.push_back(item);\r
}\r
\r
void end()\r
{\r
transform_stack_.pop();\r
+ mode_stack_.pop_back();\r
}\r
\r
void begin_layer()\r
\r
auto layer = std::move(layers.front());\r
layers.pop();\r
-\r
+ \r
while(!layer.empty())\r
{\r
- auto stream = std::move(layer.front());\r
+ auto item = std::move(layer.front());\r
layer.pop_front();\r
- \r
- render(stream, local_key, layer_key);\r
- \r
- local_key = stream.front().transform.get_is_key();\r
- if(!local_key)\r
+ \r
+ if(item.transform.get_is_key())\r
+ {\r
+ render_item(stream_key_buffer_, item, nullptr, nullptr);\r
+ local_key = true;\r
+ }\r
+ else\r
+ {\r
+ render_item(draw_buffer_, item, local_key ? stream_key_buffer_[0] : nullptr, layer_key ? layer_key_buffer_ : nullptr); \r
stream_key_buffer_[0]->clear();\r
+ local_key = false;\r
+ }\r
\r
channel_.ogl().yield();\r
}\r
\r
return read_buffer;\r
}\r
-\r
- void render(stream& stream, bool local_key, bool layer_key)\r
+ \r
+ void render_item(std::array<std::shared_ptr<device_buffer>,2>& targets, render_item& item, const std::shared_ptr<device_buffer>& local_key, const std::shared_ptr<device_buffer>& layer_key)\r
{\r
- CASPAR_ASSERT(!stream.empty());\r
- \r
- if(stream.front().transform.get_is_key())\r
- {\r
- stream_key_buffer_[1]->attach();\r
- \r
- BOOST_FOREACH(auto item2, stream)\r
- { \r
- kernel_->draw(channel_.get_format_desc().width, channel_.get_format_desc().height, item2.desc, item2.transform, item2.textures, \r
- make_safe(stream_key_buffer_[0]), nullptr, nullptr);\r
- }\r
-\r
- std::swap(stream_key_buffer_[0], stream_key_buffer_[1]);\r
-\r
- stream_key_buffer_[1]->bind();\r
- glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0, channel_.get_format_desc().width, channel_.get_format_desc().height); \r
- }\r
- else\r
- {\r
- draw_buffer_[1]->attach(); \r
+ targets[1]->attach();\r
\r
- BOOST_FOREACH(auto item2, stream)\r
- { \r
- kernel_->draw(channel_.get_format_desc().width, channel_.get_format_desc().height, item2.desc, item2.transform, item2.textures, \r
- make_safe(draw_buffer_[0]), local_key ? stream_key_buffer_[0] : nullptr, layer_key ? layer_key_buffer_ : nullptr); \r
- }\r
+ kernel_->draw(channel_.get_format_desc().width, channel_.get_format_desc().height, item.desc, item.transform, item.mode, item.textures, make_safe(targets[0]), local_key, layer_key);\r
+ \r
+ targets[0]->bind();\r
\r
- std::swap(draw_buffer_[0], draw_buffer_[1]);\r
- \r
- draw_buffer_[1]->bind();\r
- glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0, channel_.get_format_desc().width, channel_.get_format_desc().height);\r
- }\r
+ glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0, channel_.get_format_desc().width, channel_.get_format_desc().height);\r
+ \r
+ std::swap(targets[0], targets[1]);\r
}\r
\r
safe_ptr<write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc)\r
\r
image_transform image_transform_; \r
audio_transform audio_transform_;\r
+\r
+ core::video_mode::type mode_;\r
\r
public:\r
implementation(const std::vector<safe_ptr<basic_frame>>& frames) \r
- : frames_(frames) {}\r
+ : mode_(core::video_mode::progressive), frames_(frames) {}\r
implementation(std::vector<safe_ptr<basic_frame>>&& frames) \r
- : frames_(std::move(frames)){}\r
+ : mode_(core::video_mode::progressive), frames_(std::move(frames)){}\r
implementation(safe_ptr<basic_frame>&& frame) \r
+ : mode_(core::video_mode::progressive)\r
{ frames_.push_back(std::move(frame));}\r
implementation(const safe_ptr<basic_frame>& frame) \r
+ : mode_(core::video_mode::progressive)\r
{ frames_.push_back(frame);}\r
\r
void accept(basic_frame& self, frame_visitor& visitor)\r
basic_frame::basic_frame(std::vector<safe_ptr<basic_frame>>&& frames) : impl_(new implementation(frames)){}\r
basic_frame::basic_frame(const safe_ptr<basic_frame>& frame) : impl_(new implementation(frame)){}\r
basic_frame::basic_frame(safe_ptr<basic_frame>&& frame) : impl_(new implementation(std::move(frame))){}\r
+core::video_mode::type basic_frame::get_mode() const{return impl_->mode_;}\r
void basic_frame::swap(basic_frame& other){impl_.swap(other.impl_);}\r
basic_frame& basic_frame::operator=(const basic_frame& other)\r
{\r
auto my_frame2 = make_safe<basic_frame>(frame2);\r
if(mode == video_mode::upper)\r
{\r
- my_frame1->get_image_transform().set_mode(video_mode::upper); \r
- my_frame2->get_image_transform().set_mode(video_mode::lower); \r
+ my_frame1->impl_->mode_ = video_mode::upper; \r
+ my_frame2->impl_->mode_ = video_mode::lower; \r
} \r
else \r
{ \r
- my_frame1->get_image_transform().set_mode(video_mode::lower); \r
- my_frame2->get_image_transform().set_mode(video_mode::upper); \r
+ my_frame1->impl_->mode_ = video_mode::lower; \r
+ my_frame2->impl_->mode_ = video_mode::upper; \r
}\r
\r
std::vector<safe_ptr<basic_frame>> frames;\r
\r
const audio_transform& get_audio_transform() const;\r
audio_transform& get_audio_transform();\r
+\r
+ core::video_mode::type get_mode() const;\r
\r
static safe_ptr<basic_frame> interlace(const safe_ptr<basic_frame>& frame1, const safe_ptr<basic_frame>& frame2, video_mode::type mode);\r
static safe_ptr<basic_frame> combine(const safe_ptr<basic_frame>& frame1, const safe_ptr<basic_frame>& frame2);\r
, brightness_(1.0)\r
, contrast_(1.0)\r
, saturation_(1.0)\r
- , mode_(video_mode::invalid)\r
, is_key_(false)\r
, deinterlace_(false)\r
, blend_mode_(image_transform::blend_mode::normal)\r
return clip_scale_;\r
}\r
\r
-void image_transform::set_mode(video_mode::type mode)\r
-{\r
- mode_ = mode;\r
-}\r
-\r
-video_mode::type image_transform::get_mode() const\r
-{\r
- return mode_;\r
-}\r
-\r
void image_transform::set_deinterlace(bool value)\r
{\r
deinterlace_ = value;\r
\r
image_transform& image_transform::operator*=(const image_transform &other)\r
{\r
- opacity_ *= other.opacity_;\r
- \r
- if(other.mode_ != video_mode::invalid)\r
- mode_ = other.mode_;\r
-\r
+ opacity_ *= other.opacity_; \r
blend_mode_ = std::max(blend_mode_, other.blend_mode_);\r
alpha_mode_ = std::max(alpha_mode_, other.alpha_mode_);\r
gain_ *= other.gain_;\r
{\r
return tweener(time, source, dest-source, duration);\r
};\r
-\r
- CASPAR_ASSERT(source.get_mode() == dest.get_mode() || source.get_mode() == video_mode::invalid || dest.get_mode() == video_mode::invalid);\r
-\r
+ \r
image_transform result; \r
- result.set_mode (dest.get_mode() != video_mode::invalid ? dest.get_mode() : source.get_mode());\r
result.set_blend_mode (std::max(source.get_blend_mode(), dest.get_blend_mode()));\r
result.set_alpha_mode (std::max(source.get_alpha_mode(), dest.get_alpha_mode()));\r
result.set_is_key (source.get_is_key() | dest.get_is_key());\r
return image_transform::alpha_mode::normal;\r
}\r
\r
+bool operator<(const image_transform& lhs, const image_transform& rhs)\r
+{\r
+	// NOTE(review): byte-wise ordering via memcmp mirrors the existing memcmp\r
+	// operator==, but it is only well-defined if image_transform has no padding\r
+	// bytes with indeterminate values and every member's value is fully captured\r
+	// by its object representation — TODO confirm against the class layout, or\r
+	// switch to member-wise comparison.\r
+	return memcmp(&lhs, &rhs, sizeof(image_transform)) < 0;\r
+}\r
+\r
bool operator==(const image_transform& lhs, const image_transform& rhs)\r
{\r
return memcmp(&lhs, &rhs, sizeof(image_transform)) == 0;\r
\r
void set_clip_scale(double x, double y);\r
std::array<double, 2> get_clip_scale() const;\r
-\r
- void set_mode(video_mode::type mode);\r
- video_mode::type get_mode() const;\r
-\r
+ \r
image_transform& operator*=(const image_transform &other);\r
const image_transform operator*(const image_transform &other) const;\r
\r
\r
image_transform tween(double time, const image_transform& source, const image_transform& dest, double duration, const tweener_t& tweener);\r
\r
+bool operator<(const image_transform& lhs, const image_transform& rhs);\r
bool operator==(const image_transform& lhs, const image_transform& rhs);\r
bool operator!=(const image_transform& lhs, const image_transform& rhs);\r
\r
DEFINE_VIDEOFORMATDESC(video_format::x1080p2997 ,1920, 1080, video_mode::progressive, 30000, 1001, TEXT("1080p2997")),\r
DEFINE_VIDEOFORMATDESC(video_format::x1080p3000 ,1920, 1080, video_mode::progressive, 30, 1, TEXT("1080p3000")),\r
DEFINE_VIDEOFORMATDESC(video_format::x1080p5000 ,1920, 1080, video_mode::progressive, 50, 1, TEXT("1080p5000")),\r
- DEFINE_VIDEOFORMATDESC(video_format::invalid ,0, 0, video_mode::invalid, 1, 1, TEXT("invalid"))\r
+ DEFINE_VIDEOFORMATDESC(video_format::invalid ,0, 0, video_mode::count, 1, 1, TEXT("invalid"))\r
};\r
\r
const video_format_desc& video_format_desc::get(video_format::type format) \r
progressive = 0,\r
lower,\r
upper,\r
- count,\r
- invalid\r
+ count\r
};\r
\r
static std::wstring print(video_mode::type value)\r
\r
display_mode::type get_display_mode(const core::video_mode::type in_mode, double in_fps, const core::video_mode::type out_mode, double out_fps)\r
{ \r
- if(in_mode == core::video_mode::invalid || out_mode == core::video_mode::invalid)\r
- return display_mode::invalid;\r
-\r
static const auto epsilon = 2.0;\r
\r
if(std::abs(in_fps - out_fps) < epsilon)\r
const safe_ptr<core::frame_factory> frame_factory_;\r
std::shared_ptr<AVCodecContext> codec_context_;\r
int index_;\r
- core::video_mode::type mode_;\r
\r
std::queue<std::shared_ptr<AVPacket>> packet_buffer_;\r
\r
public:\r
explicit implementation(const std::shared_ptr<AVFormatContext>& context, const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filter) \r
: frame_factory_(frame_factory)\r
- , mode_(core::video_mode::invalid)\r
, filter_(filter)\r
, fps_(frame_factory_->get_video_format_desc().fps)\r
, nb_frames_(0)\r
return !codec_context_ || !packet_buffer_.empty();\r
}\r
\r
- core::video_mode::type mode()\r
- {\r
- if(!codec_context_)\r
- return frame_factory_->get_video_format_desc().mode;\r
-\r
- return mode_;\r
- }\r
-\r
double fps() const\r
{\r
return fps_;\r
void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
std::vector<std::shared_ptr<AVFrame>> video_decoder::poll(){return impl_->poll();}\r
bool video_decoder::ready() const{return impl_->ready();}\r
-core::video_mode::type video_decoder::mode(){return impl_->mode();}\r
double video_decoder::fps() const{return impl_->fps();}\r
int64_t video_decoder::nb_frames() const{return impl_->nb_frames_;}\r
}
\ No newline at end of file
void push(const std::shared_ptr<AVPacket>& packet);\r
bool ready() const;\r
std::vector<std::shared_ptr<AVFrame>> poll();\r
-\r
- core::video_mode::type mode();\r
-\r
+ \r
int64_t nb_frames() const;\r
\r
double fps() const;\r