}\r
}\r
\r
- // TODO: Optimize\r
- bool has_overlapping_items(const layer& layer, image_transform::blend_mode::type blend_mode)\r
- {\r
- if(layer.size() < 2)\r
- return false; \r
- \r
- implementation::layer fill;\r
-\r
- std::copy_if(layer.begin(), layer.end(), std::back_inserter(fill), [&](const render_item& item)\r
- {\r
- return !item.transform.get_is_key();\r
- });\r
- \r
- if(blend_mode == image_transform::blend_mode::normal) // Only overlap if opacity\r
- {\r
- return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
- {\r
- return item.transform.get_opacity() < 1.0 - 0.001;\r
- });\r
- }\r
-\r
- // Simple solution, just check if we have differnt video streams / tags.\r
- return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
- {\r
- return item.tag != fill.front().tag;\r
- });\r
- } \r
- \r
- render_item create_render_item(const safe_ptr<device_buffer>& buffer, image_transform::blend_mode::type blend_mode)\r
- {\r
- CASPAR_ASSERT(buffer->stride() == 4 && "Only used for bgra textures");\r
-\r
- pixel_format_desc desc;\r
- desc.pix_fmt = pixel_format::bgra;\r
- desc.planes.push_back(pixel_format_desc::plane(channel_.get_format_desc().width, channel_.get_format_desc().height, 4));\r
-\r
- std::vector<safe_ptr<device_buffer>> textures;\r
- textures.push_back(buffer);\r
- \r
- image_transform transform;\r
- transform.set_blend_mode(blend_mode);\r
-\r
- return render_item(desc, std::move(textures), transform, video_mode::progressive, nullptr); \r
- }\r
+ //// TODO: Optimize\r
+ //bool has_overlapping_items(const layer& layer, image_transform::blend_mode::type blend_mode)\r
+ //{\r
+ // if(layer.size() < 2)\r
+ // return false; \r
+ // \r
+ // implementation::layer fill;\r
+\r
+ // std::copy_if(layer.begin(), layer.end(), std::back_inserter(fill), [&](const render_item& item)\r
+ // {\r
+ // return !item.transform.get_is_key();\r
+ // });\r
+ // \r
+ // if(blend_mode == image_transform::blend_mode::normal) // Only overlap if opacity\r
+ // {\r
+ // return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
+ // {\r
+ // return item.transform.get_opacity() < 1.0 - 0.001;\r
+ // });\r
+ // }\r
+\r
+	// // Simple solution, just check if we have different video streams / tags.
+ // return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
+ // {\r
+ // return item.tag != fill.front().tag;\r
+ // });\r
+ //} \r
+ // \r
+ //render_item create_render_item(const safe_ptr<device_buffer>& buffer, image_transform::blend_mode::type blend_mode)\r
+ //{\r
+ // CASPAR_ASSERT(buffer->stride() == 4 && "Only used for bgra textures");\r
+\r
+ // pixel_format_desc desc;\r
+ // desc.pix_fmt = pixel_format::bgra;\r
+ // desc.planes.push_back(pixel_format_desc::plane(channel_.get_format_desc().width, channel_.get_format_desc().height, 4));\r
+\r
+ // std::vector<safe_ptr<device_buffer>> textures;\r
+ // textures.push_back(buffer);\r
+ // \r
+ // image_transform transform;\r
+ // transform.set_blend_mode(blend_mode);\r
+\r
+ // return render_item(desc, std::move(textures), transform, video_mode::progressive, nullptr); \r
+ //}\r
\r
safe_ptr<device_buffer> create_device_buffer(size_t stride)\r
{\r
\r
image_transform::image_transform() \r
: opacity_(1.0)\r
- , gain_(1.0)\r
, brightness_(1.0)\r
, contrast_(1.0)\r
, saturation_(1.0)\r
return opacity_;\r
}\r
\r
-void image_transform::set_gain(double value)\r
-{\r
- gain_ = std::max(0.0, value);\r
-}\r
-\r
-double image_transform::get_gain() const\r
-{\r
- return gain_;\r
-}\r
-\r
void image_transform::set_brightness(double value)\r
{\r
brightness_ = std::max(0.0, value);\r
{\r
opacity_ *= other.opacity_; \r
blend_mode_ = std::max(blend_mode_, other.blend_mode_);\r
- gain_ *= other.gain_;\r
brightness_ *= other.brightness_;\r
contrast_ *= other.contrast_;\r
saturation_ *= other.saturation_;\r
image_transform result; \r
result.set_blend_mode (std::max(source.get_blend_mode(), dest.get_blend_mode()));\r
result.set_is_key (source.get_is_key() | dest.get_is_key());\r
- result.set_gain (do_tween(time, source.get_gain(), dest.get_gain(), duration, tweener));\r
result.set_brightness (do_tween(time, source.get_brightness(), dest.get_brightness(), duration, tweener));\r
result.set_contrast (do_tween(time, source.get_contrast(), dest.get_contrast(), duration, tweener));\r
result.set_saturation (do_tween(time, source.get_saturation(), dest.get_saturation(), duration, tweener));\r
int layer = GetLayerIndex();\r
GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween);\r
}\r
- else if(_parameters[1] == L"GAIN")\r
- {\r
- int duration = _parameters.size() > 3 ? lexical_cast_or_default(_parameters[3], 0) : 0;\r
- std::wstring tween = _parameters.size() > 4 ? _parameters[4] : L"linear";\r
- double value = boost::lexical_cast<double>(_parameters.at(2));\r
- \r
- auto transform = [=](image_transform transform) -> image_transform\r
- {\r
- transform.set_gain(value);\r
- return transform; \r
- };\r
-\r
- int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween);\r
- }\r
else if(_parameters[1] == L"FILL_RECT")\r
{\r
int duration = _parameters.size() > 6 ? lexical_cast_or_default(_parameters[6], 0) : 0;\r