}\r
}\r
\r
- // TODO: Optimize\r
- bool has_overlapping_items(const layer& layer, image_transform::blend_mode::type blend_mode)\r
- {\r
- if(layer.size() < 2)\r
- return false; \r
- \r
- implementation::layer fill;\r
-\r
- std::copy_if(layer.begin(), layer.end(), std::back_inserter(fill), [&](const render_item& item)\r
- {\r
- return !item.transform.get_is_key();\r
- });\r
- \r
- if(blend_mode == image_transform::blend_mode::normal) // Only overlap if opacity\r
- {\r
- return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
- {\r
- return item.transform.get_opacity() < 1.0 - 0.001;\r
- });\r
- }\r
-\r
- // Simple solution, just check if we have differnt video streams / tags.\r
- return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
- {\r
- return item.tag != fill.front().tag;\r
- });\r
- } \r
- \r
- render_item create_render_item(const safe_ptr<device_buffer>& buffer, image_transform::blend_mode::type blend_mode)\r
- {\r
- CASPAR_ASSERT(buffer->stride() == 4 && "Only used for bgra textures");\r
-\r
- pixel_format_desc desc;\r
- desc.pix_fmt = pixel_format::bgra;\r
- desc.planes.push_back(pixel_format_desc::plane(channel_.get_format_desc().width, channel_.get_format_desc().height, 4));\r
-\r
- std::vector<safe_ptr<device_buffer>> textures;\r
- textures.push_back(buffer);\r
- \r
- image_transform transform;\r
- transform.set_blend_mode(blend_mode);\r
-\r
- return render_item(desc, std::move(textures), transform, video_mode::progressive, nullptr); \r
- }\r
+ //// TODO: Optimize\r
+ //bool has_overlapping_items(const layer& layer, image_transform::blend_mode::type blend_mode)\r
+ //{\r
+ // if(layer.size() < 2)\r
+ // return false; \r
+ // \r
+ // implementation::layer fill;\r
+\r
+ // std::copy_if(layer.begin(), layer.end(), std::back_inserter(fill), [&](const render_item& item)\r
+ // {\r
+ // return !item.transform.get_is_key();\r
+ // });\r
+ // \r
+ // if(blend_mode == image_transform::blend_mode::normal) // Only overlap if opacity\r
+ // {\r
+ // return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
+ // {\r
+ // return item.transform.get_opacity() < 1.0 - 0.001;\r
+ // });\r
+ // }\r
+\r
+	// // Simple solution, just check if we have different video streams / tags.
+ // return std::any_of(fill.begin(), fill.end(), [&](const render_item& item)\r
+ // {\r
+ // return item.tag != fill.front().tag;\r
+ // });\r
+ //} \r
+ // \r
+ //render_item create_render_item(const safe_ptr<device_buffer>& buffer, image_transform::blend_mode::type blend_mode)\r
+ //{\r
+ // CASPAR_ASSERT(buffer->stride() == 4 && "Only used for bgra textures");\r
+\r
+ // pixel_format_desc desc;\r
+ // desc.pix_fmt = pixel_format::bgra;\r
+ // desc.planes.push_back(pixel_format_desc::plane(channel_.get_format_desc().width, channel_.get_format_desc().height, 4));\r
+\r
+ // std::vector<safe_ptr<device_buffer>> textures;\r
+ // textures.push_back(buffer);\r
+ // \r
+ // image_transform transform;\r
+ // transform.set_blend_mode(blend_mode);\r
+\r
+ // return render_item(desc, std::move(textures), transform, video_mode::progressive, nullptr); \r
+ //}\r
\r
safe_ptr<device_buffer> create_device_buffer(size_t stride)\r
{\r