* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
*\r
*/\r
+// TODO: Smart GC\r
+\r
#include "../../stdafx.h"\r
\r
#include "ogl_device.h"\r
// NOTE(review): orphan fragment — interior of a larger routine (presumably the
// ogl_device constructor's GL-state cache reset; the enclosing signature is not
// visible in this chunk). Unresolved diff artifact ('+' prefix) left untouched.
// Zeroes the cached bind/viewport/scissor/blend state so the first real GL call
// is not skipped by the change-detection caches — TODO confirm against header.
std::fill(binded_textures_.begin(), binded_textures_.end(), 0);
std::fill(viewport_.begin(), viewport_.end(), 0);
std::fill(scissor_.begin(), scissor_.end(), 0);
+ std::fill(blend_func_.begin(), blend_func_.end(), 0);
\r
// NOTE(review): incomplete fragment — concatenated diff hunks of what appears
// to be ogl_device::create_device_buffer (signature and several context lines
// are missing; braces do not balance as shown). Code left byte-identical,
// including the '+'/'-' diff markers, pending a proper merge resolution.
invoke([=]
{
{
// Pool key packs width/height into one int; stride selects the pool family.
CASPAR_VERIFY(stride > 0 && stride < 5);
CASPAR_VERIFY(width > 0 && height > 0);
- auto pool = device_pools_[stride-1][((width << 16) & 0xFFFF0000) | (height & 0x0000FFFF)];
+ auto& pool = device_pools_[stride-1][((width << 16) & 0xFFFF0000) | (height & 0x0000FFFF)];
std::shared_ptr<device_buffer> buffer;
- if(!pool->try_pop(buffer))
+ if(!pool->items.try_pop(buffer))
{
executor_.invoke([&]
{
throw;
}
}
-
}, high_priority);
}
-
- return safe_ptr<device_buffer>(buffer.get(), [=](device_buffer*)
- {
- pool->push(buffer);
+
// On the '+' side the deleter recycles the buffer into the pool instead of
// destroying it, and usage_count feeds the flush()/GC heuristic.
+ ++pool->usage_count;
+
+ return safe_ptr<device_buffer>(buffer.get(), [=](device_buffer*) mutable
+ {
+ pool->items.push(buffer);
});
}
\r
// NOTE(review): incomplete fragment — concatenated diff hunks of what appears
// to be ogl_device::create_host_buffer (signature and context lines missing;
// braces do not balance as shown). Code left byte-identical, including the
// '+'/'-' diff markers, pending a proper merge resolution.
{
CASPAR_VERIFY(usage == host_buffer::write_only || usage == host_buffer::read_only);
CASPAR_VERIFY(size > 0);
- auto pool = host_pools_[usage][size];
+ auto& pool = host_pools_[usage][size];
std::shared_ptr<host_buffer> buffer;
- if(!pool->try_pop(buffer))
+ if(!pool->items.try_pop(buffer))
{
executor_.invoke([&]
{
throw;
}
}
-
}, high_priority);
}

- return safe_ptr<host_buffer>(buffer.get(), [=](host_buffer*)
+ ++pool->usage_count;
+
+ return safe_ptr<host_buffer>(buffer.get(), [=](host_buffer*) mutable
{
// Deleter re-maps/unmaps on the GL thread before recycling the buffer:
// write_only buffers are re-mapped ready for the next producer, read_only
// buffers are unmapped — then pushed back into the pool.
- executor_.begin_invoke([=]
- {
+ executor_.begin_invoke([=]() mutable
+ {
if(usage == host_buffer::write_only)
buffer->map();
else
buffer->unmap();
-
- pool->push(buffer);

- }, high_priority);
+ pool->items.push(buffer);
+ }, high_priority);
});
}
\r
+template<typename T>\r
+void flush_pool(buffer_pool<T>& pool)\r
+{ \r
+ if(pool.flush_count.fetch_and_increment() < 16)\r
+ return;\r
+\r
+ if(pool.usage_count.fetch_and_store(0) < pool.items.size())\r
+ {\r
+ std::shared_ptr<T> buffer;\r
+ pool.items.try_pop(buffer);\r
+ }\r
+\r
+ pool.flush_count = 0;\r
+ pool.usage_count = 0;\r
+}\r
+\r
+void ogl_device::flush()\r
+{\r
+ GL(glFlush()); \r
+ \r
+ try\r
+ {\r
+ BOOST_FOREACH(auto& pools, device_pools_)\r
+ {\r
+ BOOST_FOREACH(auto& pool, pools)\r
+ flush_pool(*pool.second);\r
+ }\r
+ BOOST_FOREACH(auto& pools, host_pools_)\r
+ {\r
+ BOOST_FOREACH(auto& pool, pools)\r
+ flush_pool(*pool.second);\r
+ }\r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG_CURRENT_EXCEPTION();\r
+ }\r
+}\r
+\r
void ogl_device::yield()\r
{\r
executor_.yield();\r
BOOST_FOREACH(auto& pools, device_pools_)\r
{\r
BOOST_FOREACH(auto& pool, pools)\r
- pool.second->clear();\r
+ pool.second->items.clear();\r
}\r
BOOST_FOREACH(auto& pools, host_pools_)\r
{\r
BOOST_FOREACH(auto& pool, pools)\r
- pool.second->clear();\r
+ pool.second->items.clear();\r
}\r
}\r
// NOTE(review): incomplete fragment — a stray catch(...) header from an elided
// function, followed by the interior of what is presumably ogl_device::attach
// (its signature is outside this chunk). The '+' line wraps the raw GL call in
// the GL() error-check macro; code left byte-identical pending merge cleanup.
catch(...)
{
// Skip the FBO re-attach when the texture is already the active attachment.
if(attached_texture_ != texture.id())
{
- glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0, GL_TEXTURE_2D, texture.id(), 0);
+ GL(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0, GL_TEXTURE_2D, texture.id(), 0));
attached_texture_ = texture.id();
}
}
/// Clears |texture|: attaches it as the framebuffer's color target (via
/// attach, which caches the attachment) and clears the color buffer.
void ogl_device::clear(device_buffer& texture)
{
	attach(texture);
	GL(glClear(GL_COLOR_BUFFER_BIT)); // GL() adds error checking around the call
}
\r
/// Activates |shader|'s GL program, skipping the driver call when the cached
/// active_shader_ id shows it is already bound.
void ogl_device::use(shader& shader)
{
	if(active_shader_ != shader.id())
	{
		GL(glUseProgramObjectARB(shader.id()));
		active_shader_ = shader.id();
	}
}
\r
+void ogl_device::blend_func(int c1, int c2, int a1, int a2)\r
+{\r
+ std::array<int, 4> func = {c1, c2, a1, a2};\r
+\r
+ if(blend_func_ != func)\r
+ {\r
+ blend_func_ = func;\r
+ GL(glBlendFuncSeparate(c1, c2, a1, a2));\r
+ }\r
+}\r
+\r
+void ogl_device::blend_func(int c1, int c2)\r
+{\r
+ blend_func(c1, c2, c1, c2);\r
+}\r
+\r
}}\r
\r