return make_safe_ptr(buffer);\r
}\r
\r
-safe_ptr<device_buffer> ogl_device::create_device_buffer(int width, int height, int stride, bool zero)\r
+safe_ptr<device_buffer> ogl_device::create_device_buffer(int width, int height, int stride)\r
{\r
CASPAR_VERIFY(stride > 0 && stride < 5);\r
CASPAR_VERIFY(width > 0 && height > 0);\r
std::shared_ptr<device_buffer> buffer;\r
\r
if(!pool->items.try_pop(buffer)) \r
- buffer = executor_.invoke([&]{return allocate_device_buffer(width, height, stride);});\r
-\r
- if(zero)\r
+ buffer = executor_.invoke([&]{return allocate_device_buffer(width, height, stride);});\r
+ \r
+ return safe_ptr<device_buffer>(buffer.get(), [=](device_buffer*) mutable\r
{ \r
- executor_.invoke([&]\r
+ executor_.begin_invoke([=]\r
{\r
- scoped_state scope(*this);\r
- attach(*buffer);\r
+ auto prev = attach(buffer->id());\r
glClear(GL_COLOR_BUFFER_BIT);\r
- }, high_priority); \r
- } \r
- \r
- //++pool->usage_count;\r
-\r
- return safe_ptr<device_buffer>(buffer.get(), [=](device_buffer*) mutable\r
- { \r
- pool->items.push(buffer); \r
+ attach(prev);\r
+ pool->items.push(buffer);\r
+ }, high_priority); \r
});\r
}\r
\r
}\r
}\r
\r
-void ogl_device::attach(GLint id)\r
+GLint ogl_device::attach(GLint id)\r
{ \r
+ auto prev = state_.attached_texture;\r
if(id != state_.attached_texture)\r
{\r
GL(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0, GL_TEXTURE_2D, id, 0));\r
\r
state_.attached_texture = id;\r
}\r
+ return prev;\r
}\r
\r
void ogl_device::attach(const device_buffer& texture)\r
ogl_device();\r
\r
void use(GLint id);\r
- void attach(GLint id);\r
+ GLint attach(GLint id);\r
void bind(GLint id, int index); \r
void flush();\r
\r
friend class scoped_state;\r
+ \r
public: \r
void push_state();\r
state pop_state();\r
\r
void bind(const device_buffer& texture, int index);\r
\r
-\r
	// thread-safe
template<typename Func>\r
auto begin_invoke(Func&& func, task_priority priority = normal_priority) -> boost::unique_future<decltype(func())> // noexcept\r
return executor_.invoke(std::forward<Func>(func), priority);\r
}\r
\r
- safe_ptr<device_buffer> create_device_buffer(int width, int height, int stride, bool zero = false);\r
+ safe_ptr<device_buffer> create_device_buffer(int width, int height, int stride);\r
safe_ptr<host_buffer> create_host_buffer(int size, host_buffer::usage_t usage);\r
\r
boost::unique_future<safe_ptr<host_buffer>> transfer(const safe_ptr<device_buffer>& source);\r
\r
boost::unique_future<safe_ptr<host_buffer>> operator()(std::vector<layer>&& layers, const video_format_desc& format_desc)\r
{ \r
- auto draw_buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, 4, true);\r
+ auto draw_buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
\r
if(format_desc.field_mode != field_mode::progressive)\r
{\r
\r
if(layer.first != blend_mode::normal)\r
{\r
- auto layer_draw_buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, 4, true);\r
+ auto layer_draw_buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
\r
BOOST_FOREACH(auto& item, layer.second)\r
draw_item(std::move(item), layer_draw_buffer, layer_key_buffer, local_key_buffer, local_mix_buffer, format_desc); \r
\r
if(item.transform.is_key)\r
{\r
- local_key_buffer = local_key_buffer ? local_key_buffer : ogl_->create_device_buffer(format_desc.width, format_desc.height, 4, true);\r
+ local_key_buffer = local_key_buffer ? local_key_buffer : ogl_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
\r
draw_params.background = local_key_buffer;\r
draw_params.local_key = nullptr;\r
}\r
else if(item.transform.is_mix)\r
{\r
- local_mix_buffer = local_mix_buffer ? local_mix_buffer : ogl_->create_device_buffer(format_desc.width, format_desc.height, 4, true);\r
+ local_mix_buffer = local_mix_buffer ? local_mix_buffer : ogl_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
\r
draw_params.background = local_mix_buffer;\r
draw_params.local_key = std::move(local_key_buffer);\r