\r
#include "shader.h"\r
\r
-#include <common/except.h>\r
#include <common/assert.h>\r
+#include <common/except.h>\r
#include <common/gl/gl_check.h>\r
\r
#include <boost/foreach.hpp>\r
\r
#include <gl/glew.h>\r
\r
-namespace caspar { namespace accelerator { namespace ogl {\r
+#include <SFML/Window/Context.hpp>\r
\r
-context::context() \r
- : executor_(L"context")\r
+#include <array>\r
+#include <unordered_map>\r
+\r
+#include <tbb/concurrent_unordered_map.h>\r
+#include <tbb/concurrent_queue.h>\r
+\r
+namespace caspar { namespace accelerator { namespace ogl {\r
+ \r
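+// Pimpl for the OpenGL context: owns the SFML GL context, the buffer pools and the shared FBO; all GL work is funnelled through the executor owned by the context facade.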
+struct context::impl : public std::enable_shared_from_this<impl>\r
{\r
- CASPAR_LOG(info) << L"Initializing OpenGL Device.";\r
- \r
- invoke([=]\r
+ std::unique_ptr<sf::Context> context_;\r
+ \r
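+	// Buffer pools: device_pools_ is indexed by stride (1..4 bytes per pixel), host_pools_ by usage; each maps a size key to a queue of buffers available for reuse.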
+ std::array<tbb::concurrent_unordered_map<int, tbb::concurrent_bounded_queue<std::shared_ptr<device_buffer>>>, 4> device_pools_;\r
+ std::array<tbb::concurrent_unordered_map<int, tbb::concurrent_bounded_queue<std::shared_ptr<host_buffer>>>, 2> host_pools_;\r
+ \r
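+	// Single FBO, created and bound once at initialization; textures are attached to it for clearing and readback.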
+ GLuint fbo_;\r
+\r
+ executor& executor_;\r
+ \r
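+	// Construction runs on the executor's thread: create the SFML GL context, make it current, initialize GLEW and create the shared FBO.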
+ impl(executor& executor) \r
+ : executor_(executor)\r
{\r
- context_.reset(new sf::Context());\r
- context_->SetActive(true);\r
+ CASPAR_LOG(info) << L"Initializing OpenGL Device.";\r
+ \r
+ executor_.invoke([=]\r
+ {\r
+ context_.reset(new sf::Context());\r
+ context_->SetActive(true);\r
\r
- if (glewInit() != GLEW_OK)\r
- BOOST_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize GLEW."));\r
+ if (glewInit() != GLEW_OK)\r
+ BOOST_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize GLEW."));\r
\r
- CASPAR_LOG(info) << L"OpenGL " << version();\r
+ CASPAR_LOG(info) << L"OpenGL " << version();\r
\r
- if(!GLEW_VERSION_3_0)\r
- BOOST_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Your graphics card does not meet the minimum hardware requirements since it does not support OpenGL 3.0 or higher. CasparCG Server will not be able to continue."));\r
+ if(!GLEW_VERSION_3_0)\r
+ BOOST_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Your graphics card does not meet the minimum hardware requirements since it does not support OpenGL 3.0 or higher. CasparCG Server will not be able to continue."));\r
\r
- glGenFramebuffers(1, &fbo_); \r
- \r
- CASPAR_LOG(info) << L"Successfully initialized OpenGL Device.";\r
- });\r
-}\r
+ glGenFramebuffers(1, &fbo_); \r
+ glBindFramebuffer(GL_FRAMEBUFFER, fbo_);\r
\r
-context::~context()\r
-{\r
- invoke([=]\r
- {\r
- BOOST_FOREACH(auto& pool, device_pools_)\r
- pool.clear();\r
- BOOST_FOREACH(auto& pool, host_pools_)\r
- pool.clear();\r
- glDeleteFramebuffers(1, &fbo_);\r
- });\r
-}\r
+ CASPAR_LOG(info) << L"Successfully initialized OpenGL Device.";\r
+ });\r
+ }\r
\r
-spl::shared_ptr<device_buffer> context::allocate_device_buffer(int width, int height, int stride)\r
-{\r
- std::shared_ptr<device_buffer> buffer;\r
- try\r
+ ~impl()\r
{\r
- buffer.reset(new device_buffer(shared_from_this(), width, height, stride));\r
+ executor_.invoke([=]\r
+ {\r
+ BOOST_FOREACH(auto& pool, device_pools_)\r
+ pool.clear();\r
+ BOOST_FOREACH(auto& pool, host_pools_)\r
+ pool.clear();\r
+ glDeleteFramebuffers(1, &fbo_);\r
+ });\r
}\r
- catch(...)\r
+\r
+ spl::shared_ptr<device_buffer> allocate_device_buffer(int width, int height, int stride)\r
{\r
+ std::shared_ptr<device_buffer> buffer;\r
try\r
{\r
- executor_.yield();\r
- gc().wait();\r
- \r
- // Try again\r
- buffer.reset(new device_buffer(shared_from_this(), width, height, stride));\r
+ buffer.reset(new device_buffer(width, height, stride));\r
}\r
catch(...)\r
{\r
CASPAR_LOG(error) << L"ogl: create_device_buffer failed!";\r
throw;\r
}\r
+ return spl::make_shared_ptr(buffer);\r
}\r
- return spl::make_shared_ptr(buffer);\r
-}\r
\r
-spl::shared_ptr<device_buffer> context::create_device_buffer(int width, int height, int stride)\r
-{\r
- CASPAR_VERIFY(stride > 0 && stride < 5);\r
- CASPAR_VERIFY(width > 0 && height > 0);\r
- auto& pool = device_pools_[stride-1][((width << 16) & 0xFFFF0000) | (height & 0x0000FFFF)];\r
- std::shared_ptr<device_buffer> buffer;\r
- if(!pool->items.try_pop(buffer)) \r
- buffer = executor_.invoke([&]{return allocate_device_buffer(width, height, stride);}, task_priority::high_priority); \r
- \r
- //++pool->usage_count;\r
-\r
- return spl::shared_ptr<device_buffer>(buffer.get(), [=](device_buffer*) mutable\r
- { \r
- pool->items.push(buffer); \r
- });\r
-}\r
-\r
-spl::shared_ptr<host_buffer> context::allocate_host_buffer(int size, host_buffer::usage usage)\r
-{\r
- std::shared_ptr<host_buffer> buffer;\r
-\r
- try\r
+ spl::shared_ptr<device_buffer> create_device_buffer(int width, int height, int stride)\r
{\r
- buffer.reset(new host_buffer(shared_from_this(), size, usage));\r
- if(usage == host_buffer::usage::write_only)\r
- buffer->map();\r
- else\r
- buffer->unmap(); \r
+ CASPAR_VERIFY(stride > 0 && stride < 5);\r
+ CASPAR_VERIFY(width > 0 && height > 0);\r
+ \r
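+		// The pool key packs width into the high 16 bits and height into the low 16 bits, so each bucket only holds buffers of one exact size (assumes both dimensions fit in 16 bits).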
+ auto pool = &device_pools_[stride-1][((width << 16) & 0xFFFF0000) | (height & 0x0000FFFF)];\r
+ \r
+ std::shared_ptr<device_buffer> buffer;\r
+ if(!pool->try_pop(buffer)) \r
+ buffer = executor_.invoke([&]{return allocate_device_buffer(width, height, stride);}, task_priority::high_priority); \r
+ \r
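+		// The deleter recycles the buffer into the pool instead of destroying it; capturing self keeps the impl (and thus the pool) alive until the buffer is returned.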
+ auto self = shared_from_this();\r
+ return spl::shared_ptr<device_buffer>(buffer.get(), [self, buffer, pool](device_buffer*) mutable\r
+ { \r
+ pool->push(buffer); \r
+ });\r
}\r
- catch(...)\r
+\r
+ spl::shared_ptr<host_buffer> allocate_host_buffer(int size, host_buffer::usage usage)\r
{\r
+ std::shared_ptr<host_buffer> buffer;\r
+\r
try\r
{\r
- executor_.yield();\r
- gc().wait();\r
-\r
- // Try again\r
- buffer.reset(new host_buffer(shared_from_this(), size, usage));\r
+ buffer.reset(new host_buffer(size, usage));\r
if(usage == host_buffer::usage::write_only)\r
buffer->map();\r
else\r
- buffer->unmap(); \r
+ buffer->unmap(); \r
}\r
catch(...)\r
{\r
CASPAR_LOG(error) << L"ogl: create_host_buffer failed!";\r
- throw; \r
+ throw; \r
}\r
- }\r
\r
- return spl::make_shared_ptr(buffer);\r
-}\r
- \r
-spl::shared_ptr<host_buffer> context::create_host_buffer(int size, host_buffer::usage usage)\r
-{\r
- CASPAR_VERIFY(usage == host_buffer::usage::write_only || usage == host_buffer::usage::read_only);\r
- CASPAR_VERIFY(size > 0);\r
- auto& pool = host_pools_[usage.value()][size];\r
- std::shared_ptr<host_buffer> buffer;\r
- if(!pool->items.try_pop(buffer)) \r
- buffer = executor_.invoke([=]{return allocate_host_buffer(size, usage);}, task_priority::high_priority); \r
+ return spl::make_shared_ptr(buffer);\r
+ }\r
\r
- auto self = shared_from_this();\r
- bool is_write_only = (usage == host_buffer::usage::write_only);\r
- return spl::shared_ptr<host_buffer>(buffer.get(), [=](host_buffer*) mutable\r
+ spl::shared_ptr<host_buffer> create_host_buffer(int size, host_buffer::usage usage)\r
{\r
- self->executor_.begin_invoke([=]() mutable\r
- { \r
- if(is_write_only)\r
- buffer->map();\r
- else\r
- buffer->unmap();\r
-\r
- pool->items.push(buffer);\r
- }, task_priority::high_priority); \r
- });\r
-}\r
-\r
-spl::shared_ptr<context> context::create()\r
-{\r
- return spl::shared_ptr<context>(new context());\r
-}\r
-\r
-//template<typename T>\r
-//void flush_pool(buffer_pool<T>& pool)\r
-//{ \r
-// if(pool.flush_count.fetch_and_increment() < 16)\r
-// return;\r
-//\r
-// if(pool.usage_count.fetch_and_store(0) < pool.items.size())\r
-// {\r
-// std::shared_ptr<T> buffer;\r
-// pool.items.try_pop(buffer);\r
-// }\r
-//\r
-// pool.flush_count = 0;\r
-// pool.usage_count = 0;\r
-//}\r
-\r
-boost::unique_future<void> context::gc()\r
-{ \r
- return begin_invoke([=]\r
- {\r
- CASPAR_LOG(info) << " ogl: Running GC."; \r
+ CASPAR_VERIFY(usage == host_buffer::usage::write_only || usage == host_buffer::usage::read_only);\r
+ CASPAR_VERIFY(size > 0);\r
+ \r
+ auto pool = &host_pools_[usage.value()][size];\r
+ \r
+ std::shared_ptr<host_buffer> buffer;\r
+ if(!pool->try_pop(buffer)) \r
+ buffer = executor_.invoke([=]{return allocate_host_buffer(size, usage);}, task_priority::high_priority); \r
\r
- try\r
+ auto self = shared_from_this();\r
+ bool is_write = (usage == host_buffer::usage::write_only);\r
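+		// Recycle on release: the deleter re-maps (write) or unmaps (read) the buffer on the GL thread before pushing it back into the pool; self keeps the pool alive.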
+ return spl::shared_ptr<host_buffer>(buffer.get(), [self, is_write, buffer, pool](host_buffer*) mutable\r
{\r
- BOOST_FOREACH(auto& pools, device_pools_)\r
- {\r
- BOOST_FOREACH(auto& pool, pools)\r
- pool.second->items.clear();\r
- }\r
- BOOST_FOREACH(auto& pools, host_pools_)\r
- {\r
- BOOST_FOREACH(auto& pool, pools)\r
- pool.second->items.clear();\r
- }\r
- }\r
- catch(...)\r
+ self->executor_.begin_invoke([=]() mutable\r
+ { \r
+ if(is_write)\r
+ buffer->map();\r
+ else\r
+ buffer->unmap();\r
+\r
+ pool->push(buffer);\r
+ }, task_priority::high_priority); \r
+ });\r
+ }\r
+ \r
+ std::wstring version()\r
+ { \r
+ static std::wstring ver = L"Not found";\r
+ try\r
{\r
- CASPAR_LOG_CURRENT_EXCEPTION();\r
+ ver = u16(executor_.invoke([]{return std::string(reinterpret_cast<const char*>(glGetString(GL_VERSION)));})\r
+ + " " + executor_.invoke([]{return std::string(reinterpret_cast<const char*>(glGetString(GL_VENDOR)));})); \r
}\r
- }, task_priority::high_priority);\r
-}\r
+ catch(...){}\r
\r
-std::wstring context::version()\r
-{ \r
- static std::wstring ver = L"Not found";\r
- try\r
+ return ver;\r
+ }\r
+ \r
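+	// Schedules the host-to-device upload on the GL executor at high priority and returns a future to the resulting device buffer.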
+ boost::unique_future<spl::shared_ptr<device_buffer>> copy_async(spl::shared_ptr<host_buffer>& source, int width, int height, int stride)\r
{\r
- ver = u16(invoke([]{return std::string(reinterpret_cast<const char*>(glGetString(GL_VERSION)));})\r
- + " " + invoke([]{return std::string(reinterpret_cast<const char*>(glGetString(GL_VENDOR)));})); \r
+ return executor_.begin_invoke([=]() -> spl::shared_ptr<device_buffer>\r
+ {\r
+ auto result = create_device_buffer(width, height, stride);\r
+ result->copy_from(*source);\r
+ return result;\r
+ }, task_priority::high_priority);\r
}\r
- catch(...){}\r
-\r
- return ver;\r
-}\r
-\r
-void context::attach(device_buffer& texture)\r
-{ \r
- glBindFramebuffer(GL_FRAMEBUFFER, fbo_);\r
- GL(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0, GL_TEXTURE_2D, texture.id(), 0));\r
-}\r
-\r
-void context::clear(device_buffer& texture)\r
-{ \r
- attach(texture);\r
- GL(glClear(GL_COLOR_BUFFER_BIT));\r
-}\r
-\r
-void context::use(shader& shader)\r
-{ \r
- GL(glUseProgramObjectARB(shader.id())); \r
-}\r
\r
-boost::unique_future<spl::shared_ptr<device_buffer>> context::copy_async(spl::shared_ptr<host_buffer>& source, int width, int height, int stride)\r
-{\r
- return executor_.begin_invoke([=]() -> spl::shared_ptr<device_buffer>\r
+ void yield()\r
{\r
- auto result = create_device_buffer(width, height, stride);\r
- result->copy_from(source);\r
- return result;\r
- }, task_priority::high_priority);\r
-}\r
+ executor_.yield(task_priority::high_priority);\r
+ }\r
+};\r
\r
-void context::yield()\r
+context::context() \r
+ : executor_(L"context")\r
+ , impl_(new impl(executor_))\r
{\r
- executor_.yield(task_priority::high_priority);\r
}\r
+ \r
+void context::yield(){impl_->yield();} \r
+spl::shared_ptr<device_buffer> context::create_device_buffer(int width, int height, int stride){return impl_->create_device_buffer(width, height, stride);}\r
+spl::shared_ptr<host_buffer> context::create_host_buffer(int size, host_buffer::usage usage){return impl_->create_host_buffer(size, usage);}\r
+boost::unique_future<spl::shared_ptr<device_buffer>> context::copy_async(spl::shared_ptr<host_buffer>& source, int width, int height, int stride){return impl_->copy_async(source, width, height, stride);}\r
+std::wstring context::version(){return impl_->version();}\r
+\r
\r
}}}\r
\r
\r
struct device_buffer::impl : boost::noncopyable\r
{\r
- std::weak_ptr<context> parent_;\r
GLuint id_;\r
\r
const int width_;\r
const int height_;\r
const int stride_;\r
public:\r
- impl(std::weak_ptr<context> parent, int width, int height, int stride) \r
- : parent_(parent)\r
- , width_(width)\r
+ impl(int width, int height, int stride) \r
+ : width_(width)\r
, height_(height)\r
, stride_(stride)\r
{ \r
{\r
GL(glBindTexture(GL_TEXTURE_2D, 0));\r
}\r
- \r
- void copy_from(const spl::shared_ptr<host_buffer>& source)\r
- {\r
- auto ogl = parent_.lock();\r
- if(!ogl)\r
- BOOST_THROW_EXCEPTION(invalid_operation());\r
\r
- ogl->begin_invoke([=]\r
- {\r
- source->unmap();\r
- source->bind();\r
- GL(glBindTexture(GL_TEXTURE_2D, id_));\r
- GL(glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width_, height_, FORMAT[stride_], TYPE[stride_], NULL));\r
- GL(glBindTexture(GL_TEXTURE_2D, 0));\r
- source->unbind();\r
- }, task_priority::high_priority);\r
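+	// Attach this texture as color attachment 0 of the currently bound FBO.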
+ void attach()\r
+ { \r
+ GL(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0, GL_TEXTURE_2D, id_, 0));\r
}\r
\r
- void copy_to(const spl::shared_ptr<host_buffer>& dest)\r
+ void clear()\r
{\r
- auto ogl = parent_.lock();\r
- if(!ogl)\r
- BOOST_THROW_EXCEPTION(invalid_operation());\r
+ attach(); \r
+ GL(glClear(GL_COLOR_BUFFER_BIT));\r
+ }\r
+ \r
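+	// Host-to-device copy: with the source bound as a pixel unpack PBO (assumed from host_buffer::bind()), the NULL passed to glTexSubImage2D is an offset into that PBO rather than a client pointer.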
+ void copy_from(host_buffer& source)\r
+ {\r
+ source.unmap();\r
+ source.bind();\r
+ GL(glBindTexture(GL_TEXTURE_2D, id_));\r
+ GL(glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width_, height_, FORMAT[stride_], TYPE[stride_], NULL));\r
+ GL(glBindTexture(GL_TEXTURE_2D, 0));\r
+ source.unbind();\r
+ }\r
\r
- ogl->begin_invoke([=]\r
- {\r
- dest->unmap();\r
- dest->bind();\r
- GL(glBindTexture(GL_TEXTURE_2D, id_));\r
- GL(glReadBuffer(GL_COLOR_ATTACHMENT0));\r
- GL(glReadPixels(0, 0, width_, height_, FORMAT[stride_], TYPE[stride_], NULL));\r
- GL(glBindTexture(GL_TEXTURE_2D, 0));\r
- dest->unbind();\r
- GL(glFlush());\r
- }, task_priority::high_priority);\r
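+	// Device-to-host readback: reads color attachment 0 of the bound FBO into the destination's pack PBO (assumed from host_buffer::bind()), where NULL is an offset into it; glFlush kicks off the asynchronous transfer.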
+ void copy_to(host_buffer& dest)\r
+ {\r
+ dest.unmap();\r
+ dest.bind();\r
+ GL(glBindTexture(GL_TEXTURE_2D, id_));\r
+ GL(glReadBuffer(GL_COLOR_ATTACHMENT0));\r
+ GL(glReadPixels(0, 0, width_, height_, FORMAT[stride_], TYPE[stride_], NULL));\r
+ GL(glBindTexture(GL_TEXTURE_2D, 0));\r
+ dest.unbind();\r
+ GL(glFlush());\r
}\r
};\r
\r
-device_buffer::device_buffer(std::weak_ptr<context> parent, int width, int height, int stride) : impl_(new impl(parent, width, height, stride)){}\r
+device_buffer::device_buffer(int width, int height, int stride) : impl_(new impl(width, height, stride)){}\r
int device_buffer::stride() const { return impl_->stride_; }\r
int device_buffer::width() const { return impl_->width_; }\r
int device_buffer::height() const { return impl_->height_; }\r
void device_buffer::bind(int index){impl_->bind(index);}\r
void device_buffer::unbind(){impl_->unbind();}\r
-void device_buffer::copy_from(const spl::shared_ptr<host_buffer>& source){impl_->copy_from(source);}\r
-void device_buffer::copy_to(const spl::shared_ptr<host_buffer>& dest){impl_->copy_to(dest);}\r
+void device_buffer::attach(){impl_->attach();}\r
+void device_buffer::clear(){impl_->clear();}\r
+void device_buffer::copy_from(host_buffer& source){impl_->copy_from(source);}\r
+void device_buffer::copy_to(host_buffer& dest){impl_->copy_to(dest);}\r
int device_buffer::id() const{ return impl_->id_;}\r
\r
}}}
\ No newline at end of file