#include "../../stdafx.h"\r
\r
#include "device_buffer.h"\r
+#include "ogl_device.h"\r
\r
#include <common/exception/exceptions.h>\r
#include <common/gl/gl_check.h>\r
\r
#include <tbb/atomic.h>\r
\r
+#include <boost/thread/future.hpp>\r
+\r
namespace caspar { namespace core {\r
\r
// Lookup tables indexed by stride (bytes per pixel, 1..4); slot 0 is unused\r
// padding so a stride value can be used as a direct index.\r
static GLenum FORMAT[] = {0, GL_RED, GL_RG, GL_BGR, GL_BGRA};\r
static GLenum INTERNAL_FORMAT[] = {0, GL_R8, GL_RG8, GL_RGB8, GL_RGBA8};	\r
+// Pixel-transfer data type per stride; the 4-byte (BGRA) case uses the packed\r
+// GL_UNSIGNED_INT_8_8_8_8_REV type, the usual driver fast path for BGRA.\r
+static GLenum TYPE[] = {0, GL_UNSIGNED_BYTE, GL_UNSIGNED_BYTE, GL_UNSIGNED_BYTE, GL_UNSIGNED_INT_8_8_8_8_REV};	\r
\r
unsigned int format(int stride)\r
{\r
\r
struct device_buffer::impl : boost::noncopyable\r
{\r
- GLuint id_;\r
+ std::weak_ptr<ogl_device> parent_;\r
+ GLuint id_;\r
\r
const int width_;\r
const int height_;\r
const int stride_;\r
public:\r
- impl(int width, int height, int stride) \r
- : width_(width)\r
+ impl(std::weak_ptr<ogl_device> parent, int width, int height, int stride) \r
+ : parent_(parent)\r
+ , width_(width)\r
, height_(height)\r
, stride_(stride)\r
{ \r
GL(glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));\r
GL(glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));\r
GL(glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));\r
- GL(glTexImage2D(GL_TEXTURE_2D, 0, INTERNAL_FORMAT[stride_], width_, height_, 0, FORMAT[stride_], GL_UNSIGNED_BYTE, NULL));\r
+ GL(glTexImage2D(GL_TEXTURE_2D, 0, INTERNAL_FORMAT[stride_], width_, height_, 0, FORMAT[stride_], TYPE[stride_], NULL));\r
GL(glBindTexture(GL_TEXTURE_2D, 0));\r
CASPAR_LOG(trace) << "[device_buffer] [" << ++g_total_count << L"] allocated size:" << width*height*stride; \r
} \r
{\r
GL(glBindTexture(GL_TEXTURE_2D, 0));\r
}\r
+ \r
+	// Asynchronously uploads the contents of |source| (a host-side PBO) into\r
+	// this texture. The GL work is queued on the owning ogl_device's thread at\r
+	// high priority; the returned future completes once the commands have run.\r
+	// Throws invalid_operation if the owning device has already been destroyed.\r
+	boost::unique_future<void> copy_async_from(const safe_ptr<host_buffer>& source)\r
+	{\r
+		auto ogl = parent_.lock();\r
+		if(!ogl)\r
+			BOOST_THROW_EXCEPTION(invalid_operation());\r
+\r
+		return ogl->begin_invoke([=]\r
+		{\r
+			// Give the mapped pointer back to the driver before using the PBO\r
+			// as a transfer source, then bind it for the upload.\r
+			source->unmap();\r
+			source->bind();\r
+			GL(glBindTexture(GL_TEXTURE_2D, id_));\r
+			// NULL = offset 0 into the currently bound unpack PBO.\r
+			GL(glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width_, height_, FORMAT[stride_], TYPE[stride_], NULL));\r
+			GL(glBindTexture(GL_TEXTURE_2D, 0));\r
+			source->unbind();\r
+		}, high_priority);\r
+	}\r
\r
-	void begin_read()\r
+	// Asynchronously reads pixels back into |dest| (a host-side PBO) on the\r
+	// owning ogl_device's thread; throws invalid_operation if the device is gone.\r
+	// NOTE(review): glReadPixels transfers from the current read framebuffer,\r
+	// not from the texture bound below — callers presumably attach/select this\r
+	// texture via ogl_device first; confirm against the call sites.\r
+	boost::unique_future<void> copy_async_to(const safe_ptr<host_buffer>& dest)\r
	{\r
-		bind();\r
-		GL(glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width_, height_, FORMAT[stride_], GL_UNSIGNED_BYTE, NULL));\r
-		unbind();\r
+		auto ogl = parent_.lock();\r
+		if(!ogl)\r
+			BOOST_THROW_EXCEPTION(invalid_operation());\r
+\r
+		return ogl->begin_invoke([=]\r
+		{\r
+			dest->unmap();\r
+			// Bound as pack PBO; NULL below means offset 0 into it.\r
+			dest->bind();\r
+			GL(glBindTexture(GL_TEXTURE_2D, id_));\r
+			GL(glReadPixels(0, 0, width_, height_, FORMAT[stride_], TYPE[stride_], NULL));\r
+			GL(glBindTexture(GL_TEXTURE_2D, 0));\r
+			dest->unbind();\r
+		}, high_priority);\r
	}\r
};\r
\r
// Thin forwarding layer: device_buffer delegates everything to its impl.\r
// The constructor now receives the creating ogl_device (held weakly) so the\r
// async copy operations can marshal their GL work onto the device thread.\r
-device_buffer::device_buffer(int width, int height, int stride) : impl_(new impl(width, height, stride)){}\r
+device_buffer::device_buffer(std::weak_ptr<ogl_device> parent, int width, int height, int stride) : impl_(new impl(parent, width, height, stride)){}\r
int device_buffer::stride() const { return impl_->stride_; }\r
int device_buffer::width() const { return impl_->width_; }\r
int device_buffer::height() const { return impl_->height_; }\r
void device_buffer::bind(int index){impl_->bind(index);}\r
void device_buffer::unbind(){impl_->unbind();}\r
-void device_buffer::begin_read(){impl_->begin_read();}\r
+boost::unique_future<void> device_buffer::copy_async_from(const safe_ptr<host_buffer>& source){return impl_->copy_async_from(source);}\r
+boost::unique_future<void> device_buffer::copy_async_to(const safe_ptr<host_buffer>& dest){return impl_->copy_async_to(dest);}\r
int device_buffer::id() const{ return impl_->id_;}\r
\r
\r
#pragma once\r
\r
#include <common/memory/safe_ptr.h>\r
+#include <common/forward.h>\r
\r
#include <boost/noncopyable.hpp>\r
\r
+FORWARD1(boost, template<typename> class unique_future);\r
+\r
namespace caspar { namespace core {\r
\r
+class host_buffer;\r
+class ogl_device;\r
+\r
class device_buffer : boost::noncopyable\r
{\r
public: \r
int width() const;\r
int height() const;\r
\r
- void bind(int index);\r
- void unbind();\r
- \r
- void begin_read();\r
+ boost::unique_future<void> copy_async_from(const safe_ptr<host_buffer>& source);\r
+ boost::unique_future<void> copy_async_to(const safe_ptr<host_buffer>& dest);\r
private:\r
friend class ogl_device;\r
- device_buffer(int width, int height, int stride);\r
-\r
+ friend class image_kernel;\r
+ device_buffer(std::weak_ptr<ogl_device> parent, int width, int height, int stride);\r
+ \r
+ void bind(int index);\r
+ void unbind();\r
int id() const;\r
\r
struct impl;\r
\r
struct host_buffer::impl : boost::noncopyable\r
{ \r
- GLuint pbo_;\r
- const int size_;\r
- void* data_;\r
- GLenum usage_;\r
- GLenum target_;\r
+ GLuint pbo_;\r
+ const int size_;\r
+ tbb::atomic<void*> data_;\r
+ GLenum usage_;\r
+ GLenum target_;\r
+ std::weak_ptr<ogl_device> parent_;\r
\r
public:\r
-	impl(int size, usage_t usage) \r
-		: size_(size)\r
-		, data_(nullptr)\r
+	// Allocates an OpenGL pixel buffer object of |size| bytes. |parent| is the\r
+	// ogl_device whose thread must execute all GL calls; it is held weakly so\r
+	// a pooled buffer cannot keep the device alive.\r
+	impl(std::weak_ptr<ogl_device> parent, int size, host_buffer::usage usage) \r
+		: parent_(parent)\r
+		, size_(size)\r
		, pbo_(0)\r
-		, target_(usage == write_only ? GL_PIXEL_UNPACK_BUFFER : GL_PIXEL_PACK_BUFFER)\r
-		, usage_(usage == write_only ? GL_STREAM_DRAW : GL_STREAM_READ)\r
+		, target_(usage == host_buffer::usage::write_only ? GL_PIXEL_UNPACK_BUFFER : GL_PIXEL_PACK_BUFFER)\r
+		, usage_(usage == host_buffer::usage::write_only ? GL_STREAM_DRAW : GL_STREAM_READ)\r
	{\r
+		// tbb::atomic has no initializing constructor; assign explicitly.\r
+		data_ = nullptr;\r
		GL(glGenBuffers(1, &pbo_));\r
-		GL(glBindBuffer(target_, pbo_));\r
-		if(usage_ != write_only) \r
+		bind();\r
+		// Pre-size read-back (pack) buffers; write buffers are orphaned/sized\r
+		// lazily in map().\r
+		if(usage_ != GL_STREAM_DRAW) \r
			GL(glBufferData(target_, size_, NULL, usage_));	\r
-		GL(glBindBuffer(target_, 0));\r
+		unbind();\r
\r
		if(!pbo_)\r
			BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to allocate buffer."));\r
\r
-		CASPAR_LOG(trace) << "[host_buffer] [" << ++(usage_ == write_only ? g_w_total_count : g_r_total_count) << L"] allocated size:" << size_ << " usage: " << (usage == write_only ? "write_only" : "read_only");\r
+		// BUGFIX: compare the |usage| parameter, not usage_ — usage_ is a GLenum\r
+		// (GL_STREAM_DRAW/GL_STREAM_READ) and can never equal the enum value\r
+		// write_only, so the write counter was never selected in the log line.\r
+		CASPAR_LOG(trace) << "[host_buffer] [" << ++(usage == host_buffer::usage::write_only ? g_w_total_count : g_r_total_count) << L"] allocated size:" << size_ << " usage: " << (usage == host_buffer::usage::write_only ? "write_only" : "read_only");\r
	}	\r
\r
~impl()\r
}\r
}\r
\r
-	void map()\r
+	// Returns a CPU-visible pointer to the buffer contents, mapping the PBO on\r
+	// first use. The mapping itself is executed synchronously on the owning\r
+	// ogl_device's thread; the result is cached in the atomic data_ member.\r
+	// Throws invalid_operation if the device has been destroyed or the map fails.\r
+	void* map()\r
	{\r
-		if(data_)\r
-			return;\r
+		if(data_ != nullptr)\r
+			return data_;\r
+\r
+		auto ogl = parent_.lock();\r
\r
-		if(usage_ == write_only) \r
-			GL(glBufferData(target_, size_, NULL, usage_)); // Notify OpenGL that we don't care about previous data.\r
+		if(!ogl)\r
+			BOOST_THROW_EXCEPTION(invalid_operation());\r
+\r
+		return ogl->invoke([&]() -> void*\r
+		{	\r
+			// Re-checked on the device thread: another caller may have mapped\r
+			// the buffer while this invocation was queued.\r
+			if(data_ != nullptr)\r
+				return data_;\r
+\r
+			GL(glBindBuffer(target_, pbo_));\r
+			if(usage_ == GL_STREAM_DRAW) \r
+				GL(glBufferData(target_, size_, NULL, usage_)); // Notify OpenGL that we don't care about previous data.\r
\r
-		GL(glBindBuffer(target_, pbo_));\r
-		data_ = GL2(glMapBuffer(target_, usage_ == GL_STREAM_DRAW ? GL_WRITE_ONLY : GL_READ_ONLY)); \r
-		GL(glBindBuffer(target_, 0)); \r
-		if(!data_)\r
-			BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Failed to map target_ OpenGL Pixel Buffer Object."));\r
+			data_ = GL2(glMapBuffer(target_, usage_ == GL_STREAM_DRAW ? GL_WRITE_ONLY : GL_READ_ONLY)); \r
+			GL(glBindBuffer(target_, 0));\r
+			if(!data_)\r
+				BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Failed to map target_ OpenGL Pixel Buffer Object."));\r
+\r
+			return data_;\r
+		}, high_priority);\r
	}\r
\r
+	// Releases the CPU mapping (no-op when not mapped) so the PBO can be used\r
+	// as a GL transfer source/target. The unmap runs synchronously on the\r
+	// owning ogl_device's thread; throws invalid_operation if the device is gone.\r
	void unmap()\r
	{\r
-		if(!data_)\r
+		if(data_ == nullptr)\r
			return;\r
-		\r
-		if(usage_ == read_only) \r
-			GL(glBufferData(target_, size_, NULL, usage_)); // Notify OpenGL that we don't care about previous data.\r
\r
-		GL(glBindBuffer(target_, pbo_));\r
-		GL(glUnmapBuffer(target_)); \r
-		data_ = nullptr; \r
-		GL(glBindBuffer(target_, 0));\r
+		auto ogl = parent_.lock();\r
+\r
+		if(!ogl)\r
+			BOOST_THROW_EXCEPTION(invalid_operation());\r
+\r
+		ogl->invoke([&]\r
+		{\r
+			// Re-checked on the device thread in case another caller already\r
+			// unmapped while this invocation was queued.\r
+			if(data_ == nullptr)\r
+				return;\r
+			\r
+			GL(glBindBuffer(target_, pbo_));\r
+			GL(glUnmapBuffer(target_));	\r
+			// Orphan read-back buffers after unmapping so the next readback\r
+			// does not stall on the previous frame's storage.\r
+			if(usage_ == GL_STREAM_READ) \r
+				GL(glBufferData(target_, size_, NULL, usage_)); // Notify OpenGL that we don't care about previous data.\r
+			data_ = nullptr;	\r
+			GL(glBindBuffer(target_, 0));\r
+		}, high_priority);\r
	}\r
\r
+	// NOTE(review): this body binds buffer 0 — identical to what unbind() would\r
+	// do. It looks like lines were lost from this hunk (bind() should presumably\r
+	// issue glBindBuffer(target_, pbo_)); confirm against the full file.\r
	void bind()\r
	{\r
		GL(glBindBuffer(target_, 0));\r
	}\r
-\r
-	void begin_read(int width, int height, GLuint format)\r
+	\r
+	// Lazily-mapped CPU view of the buffer contents; maps on first access.\r
+	void* data()\r
	{\r
-		unmap();\r
-		bind();\r
-		GL(glReadPixels(0, 0, width, height, format, GL_UNSIGNED_BYTE, NULL));\r
-		unbind();\r
+		return map();\r
	}\r
};\r
\r
// Forwarding layer. Note the data() asymmetry: the const overload returns the\r
// raw cached pointer (null when the buffer is not currently mapped), while the\r
// non-const overload maps on demand via impl::data().\r
-host_buffer::host_buffer(int size, usage_t usage) : impl_(new impl(size, usage)){}\r
+host_buffer::host_buffer(std::weak_ptr<ogl_device> parent, int size, usage usage) : impl_(new impl(parent, size, usage)){}\r
const void* host_buffer::data() const {return impl_->data_;}\r
-void* host_buffer::data() {return impl_->data_;}\r
+void* host_buffer::data() {return impl_->data();}\r
void host_buffer::map(){impl_->map();}\r
void host_buffer::unmap(){impl_->unmap();}\r
void host_buffer::bind(){impl_->bind();}\r
void host_buffer::unbind(){impl_->unbind();}\r
-void host_buffer::begin_read(int width, int height, GLuint format){impl_->begin_read(width, height, format);}\r
int host_buffer::size() const { return impl_->size_; }\r
\r
}}
\ No newline at end of file
#pragma once\r
\r
#include <common/memory/safe_ptr.h>\r
+#include <common/enum_class.h>\r
\r
#include <boost/noncopyable.hpp>\r
\r
class host_buffer : boost::noncopyable\r
{\r
public:\r
- enum usage_t\r
+ struct usage_def\r
{\r
- write_only,\r
- read_only\r
+ enum type\r
+ {\r
+ write_only,\r
+ read_only\r
+ };\r
};\r
+ typedef enum_class<usage_def> usage;\r
\r
const void* data() const;\r
void* data();\r
int size() const; \r
\r
+private:\r
+ friend class ogl_device;\r
+ friend class device_buffer;\r
+\r
void bind();\r
void unbind();\r
\r
void map();\r
void unmap();\r
- \r
- void begin_read(int width, int height, unsigned int format);\r
-private:\r
- friend class ogl_device;\r
- host_buffer(int size, usage_t usage);\r
+\r
+ host_buffer(std::weak_ptr<ogl_device> parent, int size, usage usage);\r
\r
struct impl;\r
safe_ptr<impl> impl_;\r
\r
ogl_device::ogl_device() \r
: executor_(L"ogl_device")\r
- , pattern_(nullptr)\r
, attached_texture_(0)\r
, active_shader_(0)\r
, read_buffer_(0)\r
{\r
CASPAR_LOG(info) << L"Initializing OpenGL Device.";\r
\r
- std::fill(binded_textures_.begin(), binded_textures_.end(), 0);\r
- std::fill(viewport_.begin(), viewport_.end(), 0);\r
- std::fill(scissor_.begin(), scissor_.end(), 0);\r
- std::fill(blend_func_.begin(), blend_func_.end(), 0);\r
- \r
+ viewport_.assign(std::numeric_limits<GLint>::max());\r
+ scissor_.assign(std::numeric_limits<GLint>::max());\r
+ blend_func_.assign(std::numeric_limits<GLint>::max());\r
+ pattern_.assign(0xFF);\r
+ \r
invoke([=]\r
{\r
context_.reset(new sf::Context());\r
std::shared_ptr<device_buffer> buffer;\r
try\r
{\r
- buffer.reset(new device_buffer(width, height, stride));\r
+ buffer.reset(new device_buffer(shared_from_this(), width, height, stride));\r
}\r
catch(...)\r
{\r
gc().wait();\r
\r
// Try again\r
- buffer.reset(new device_buffer(width, height, stride));\r
+ buffer.reset(new device_buffer(shared_from_this(), width, height, stride));\r
}\r
catch(...)\r
{\r
});\r
}\r
\r
-safe_ptr<host_buffer> ogl_device::allocate_host_buffer(int size, host_buffer::usage_t usage)\r
+safe_ptr<host_buffer> ogl_device::allocate_host_buffer(int size, host_buffer::usage usage)\r
{\r
std::shared_ptr<host_buffer> buffer;\r
\r
try\r
{\r
- buffer.reset(new host_buffer(size, usage));\r
- if(usage == host_buffer::write_only)\r
+ buffer.reset(new host_buffer(shared_from_this(), size, usage));\r
+ if(usage == host_buffer::usage::write_only)\r
buffer->map();\r
else\r
buffer->unmap(); \r
gc().wait();\r
\r
// Try again\r
- buffer.reset(new host_buffer(size, usage));\r
- if(usage == host_buffer::write_only)\r
+ buffer.reset(new host_buffer(shared_from_this(), size, usage));\r
+ if(usage == host_buffer::usage::write_only)\r
buffer->map();\r
else\r
buffer->unmap(); \r
return make_safe_ptr(buffer);\r
}\r
\r
-safe_ptr<host_buffer> ogl_device::create_host_buffer(int size, host_buffer::usage_t usage)\r
+safe_ptr<host_buffer> ogl_device::create_host_buffer(int size, host_buffer::usage usage)\r
{\r
- CASPAR_VERIFY(usage == host_buffer::write_only || usage == host_buffer::read_only);\r
+ CASPAR_VERIFY(usage == host_buffer::usage::write_only || usage == host_buffer::usage::read_only);\r
CASPAR_VERIFY(size > 0);\r
- auto& pool = host_pools_[usage][size];\r
+ auto& pool = host_pools_[usage.value()][size];\r
std::shared_ptr<host_buffer> buffer;\r
if(!pool->items.try_pop(buffer)) \r
buffer = executor_.invoke([=]{return allocate_host_buffer(size, usage);}, high_priority); \r
\r
//++pool->usage_count;\r
\r
- auto self = shared_from_this();\r
+ auto self = shared_from_this();\r
+ bool is_write_only = (usage == host_buffer::usage::write_only);\r
+\r
return safe_ptr<host_buffer>(buffer.get(), [=](host_buffer*) mutable\r
{\r
self->executor_.begin_invoke([=]() mutable\r
{ \r
- if(usage == host_buffer::write_only)\r
+ if(is_write_only)\r
buffer->map();\r
else\r
buffer->unmap();\r
\r
+// Redundant-state filter: only issues glViewport when the rectangle actually\r
+// changes. The cache is seeded with INT_MAX sentinels in the constructor, so\r
+// the first call always reaches the driver.\r
void ogl_device::viewport(int x, int y, int width, int height)\r
{\r
-	if(x != viewport_[0] || y != viewport_[1] || width != viewport_[2] || height != viewport_[3])\r
+	std::array<GLint, 4> viewport = {{x, y, width, height}};\r
+	if(viewport != viewport_)\r
	{	\r
		glViewport(x, y, width, height);\r
-		viewport_[0] = x;\r
-		viewport_[1] = y;\r
-		viewport_[2] = width;\r
-		viewport_[3] = height;\r
+		viewport_ = viewport;\r
	}\r
}\r
\r
+// Redundant-state filter for the scissor rectangle; also (lazily) enables\r
+// GL_SCISSOR_TEST whenever the rectangle changes.\r
+// NOTE(review): nothing in this function ever disables the scissor test — a\r
+// full-frame rectangle keeps clipping enabled; confirm this is intended now\r
+// that image_kernel no longer toggles GL_SCISSOR_TEST itself.\r
void ogl_device::scissor(int x, int y, int width, int height)\r
{\r
-	if(x != scissor_[0] || y != scissor_[1] || width != scissor_[2] || height != scissor_[3])\r
+	std::array<GLint, 4> scissor = {{x, y, width, height}};\r
+	if(scissor != scissor_)\r
	{	\r
+		enable(GL_SCISSOR_TEST);\r
		glScissor(x, y, width, height);\r
-		scissor_[0] = x;\r
-		scissor_[1] = y;\r
-		scissor_[2] = width;\r
-		scissor_[3] = height;\r
+		scissor_ = scissor;\r
	}\r
}\r
\r
-void ogl_device::stipple_pattern(const GLubyte* pattern)\r
+// Applies a polygon-stipple mask with redundant-state filtering. An all-0xFF\r
+// pattern is treated as the "no stipple" sentinel and disables\r
+// GL_POLYGON_STIPPLE instead of uploading the mask.\r
+// NOTE(review): |nopattern| (1 KB) is rebuilt and filled on every change —\r
+// could be a function-local static const; also the comparison spans all 32*32\r
+// bytes although glPolygonStipple only consumes 32x32 bits (128 bytes).\r
+void ogl_device::stipple_pattern(const std::array<GLubyte, 32*32>& pattern)\r
{\r
	if(pattern_ != pattern)\r
	{	\r
-		glPolygonStipple(pattern);\r
+		enable(GL_POLYGON_STIPPLE);\r
+\r
+		std::array<GLubyte, 32*32> nopattern;\r
+		nopattern.assign(0xFF);\r
+\r
+		if(pattern == nopattern)\r
+			disable(GL_POLYGON_STIPPLE);\r
+		else\r
+			glPolygonStipple(pattern.data());\r
+\r
		pattern_ = pattern;\r
	}\r
}\r
, boost::noncopyable\r
{ \r
std::unordered_map<GLenum, bool> caps_;\r
+ std::array<GLubyte, 32*32> pattern_;\r
std::array<int, 4> viewport_;\r
std::array<int, 4> scissor_;\r
- const GLubyte* pattern_;\r
+ std::array<GLint, 4> blend_func_;\r
GLint attached_texture_;\r
GLint active_shader_;\r
- std::array<GLint, 16> binded_textures_;\r
- std::array<GLint, 4> blend_func_;\r
GLenum read_buffer_;\r
\r
std::unique_ptr<sf::Context> context_;\r
void disable(GLenum cap);\r
void viewport(int x, int y, int width, int height);\r
void scissor(int x, int y, int width, int height);\r
- void stipple_pattern(const GLubyte* pattern);\r
+ void stipple_pattern(const std::array<GLubyte, 32*32>& pattern);\r
\r
void attach(device_buffer& texture);\r
void clear(device_buffer& texture);\r
}\r
\r
safe_ptr<device_buffer> create_device_buffer(int width, int height, int stride);\r
- safe_ptr<host_buffer> create_host_buffer(int size, host_buffer::usage_t usage);\r
+ safe_ptr<host_buffer> create_host_buffer(int size, host_buffer::usage usage);\r
\r
boost::unique_future<void> gc();\r
\r
\r
private:\r
safe_ptr<device_buffer> allocate_device_buffer(int width, int height, int stride);\r
- safe_ptr<host_buffer> allocate_host_buffer(int size, host_buffer::usage_t usage);\r
+ safe_ptr<host_buffer> allocate_host_buffer(int size, host_buffer::usage usage);\r
};\r
\r
}}
\ No newline at end of file
\r
namespace caspar { namespace core {\r
\r
+// Polygon-stipple masks for interlaced field rendering: alternating on/off\r
+// scanlines keep either the upper or the lower field; "progressive" is\r
+// all-ones (the no-stipple sentinel recognised by ogl_device::stipple_pattern).\r
+// NOTE(review): glPolygonStipple consumes 32x32 BITS (128 bytes), but these\r
+// arrays are declared as 32*32 BYTES; only the first 128 entries are given\r
+// initializers, the remainder zero-initializes. Rendering is unaffected since\r
+// only the first 128 bytes are read, but the size looks 8x larger than\r
+// intended — confirm whether 32*32/8 was meant.\r
-GLubyte upper_pattern[] = {\r
+__declspec(align(16)) std::array<GLubyte, 32*32> upper_pattern = {{\r
	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,\r
	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,\r
	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,\r
-	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00};\r
+	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}};\r
\r
-GLubyte lower_pattern[] = {\r
+__declspec(align(16)) std::array<GLubyte, 32*32> lower_pattern = {{\r
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,	\r
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,\r
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,\r
-	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff};\r
+	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff}};\r
+	\r
+__declspec(align(16)) std::array<GLubyte, 32*32> progressive_pattern = {{\r
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	\r
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\r
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\r
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};\r
\r
struct image_kernel::impl : boost::noncopyable\r
{ \r
shader_->set("csb", false); \r
\r
// Setup interlacing\r
-\r
- if(params.transform.field_mode == core::field_mode::progressive) \r
- ogl_->disable(GL_POLYGON_STIPPLE); \r
- else \r
- {\r
- ogl_->enable(GL_POLYGON_STIPPLE);\r
-\r
- if(params.transform.field_mode == core::field_mode::upper)\r
- ogl_->stipple_pattern(upper_pattern);\r
- else if(params.transform.field_mode == core::field_mode::lower)\r
- ogl_->stipple_pattern(lower_pattern);\r
- }\r
-\r
+ \r
+ if(params.transform.field_mode == core::field_mode::upper)\r
+ ogl_->stipple_pattern(upper_pattern);\r
+ else if(params.transform.field_mode == core::field_mode::lower)\r
+ ogl_->stipple_pattern(lower_pattern);\r
+ else \r
+ ogl_->stipple_pattern(progressive_pattern);\r
+ \r
// Setup drawing area\r
\r
ogl_->viewport(0, 0, params.background->width(), params.background->height());\r
\r
auto m_p = params.transform.clip_translation;\r
auto m_s = params.transform.clip_scale;\r
-\r
- bool scissor = m_p[0] > std::numeric_limits<double>::epsilon() || m_p[1] > std::numeric_limits<double>::epsilon() ||\r
- m_s[0] < (1.0 - std::numeric_limits<double>::epsilon()) || m_s[1] < (1.0 - std::numeric_limits<double>::epsilon());\r
-\r
- if(scissor)\r
- {\r
- double w = static_cast<double>(params.background->width());\r
- double h = static_cast<double>(params.background->height());\r
\r
- ogl_->enable(GL_SCISSOR_TEST);\r
- ogl_->scissor(static_cast<int>(m_p[0]*w), static_cast<int>(m_p[1]*h), static_cast<int>(m_s[0]*w), static_cast<int>(m_s[1]*h));\r
- }\r
-\r
+ double w = static_cast<double>(params.background->width());\r
+ double h = static_cast<double>(params.background->height());\r
+ \r
+ ogl_->scissor(static_cast<int>(m_p[0]*w), static_cast<int>(m_p[1]*h), static_cast<int>(m_s[0]*w), static_cast<int>(m_s[1]*h));\r
+ \r
auto f_p = params.transform.fill_translation;\r
auto f_s = params.transform.fill_scale;\r
\r
// Set render target\r
- \r
+ \r
+ if(blend_modes_)\r
+ {\r
+ // http://www.opengl.org/registry/specs/NV/texture_barrier.txt\r
+ // This allows us to use framebuffer (background) both as source and target while blending.\r
+ glTextureBarrierNV(); \r
+ }\r
+\r
ogl_->attach(*params.background);\r
\r
// Draw\r
glMultiTexCoord2d(GL_TEXTURE0, 1.0, 0.0); glMultiTexCoord2d(GL_TEXTURE1, (f_p[0]+f_s[0]), f_p[1] ); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, f_p[1] *2.0-1.0);\r
glMultiTexCoord2d(GL_TEXTURE0, 1.0, 1.0); glMultiTexCoord2d(GL_TEXTURE1, (f_p[0]+f_s[0]), (f_p[1]+f_s[1])); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
glMultiTexCoord2d(GL_TEXTURE0, 0.0, 1.0); glMultiTexCoord2d(GL_TEXTURE1, f_p[0] , (f_p[1]+f_s[1])); glVertex2d( f_p[0] *2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
- glEnd();\r
- \r
- // Cleanup\r
-\r
- ogl_->disable(GL_SCISSOR_TEST); \r
- \r
- params.textures.clear();\r
-\r
- if(blend_modes_)\r
- {\r
- // http://www.opengl.org/registry/specs/NV/texture_barrier.txt\r
- // This allows us to use framebuffer (background) both as source and target while blending.\r
- glTextureBarrierNV(); \r
- }\r
+ glEnd(); \r
}\r
};\r
\r
{\r
}\r
\r
-	boost::unique_future<safe_ptr<host_buffer>> operator()(std::vector<layer>&& layers, const video_format_desc& format_desc)\r
-	{	\r
-		auto layers2 = make_move_on_copy(std::move(layers));\r
-		return ogl_->begin_invoke([=]\r
-		{\r
-			return do_render(std::move(layers2.value), format_desc);\r
-		});\r
-	}\r
-\r
-private:\r
-	safe_ptr<host_buffer> do_render(std::vector<layer>&& layers, const video_format_desc& format_desc)\r
-	{\r
-		auto draw_buffer = create_mixer_buffer(4, format_desc);\r
-\r
-		if(format_desc.field_mode != field_mode::progressive)\r
+	// Composites |layers| on the ogl_device thread and returns a future host\r
+	// buffer containing the result. |layers| is taken by value and captured by\r
+	// copy ([=]); the lambda is mutable so the captured copy can be moved from,\r
+	// replacing the old make_move_on_copy + do_render split.\r
+	boost::unique_future<safe_ptr<host_buffer>> operator()(std::vector<layer> layers, const video_format_desc& format_desc)\r
+	{	\r
+		return ogl_->begin_invoke([=]() mutable -> safe_ptr<host_buffer>\r
		{\r
-			auto upper = layers;\r
-			auto lower = std::move(layers);\r
+			auto draw_buffer = create_mixer_buffer(4, format_desc);\r
\r
-			BOOST_FOREACH(auto& layer, upper)\r
+			// Interlaced formats are rendered as two passes — each pass masks\r
+			// every item's field mode down to one field.\r
+			if(format_desc.field_mode != field_mode::progressive)\r
			{\r
-				BOOST_FOREACH(auto& item, layer.second)\r
-					item.transform.field_mode = static_cast<field_mode>(item.transform.field_mode & field_mode::upper);\r
+				auto upper = layers;\r
+				auto lower = std::move(layers);\r
+\r
+				BOOST_FOREACH(auto& layer, upper)\r
+				{\r
+					BOOST_FOREACH(auto& item, layer.second)\r
+						item.transform.field_mode = static_cast<field_mode>(item.transform.field_mode & field_mode::upper);\r
+				}\r
+\r
+				BOOST_FOREACH(auto& layer, lower)\r
+				{\r
+					BOOST_FOREACH(auto& item, layer.second)\r
+						item.transform.field_mode = static_cast<field_mode>(item.transform.field_mode & field_mode::lower);\r
+				}\r
+\r
+				draw(std::move(upper), draw_buffer, format_desc);\r
+				draw(std::move(lower), draw_buffer, format_desc);\r
			}\r
-\r
-			BOOST_FOREACH(auto& layer, lower)\r
+			else\r
			{\r
-				BOOST_FOREACH(auto& item, layer.second)\r
-					item.transform.field_mode = static_cast<field_mode>(item.transform.field_mode & field_mode::lower);\r
+				draw(std::move(layers), draw_buffer, format_desc);\r
			}\r
-\r
-			draw(std::move(upper), draw_buffer, format_desc);\r
-			draw(std::move(lower), draw_buffer, format_desc);\r
-		}\r
-		else\r
-		{\r
-			draw(std::move(layers), draw_buffer, format_desc);\r
-		}\r
-\r
-		auto host_buffer = ogl_->create_host_buffer(static_cast<int>(format_desc.size), host_buffer::read_only);\r
-		ogl_->attach(*draw_buffer);\r
-		ogl_->read_buffer(*draw_buffer);\r
-		host_buffer->begin_read(draw_buffer->width(), draw_buffer->height(), format(draw_buffer->stride()));\r
-		\r
-		return host_buffer;\r
+			\r
+			// Kick off the asynchronous GPU->host readback; consumers map the\r
+			// returned buffer (blocking) only when they need the pixels.\r
+			// NOTE(review): the old explicit attach()/read_buffer() calls were\r
+			// dropped — presumably draw() leaves draw_buffer attached as the\r
+			// read source for copy_async_to's glReadPixels; confirm.\r
+			auto result = ogl_->create_host_buffer(static_cast<int>(format_desc.size), host_buffer::usage::read_only);\r
+			draw_buffer->copy_async_to(result);	\r
+			return result;\r
+		});\r
	}\r
\r
+private:\r
+\r
void draw(std::vector<layer>&& layers, \r
safe_ptr<device_buffer>& draw_buffer, \r
const video_format_desc& format_desc)\r
\r
auto image = image_mixer_(format_desc_);\r
auto audio = audio_mixer_(format_desc_);\r
- image.wait();\r
\r
graph_->set_value("mix-time", mix_timer_.elapsed()*format_desc_.fps*0.5);\r
\r
- target_->send(std::make_pair(make_safe<read_frame>(ogl_, format_desc_.width, format_desc_.height, std::move(image.get()), std::move(audio)), packet.second)); \r
+ target_->send(std::make_pair(make_safe<read_frame>(format_desc_.width, format_desc_.height, std::move(image), std::move(audio)), packet.second)); \r
}\r
catch(...)\r
{\r
#include "read_frame.h"\r
\r
#include "gpu/host_buffer.h" \r
-#include "gpu/ogl_device.h"\r
\r
#include <tbb/mutex.h>\r
\r
\r
struct read_frame::impl : boost::noncopyable\r
{\r
- safe_ptr<ogl_device> ogl_;\r
- int width_;\r
- int height_;\r
- safe_ptr<host_buffer> image_data_;\r
- tbb::mutex mutex_;\r
- audio_buffer audio_data_;\r
+ int width_;\r
+ int height_;\r
+ boost::unique_future<safe_ptr<host_buffer>> image_data_;\r
+ audio_buffer audio_data_;\r
\r
public:\r
-	impl(const safe_ptr<ogl_device>& ogl, int width, int height, safe_ptr<host_buffer>&& image_data, audio_buffer&& audio_data) \r
-		: ogl_(ogl)\r
-		, width_(width)\r
+	// read_frame now owns a future of the readback buffer instead of a ready\r
+	// buffer plus an ogl_device reference; mapping happens lazily through\r
+	// host_buffer itself, so the device handle is no longer needed here.\r
+	impl(	int width, int height, boost::unique_future<safe_ptr<host_buffer>>&& image_data, audio_buffer&& audio_data) \r
+		: width_(width)\r
		, height_(height)\r
		, image_data_(std::move(image_data))\r
		, audio_data_(std::move(audio_data)){}	\r
\r
const boost::iterator_range<const uint8_t*> image_data()\r
{\r
- {\r
- tbb::mutex::scoped_lock lock(mutex_);\r
-\r
- if(!image_data_->data()) \r
- ogl_->invoke([=]{image_data_.get()->map();}, high_priority); \r
- }\r
-\r
- auto ptr = static_cast<const uint8_t*>(image_data_->data());\r
- return boost::iterator_range<const uint8_t*>(ptr, ptr + image_data_->size());\r
+ auto ptr = static_cast<const uint8_t*>(image_data_.get()->data());\r
+ return boost::iterator_range<const uint8_t*>(ptr, ptr + image_data_.get()->size());\r
}\r
const boost::iterator_range<const int32_t*> audio_data()\r
{\r
}\r
};\r
\r
+// Constructor now accepts the pending readback as a future; the ogl_device\r
+// parameter was dropped along with the eager-mapping path.\r
-read_frame::read_frame(const safe_ptr<ogl_device>& ogl, int width, int height, safe_ptr<host_buffer>&& image_data, audio_buffer&& audio_data) \r
-	: impl_(new impl(ogl, width, height, std::move(image_data), std::move(audio_data))){}\r
+read_frame::read_frame(int width, int height, boost::unique_future<safe_ptr<host_buffer>>&& image_data, audio_buffer&& audio_data) \r
+	: impl_(new impl(width, height, std::move(image_data), std::move(audio_data))){}\r
read_frame::read_frame(){}\r
const boost::iterator_range<const uint8_t*> read_frame::image_data()\r
{\r
#pragma once\r
\r
#include <common/memory/safe_ptr.h>\r
+#include <common/forward.h>\r
\r
#include <core/mixer/audio/audio_mixer.h>\r
\r
#include <memory>\r
#include <vector>\r
\r
+FORWARD1(boost, template<typename> class unique_future);\r
+\r
namespace caspar { namespace core {\r
\r
class read_frame sealed : boost::noncopyable\r
{\r
public:\r
read_frame();\r
- read_frame(const safe_ptr<class ogl_device>& ogl, int width, int height, safe_ptr<class host_buffer>&& image_data, audio_buffer&& audio_data);\r
+ read_frame(int width, int height, boost::unique_future<safe_ptr<class host_buffer>>&& image_data, audio_buffer&& audio_data);\r
\r
const boost::iterator_range<const uint8_t*> image_data();\r
const boost::iterator_range<const int32_t*> audio_data();\r
{\r
}\r
\r
- impl(const safe_ptr<ogl_device>& ogl, const void* tag, const core::pixel_format_desc& desc) \r
+ impl(const safe_ptr<ogl_device>& ogl, const void* tag, const core::pixel_format_desc& desc, const field_mode& mode) \r
: ogl_(ogl)\r
, desc_(desc)\r
, tag_(tag)\r
- , mode_(core::field_mode::progressive)\r
+ , mode_(mode)\r
{\r
std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(buffers_), [&](const core::pixel_format_desc::plane& plane)\r
{\r
- return ogl_->create_host_buffer(plane.size, host_buffer::write_only);\r
+ return ogl_->create_host_buffer(plane.size, host_buffer::usage::write_only);\r
});\r
std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(textures_), [&](const core::pixel_format_desc::plane& plane)\r
{\r
\r
if(!buffer)\r
return;\r
-\r
- auto texture = textures_.at(plane_index);\r
- \r
- ogl_->begin_invoke([=]\r
- { \r
- buffer->unmap();\r
- buffer->bind();\r
- texture->begin_read();\r
- buffer->unbind();\r
- }, high_priority);\r
+ \r
+ textures_.at(plane_index)->copy_async_from(make_safe_ptr(buffer));\r
}\r
};\r
\r
write_frame::write_frame(const void* tag) : impl_(new impl(tag)){}\r
+// The field mode is now fixed at construction time (replacing the removed\r
+// mutable set_type() setter).\r
-write_frame::write_frame(const safe_ptr<ogl_device>& ogl, const void* tag, const core::pixel_format_desc& desc) \r
-	: impl_(new impl(ogl, tag, desc)){}\r
+write_frame::write_frame(const safe_ptr<ogl_device>& ogl, const void* tag, const core::pixel_format_desc& desc, const field_mode& mode) \r
+	: impl_(new impl(ogl, tag, desc, mode)){}\r
write_frame::write_frame(write_frame&& other) : impl_(std::move(other.impl_)){}\r
write_frame& write_frame::operator=(write_frame&& other)\r
{\r
const std::vector<safe_ptr<device_buffer>>& write_frame::get_textures() const{return impl_->textures_;}\r
void write_frame::commit(int plane_index){impl_->commit(plane_index);}\r
void write_frame::commit(){impl_->commit();}\r
+// set_type() removed: the field mode is immutable after construction and is\r
+// passed to the constructor instead.\r
-void write_frame::set_type(const field_mode& mode){impl_->mode_ = mode;}\r
core::field_mode write_frame::get_type() const{return impl_->mode_;}\r
void write_frame::accept(core::frame_visitor& visitor){impl_->accept(*this, visitor);}\r
\r
{\r
public: \r
explicit write_frame(const void* tag);\r
- explicit write_frame(const safe_ptr<class ogl_device>& ogl, const void* tag, const struct pixel_format_desc& desc);\r
+ explicit write_frame(const safe_ptr<class ogl_device>& ogl, const void* tag, const struct pixel_format_desc& desc, const field_mode& mode = field_mode::progressive);\r
\r
write_frame(write_frame&& other);\r
write_frame& operator=(write_frame&& other);\r
void commit(int plane_index);\r
void commit();\r
\r
- void set_type(const field_mode& mode);\r
field_mode get_type() const;\r
\r
const void* tag() const;\r
{\r
frame_factory(){}\r
\r
- virtual safe_ptr<class write_frame> create_frame(const void* video_stream_tag, const struct pixel_format_desc& desc) = 0; \r
+ virtual safe_ptr<class write_frame> create_frame(const void* video_stream_tag, const struct pixel_format_desc& desc, field_mode mode = field_mode::progressive) = 0; \r
virtual struct video_format_desc get_video_format_desc() const = 0; // nothrow\r
};\r
\r
\r
// frame_factory\r
\r
-	virtual safe_ptr<write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc) override\r
+	// The field mode is now forwarded to write_frame's constructor (the base\r
+	// interface defaults it to progressive), replacing post-construction\r
+	// set_type() calls at the producers.\r
+	virtual safe_ptr<write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc, field_mode mode) override\r
	{	\r
-		return make_safe<write_frame>(ogl_, tag, desc);\r
+		return make_safe<write_frame>(ogl_, tag, desc, mode);\r
	}\r
\r
virtual core::video_format_desc get_video_format_desc() const override\r
\r
auto target_desc = get_pixel_format_desc(target_pix_fmt, width, height);\r
\r
- write = frame_factory->create_frame(tag, target_desc);\r
- write->set_type(get_mode(*decoded_frame));\r
+ write = frame_factory->create_frame(tag, target_desc, get_mode(*decoded_frame));\r
\r
std::shared_ptr<SwsContext> sws_context;\r
\r
}\r
else\r
{\r
- write = frame_factory->create_frame(tag, desc);\r
- write->set_type(get_mode(*decoded_frame));\r
+ write = frame_factory->create_frame(tag, desc, get_mode(*decoded_frame));\r
\r
for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)\r
{\r
<data-path>D:\casparcg\_data\</data-path>\r
<template-path>D:\casparcg\_templates\</template-path>\r
</paths>\r
+ <blend-modes>true</blend-modes>\r
<log-level>trace</log-level>\r
- <channel-grid>true</channel-grid>\r
<channels>\r
<channel>\r
<video-mode>720p5000</video-mode>\r
<consumers>\r
- </consumers>\r
- </channel>\r
- <channel>\r
- <video-mode>720p5000</video-mode>\r
- <consumers>\r
- </consumers>\r
- </channel>\r
- <channel>\r
- <video-mode>720p5000</video-mode>\r
- <consumers>\r
+ <screen>\r
+ <device>2</device>\r
+ </screen>\r
</consumers>\r
</channel>\r
</channels>\r