<ClInclude Include="os\windows\current_version.h" />\r
<ClInclude Include="os\windows\system_info.h" />\r
<ClInclude Include="enum_class.h" />\r
- <ClInclude Include="scope_guard.h" />\r
<ClInclude Include="stdafx.h" />\r
<ClInclude Include="utf.h" />\r
<ClInclude Include="prec_timer.h" />\r
<ClInclude Include="assert.h">\r
<Filter>source</Filter>\r
</ClInclude>\r
- <ClInclude Include="scope_guard.h">\r
- <Filter>source</Filter>\r
- </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
return begin_invoke(std::forward<Func>(func), prioriy).get();\r
}\r
\r
- bool yield() // noexcept\r
+ void yield() // noexcept\r
{\r
if(boost::this_thread::get_id() != thread_.get_id()) // Only yield when calling from execution thread.\r
- return false;\r
+ return;\r
\r
std::function<void()> func;\r
while(execution_queue_[high_priority].try_pop(func))\r
if(func)\r
func();\r
} \r
-\r
- if(!func)\r
- {\r
- execution_queue_[normal_priority].try_pop(func);\r
- if(func)\r
- func();\r
- }\r
-\r
- return func != nullptr;\r
}\r
\r
function_queue::size_type capacity() const /*noexcept*/ { return execution_queue_[normal_priority].capacity(); }\r
// Get the last error\r
GLenum LastErrorCode = GL_NO_ERROR;\r
\r
- //for(GLenum ErrorCode = glGetError(); ErrorCode != GL_NO_ERROR; ErrorCode = glGetError())\r
- //{\r
- // CASPAR_LOG(error) << "OpenGL Error: " << ErrorCode << L" " << glewGetErrorString(ErrorCode);\r
- // LastErrorCode = ErrorCode;\r
- //}\r
+ for(GLenum ErrorCode = glGetError(); ErrorCode != GL_NO_ERROR; ErrorCode = glGetError())\r
+ {\r
+ CASPAR_LOG(error) << "OpenGL Error: " << ErrorCode << L" " << glewGetErrorString(ErrorCode);\r
+ LastErrorCode = ErrorCode;\r
+ }\r
\r
if (LastErrorCode != GL_NO_ERROR)\r
{\r
+++ /dev/null
-#pragma once\r
-\r
-namespace caspar {\r
-\r
-template<typename F>\r
-class scope_guard\r
-{\r
- F func_;\r
-public:\r
-\r
- template<typename U>\r
- scope_guard(U&& func)\r
- : func_(std::forward<U>(func))\r
- {\r
- }\r
-\r
- ~scope_guard()\r
- {\r
- func_();\r
- }\r
-};\r
-\r
-template<typename F>\r
-scope_guard<F> make_scope_guard(F&& func)\r
-{\r
- return scope_guard<F>(std::forward<F>(func));\r
-}\r
-\r
-}
\ No newline at end of file
#include <common/memory/safe_ptr.h>\r
#include <common/exception/exceptions.h>\r
#include <core/video_format.h>\r
-#include <core/mixer/data_frame.h>\r
+#include <core/mixer/read_frame.h>\r
\r
#include <boost/circular_buffer.hpp>\r
\r
consumer_->initialize(format_desc, channel_index);\r
}\r
\r
- virtual bool send(const safe_ptr<data_frame>& frame) override\r
+ virtual bool send(const safe_ptr<read_frame>& frame) override\r
{ \r
if(audio_cadence_.size() == 1)\r
return consumer_->send(frame);\r
{\r
struct empty_frame_consumer : public frame_consumer\r
{\r
- virtual bool send(const safe_ptr<data_frame>&) override {return false;}\r
+ virtual bool send(const safe_ptr<read_frame>&) override {return false;}\r
virtual void initialize(const video_format_desc&, int) override{}\r
virtual std::wstring print() const override {return L"empty";}\r
virtual bool has_synchronization_clock() const override {return false;}\r
frame_consumer(){}\r
virtual ~frame_consumer() {}\r
\r
- virtual bool send(const safe_ptr<class data_frame>& frame) = 0;\r
+ virtual bool send(const safe_ptr<class read_frame>& frame) = 0;\r
virtual void initialize(const struct video_format_desc& format_desc, int channel_index) = 0;\r
virtual std::wstring print() const = 0;\r
virtual boost::property_tree::wptree info() const = 0;\r
\r
#include "../video_format.h"\r
#include "../mixer/gpu/ogl_device.h"\r
-#include "../mixer/data_frame.h"\r
+#include "../mixer/read_frame.h"\r
\r
#include <common/concurrency/executor.h>\r
#include <common/diagnostics/graph.h>\r
\r
prec_timer sync_timer_;\r
\r
- boost::circular_buffer<safe_ptr<data_frame>> frames_;\r
+ boost::circular_buffer<safe_ptr<read_frame>> frames_;\r
\r
executor executor_;\r
\r
return boost::range::count_if(consumers_ | boost::adaptors::map_values, [](const safe_ptr<frame_consumer>& x){return x->has_synchronization_clock();}) > 0;\r
}\r
\r
- void send(const std::pair<safe_ptr<data_frame>, std::shared_ptr<void>>& packet)\r
+ void send(const std::pair<safe_ptr<read_frame>, std::shared_ptr<void>>& packet)\r
{\r
executor_.begin_invoke([=]\r
{\r
void output::add(const safe_ptr<frame_consumer>& consumer){impl_->add(consumer);}\r
void output::remove(int index){impl_->remove(index);}\r
void output::remove(const safe_ptr<frame_consumer>& consumer){impl_->remove(consumer);}\r
-void output::send(const std::pair<safe_ptr<data_frame>, std::shared_ptr<void>>& frame) {impl_->send(frame); }\r
+void output::send(const std::pair<safe_ptr<read_frame>, std::shared_ptr<void>>& frame) {impl_->send(frame); }\r
void output::set_video_format_desc(const video_format_desc& format_desc){impl_->set_video_format_desc(format_desc);}\r
boost::unique_future<boost::property_tree::wptree> output::info() const{return impl_->info();}\r
}}
\ No newline at end of file
\r
namespace caspar { namespace core {\r
\r
-class output sealed : public target<std::pair<safe_ptr<class data_frame>, std::shared_ptr<void>>>\r
+class output sealed : public target<std::pair<safe_ptr<class read_frame>, std::shared_ptr<void>>>\r
, boost::noncopyable\r
{\r
public:\r
\r
// target\r
\r
- virtual void send(const std::pair<safe_ptr<class data_frame>, std::shared_ptr<void>>& frame) override;\r
+ virtual void send(const std::pair<safe_ptr<class read_frame>, std::shared_ptr<void>>& frame) override;\r
\r
// output\r
\r
<ClInclude Include="mixer\gpu\ogl_device.h" />\r
<ClInclude Include="mixer\image\image_kernel.h" />\r
<ClInclude Include="mixer\image\image_mixer.h" />\r
- <ClInclude Include="mixer\data_frame.h" />\r
- <ClInclude Include="mixer\device_frame.h" />\r
+ <ClInclude Include="mixer\read_frame.h" />\r
+ <ClInclude Include="mixer\write_frame.h" />\r
<ClInclude Include="producer\color\color_producer.h" />\r
<ClInclude Include="producer\frame\basic_frame.h" />\r
<ClInclude Include="producer\frame\frame_factory.h" />\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="mixer\data_frame.cpp">\r
+ <ClCompile Include="mixer\read_frame.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="mixer\device_frame.cpp">\r
+ <ClCompile Include="mixer\write_frame.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
<ClInclude Include="mixer\audio\audio_mixer.h">\r
<Filter>source\mixer\audio</Filter>\r
</ClInclude>\r
+ <ClInclude Include="mixer\read_frame.h">\r
+ <Filter>source\mixer</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="mixer\write_frame.h">\r
+ <Filter>source\mixer</Filter>\r
+ </ClInclude>\r
<ClInclude Include="StdAfx.h" />\r
<ClInclude Include="mixer\mixer.h">\r
<Filter>source\mixer</Filter>\r
<ClInclude Include="producer\playlist\playlist_producer.h">\r
<Filter>source\producer\playlist</Filter>\r
</ClInclude>\r
+ <ClInclude Include="mixer\image\shader\image_shader.h">\r
+ <Filter>source\mixer\image\shader</Filter>\r
+ </ClInclude>\r
<ClInclude Include="mixer\image\blend_modes.h">\r
<Filter>source\mixer\image</Filter>\r
</ClInclude>\r
<ClInclude Include="producer\separated\separated_producer.h">\r
<Filter>source\producer\separated</Filter>\r
</ClInclude>\r
- <ClInclude Include="mixer\device_frame.h">\r
- <Filter>source\mixer</Filter>\r
- </ClInclude>\r
- <ClInclude Include="mixer\data_frame.h">\r
- <Filter>source\mixer</Filter>\r
- </ClInclude>\r
- <ClInclude Include="mixer\image\shader\image_shader.h">\r
- <Filter>source\mixer\image\shader</Filter>\r
- </ClInclude>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="producer\transition\transition_producer.cpp">\r
<ClCompile Include="producer\separated\separated_producer.cpp">\r
<Filter>source\producer\separated</Filter>\r
</ClCompile>\r
+ <ClCompile Include="mixer\read_frame.cpp">\r
+ <Filter>source\mixer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="mixer\write_frame.cpp">\r
+ <Filter>source\mixer</Filter>\r
+ </ClCompile>\r
<ClCompile Include="StdAfx.cpp" />\r
<ClCompile Include="mixer\mixer.cpp">\r
<Filter>source\mixer</Filter>\r
<ClCompile Include="producer\channel\channel_producer.cpp">\r
<Filter>source\producer\channel</Filter>\r
</ClCompile>\r
- <ClCompile Include="mixer\device_frame.cpp">\r
- <Filter>source\mixer</Filter>\r
- </ClCompile>\r
- <ClCompile Include="mixer\data_frame.cpp">\r
- <Filter>source\mixer</Filter>\r
- </ClCompile>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
\r
#include "audio_mixer.h"\r
\r
-#include <core/mixer/device_frame.h>\r
+#include <core/mixer/write_frame.h>\r
#include <core/producer/frame/frame_transform.h>\r
#include <common/diagnostics/graph.h>\r
\r
transform_stack_.push(transform_stack_.top()*frame.get_frame_transform());\r
}\r
\r
- void visit(core::device_frame& frame)\r
+ void visit(core::write_frame& frame)\r
{\r
if(transform_stack_.top().volume < 0.002 || frame.audio_data().empty())\r
return;\r
\r
audio_mixer::audio_mixer(const safe_ptr<diagnostics::graph>& graph) : impl_(new impl(graph)){}\r
void audio_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}\r
-void audio_mixer::visit(core::device_frame& frame){impl_->visit(frame);}\r
+void audio_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
void audio_mixer::end(){impl_->end();}\r
audio_buffer audio_mixer::operator()(const video_format_desc& format_desc){return impl_->mix(format_desc);}\r
\r
audio_mixer(const safe_ptr<diagnostics::graph>& graph);\r
\r
virtual void begin(core::basic_frame& frame);\r
- virtual void visit(core::device_frame& frame);\r
+ virtual void visit(core::write_frame& frame);\r
virtual void end();\r
\r
audio_buffer operator()(const struct video_format_desc& format_desc);\r
+++ /dev/null
-/*\r
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>\r
-*\r
-* This file is part of CasparCG (www.casparcg.com).\r
-*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
-*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-*\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
-*\r
-* Author: Robert Nagy, ronag89@gmail.com\r
-*/\r
-\r
-#include "../stdafx.h"\r
-\r
-#include "device_frame.h"\r
-\r
-#include "gpu/ogl_device.h"\r
-#include "gpu/host_buffer.h"\r
-#include "gpu/device_buffer.h"\r
-\r
-#include <core/producer/frame/frame_visitor.h>\r
-#include <core/producer/frame/pixel_format.h>\r
-#include <common/scope_guard.h>\r
-#include <common/gl/gl_check.h>\r
-\r
-#include <boost/lexical_cast.hpp>\r
-\r
-namespace caspar { namespace core {\r
- \r
-struct device_frame::impl : boost::noncopyable\r
-{ \r
- std::vector<boost::shared_future<safe_ptr<device_buffer>>> textures_;\r
- audio_buffer audio_data_;\r
- const core::pixel_format_desc desc_;\r
- const void* tag_;\r
- core::field_mode mode_;\r
-\r
- impl(const void* tag)\r
- : tag_(tag)\r
- , mode_(core::field_mode::empty)\r
- {\r
- }\r
-\r
- impl(std::vector<boost::unique_future<safe_ptr<device_buffer>>>&& textures, const void* tag, const core::pixel_format_desc& desc, field_mode type) \r
- : desc_(desc)\r
- , tag_(tag)\r
- , mode_(type)\r
- {\r
- BOOST_FOREACH(auto& texture, textures)\r
- textures_.push_back(std::move(texture));\r
- }\r
-\r
- impl(boost::unique_future<safe_ptr<device_buffer>>&& texture, const void* tag, const core::pixel_format_desc& desc, field_mode type) \r
- : desc_(desc)\r
- , tag_(tag)\r
- , mode_(type)\r
- {\r
- textures_.push_back(std::move(texture));\r
- }\r
- \r
- void accept(device_frame& self, core::frame_visitor& visitor)\r
- {\r
- visitor.begin(self);\r
- visitor.visit(self);\r
- visitor.end();\r
- }\r
-};\r
- \r
-\r
-device_frame::device_frame(const void* tag) : impl_(new impl(tag)){}\r
-device_frame::device_frame(std::vector<future_texture>&& textures, const void* tag, const struct pixel_format_desc& desc, field_mode type)\r
- : impl_(new impl(std::move(textures), tag, desc, type)){}\r
-device_frame::device_frame(future_texture&& texture, const void* tag, const struct pixel_format_desc& desc, field_mode type)\r
- : impl_(new impl(std::move(texture), tag, desc, type)){}\r
-device_frame::device_frame(device_frame&& other) : impl_(std::move(other.impl_)){}\r
-device_frame& device_frame::operator=(device_frame&& other)\r
-{\r
- device_frame temp(std::move(other));\r
- temp.swap(*this);\r
- return *this;\r
-}\r
-void device_frame::swap(device_frame& other){impl_.swap(other.impl_);}\r
-audio_buffer& device_frame::audio_data() { return impl_->audio_data_; }\r
-const void* device_frame::tag() const {return impl_->tag_;}\r
-const core::pixel_format_desc& device_frame::get_pixel_format_desc() const{return impl_->desc_;}\r
-const std::vector<boost::shared_future<safe_ptr<device_buffer>>>& device_frame::get_textures() const{return impl_->textures_;}\r
-core::field_mode device_frame::get_type() const{return impl_->mode_;}\r
-void device_frame::accept(core::frame_visitor& visitor){impl_->accept(*this, visitor);}\r
-\r
-}}
\ No newline at end of file
const int width_;\r
const int height_;\r
const int stride_;\r
+\r
+ fence fence_;\r
public:\r
impl(int width, int height, int stride) \r
: width_(width)\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
}\r
}\r
+ \r
+ void bind()\r
+ {\r
+ GL(glBindTexture(GL_TEXTURE_2D, id_));\r
+ }\r
+\r
+ void bind(int index)\r
+ {\r
+ GL(glActiveTexture(GL_TEXTURE0+index));\r
+ bind();\r
+ }\r
+\r
+ void unbind()\r
+ {\r
+ GL(glBindTexture(GL_TEXTURE_2D, 0));\r
+ }\r
+\r
+ void begin_read()\r
+ {\r
+ bind();\r
+ GL(glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width_, height_, FORMAT[stride_], GL_UNSIGNED_BYTE, NULL));\r
+ unbind();\r
+ fence_.set();\r
+ }\r
+ \r
+ bool ready() const\r
+ {\r
+ return fence_.ready();\r
+ }\r
};\r
\r
device_buffer::device_buffer(int width, int height, int stride) : impl_(new impl(width, height, stride)){}\r
int device_buffer::stride() const { return impl_->stride_; }\r
int device_buffer::width() const { return impl_->width_; }\r
int device_buffer::height() const { return impl_->height_; }\r
+void device_buffer::bind(int index){impl_->bind(index);}\r
+void device_buffer::unbind(){impl_->unbind();}\r
+void device_buffer::begin_read(){impl_->begin_read();}\r
+bool device_buffer::ready() const{return impl_->ready();}\r
int device_buffer::id() const{ return impl_->id_;}\r
\r
\r
int stride() const; \r
int width() const;\r
int height() const;\r
- \r
+ \r
+ void bind(int index);\r
+ void unbind();\r
+ \r
+ void begin_read();\r
+ bool ready() const;\r
private:\r
friend class ogl_device;\r
device_buffer(int width, int height, int stride);\r
\r
struct host_buffer::impl : boost::noncopyable\r
{ \r
- GLuint pbo_;\r
+ GLuint pbo_;\r
const int size_;\r
- void* data_;\r
- GLenum usage_;\r
- GLenum target_;\r
+ void* data_;\r
+ GLenum usage_;\r
+ GLenum target_;\r
+ fence fence_;\r
\r
public:\r
impl(int size, usage_t usage) \r
if(!data_)\r
BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Failed to map target_ OpenGL Pixel Buffer Object."));\r
}\r
- \r
+\r
+ void wait(ogl_device& ogl)\r
+ {\r
+ fence_.wait(ogl);\r
+ }\r
+\r
void unmap()\r
{\r
if(!data_)\r
{\r
GL(glBindBuffer(target_, 0));\r
}\r
+\r
+ void begin_read(int width, int height, GLuint format)\r
+ {\r
+ unmap();\r
+ bind();\r
+ GL(glReadPixels(0, 0, width, height, format, GL_UNSIGNED_BYTE, NULL));\r
+ unbind();\r
+ fence_.set();\r
+ }\r
+\r
+ bool ready() const\r
+ {\r
+ return fence_.ready();\r
+ }\r
};\r
\r
host_buffer::host_buffer(int size, usage_t usage) : impl_(new impl(size, usage)){}\r
void host_buffer::unmap(){impl_->unmap();}\r
void host_buffer::bind(){impl_->bind();}\r
void host_buffer::unbind(){impl_->unbind();}\r
+void host_buffer::begin_read(int width, int height, GLuint format){impl_->begin_read(width, height, format);}\r
int host_buffer::size() const { return impl_->size_; }\r
+bool host_buffer::ready() const{return impl_->ready();}\r
+void host_buffer::wait(ogl_device& ogl){impl_->wait(ogl);}\r
\r
}}
\ No newline at end of file
\r
const void* data() const;\r
void* data();\r
- int size() const;\r
+ int size() const; \r
\r
-private:\r
- friend class ogl_device;\r
- host_buffer(int size, usage_t usage);\r
- \r
- void map();\r
void bind();\r
void unbind();\r
+\r
+ void map();\r
void unmap();\r
+ \r
+ void begin_read(int width, int height, unsigned int format);\r
+ bool ready() const;\r
+ void wait(class ogl_device& ogl);\r
+private:\r
+ friend class ogl_device;\r
+ host_buffer(int size, usage_t usage);\r
\r
struct impl;\r
safe_ptr<impl> impl_;\r
#include "ogl_device.h"\r
\r
#include "shader.h"\r
-#include "fence.h"\r
\r
#include <common/exception/exceptions.h>\r
#include <common/gl/gl_check.h>\r
-#include <common/scope_guard.h>\r
\r
#include <common/assert.h>\r
#include <boost/foreach.hpp>\r
\r
-#include <asmlib.h>\r
-\r
#include <gl/glew.h>\r
\r
namespace caspar { namespace core {\r
\r
ogl_device::ogl_device() \r
: executor_(L"ogl_device")\r
+ , pattern_(nullptr)\r
+ , attached_texture_(0)\r
+ , active_shader_(0)\r
+ , read_buffer_(0)\r
{\r
- state_stack_.push(state());\r
-\r
CASPAR_LOG(info) << L"Initializing OpenGL Device.";\r
- \r
+\r
+ std::fill(binded_textures_.begin(), binded_textures_.end(), 0);\r
+ std::fill(viewport_.begin(), viewport_.end(), 0);\r
+ std::fill(scissor_.begin(), scissor_.end(), 0);\r
+ std::fill(blend_func_.begin(), blend_func_.end(), 0);\r
+ \r
invoke([=]\r
{\r
context_.reset(new sf::Context());\r
glBindFramebuffer(GL_FRAMEBUFFER, fbo_);\r
\r
CASPAR_LOG(info) << L"Successfully initialized OpenGL Device.";\r
-\r
- enable(GL_TEXTURE_2D);\r
});\r
}\r
\r
CASPAR_VERIFY(width > 0 && height > 0);\r
auto& pool = device_pools_[stride-1][((width << 16) & 0xFFFF0000) | (height & 0x0000FFFF)];\r
std::shared_ptr<device_buffer> buffer;\r
- \r
- if(!pool->items.try_pop(buffer)) \r
- buffer = executor_.invoke([&]{return allocate_device_buffer(width, height, stride);});\r
- \r
+ if(!pool->items.try_pop(buffer)) \r
+ buffer = executor_.invoke([&]{return allocate_device_buffer(width, height, stride);}, high_priority); \r
+ \r
+ //++pool->usage_count;\r
+\r
return safe_ptr<device_buffer>(buffer.get(), [=](device_buffer*) mutable\r
{ \r
- executor_.begin_invoke([=]\r
- {\r
- auto prev = attach(buffer->id());\r
- glClear(GL_COLOR_BUFFER_BIT);\r
- attach(prev);\r
- pool->items.push(buffer);\r
- }, high_priority); \r
+ pool->items.push(buffer); \r
});\r
}\r
\r
//}\r
}\r
\r
-bool ogl_device::yield()\r
+void ogl_device::yield()\r
{\r
- scoped_state scope(*this);\r
- return executor_.yield();\r
+ executor_.yield();\r
}\r
\r
boost::unique_future<void> ogl_device::gc()\r
return ver;\r
}\r
\r
-void ogl_device::push_state()\r
-{\r
- state_stack_.push(state_);\r
-}\r
-\r
-ogl_device::state ogl_device::pop_state()\r
-{\r
- if(state_stack_.size() <= 1)\r
- BOOST_THROW_EXCEPTION(invalid_operation());\r
-\r
- auto prev_state = state_stack_.top();\r
- state_stack_.pop();\r
- auto new_state = state_stack_.top();\r
- \r
- viewport(new_state.viewport);\r
- scissor(new_state.scissor);\r
- stipple_pattern(new_state.pattern);\r
- blend_func(new_state.blend_func);\r
- attach(new_state.attached_texture);\r
- use(new_state.active_shader);\r
- for(int n = 0; n < 16; ++n)\r
- bind(new_state.binded_textures[n], n);\r
-\r
- return prev_state;\r
-}\r
\r
void ogl_device::enable(GLenum cap)\r
{\r
}\r
}\r
\r
-void ogl_device::viewport(const std::array<GLint, 4>& viewport)\r
-{\r
- if(viewport != state_.viewport)\r
- { \r
- glViewport(viewport[0], viewport[1], viewport[2], viewport[3]);\r
- state_.viewport = viewport;\r
- }\r
-}\r
-\r
void ogl_device::viewport(int x, int y, int width, int height)\r
{\r
- std::array<int, 4> ar = {{x, y, width, height}};\r
- viewport(ar);\r
-}\r
-\r
-void ogl_device::scissor(const std::array<GLint, 4>& scissor)\r
-{\r
- if(scissor != state_.scissor)\r
+ if(x != viewport_[0] || y != viewport_[1] || width != viewport_[2] || height != viewport_[3])\r
{ \r
- if(scissor == state().scissor)\r
- {\r
- disable(GL_SCISSOR_TEST);\r
- }\r
- else\r
- {\r
- enable(GL_SCISSOR_TEST);\r
- glScissor(scissor[0], scissor[1], scissor[2], scissor[3]);\r
- state_.scissor = scissor;\r
- }\r
+ glViewport(x, y, width, height);\r
+ viewport_[0] = x;\r
+ viewport_[1] = y;\r
+ viewport_[2] = width;\r
+ viewport_[3] = height;\r
}\r
}\r
\r
void ogl_device::scissor(int x, int y, int width, int height)\r
{\r
- std::array<int, 4> ar = {{x, y, width, height}};\r
- scissor(ar);\r
+ if(x != scissor_[0] || y != scissor_[1] || width != scissor_[2] || height != scissor_[3])\r
+ { \r
+ glScissor(x, y, width, height);\r
+ scissor_[0] = x;\r
+ scissor_[1] = y;\r
+ scissor_[2] = width;\r
+ scissor_[3] = height;\r
+ }\r
}\r
\r
-void ogl_device::stipple_pattern(const std::array<GLubyte, 32*32>& pattern)\r
+void ogl_device::stipple_pattern(const GLubyte* pattern)\r
{\r
- if(pattern != state_.pattern)\r
- {\r
- if(pattern == state().pattern)\r
- disable(GL_POLYGON_STIPPLE);\r
- else\r
- {\r
- enable(GL_POLYGON_STIPPLE);\r
- glPolygonStipple(pattern.data());\r
- state_.pattern = pattern;\r
- }\r
+ if(pattern_ != pattern)\r
+ { \r
+ glPolygonStipple(pattern);\r
+ pattern_ = pattern;\r
}\r
}\r
\r
-GLint ogl_device::attach(GLint id)\r
+void ogl_device::attach(device_buffer& texture)\r
{ \r
- auto prev = state_.attached_texture;\r
- if(id != state_.attached_texture)\r
+ if(attached_texture_ != texture.id())\r
{\r
- GL(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0, GL_TEXTURE_2D, id, 0));\r
- GL(glReadBuffer(GL_COLOR_ATTACHMENT0));\r
-\r
- state_.attached_texture = id;\r
+ GL(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0, GL_TEXTURE_2D, texture.id(), 0));\r
+ attached_texture_ = texture.id();\r
}\r
- return prev;\r
}\r
\r
-void ogl_device::attach(const device_buffer& texture)\r
+void ogl_device::clear(device_buffer& texture)\r
{ \r
- attach(texture.id());\r
-}\r
-\r
-void ogl_device::bind(GLint id, int index)\r
-{\r
- if(id != state_.binded_textures[index])\r
- { \r
- GL(glActiveTexture(GL_TEXTURE0+index));\r
- glBindTexture(GL_TEXTURE_2D, id);\r
- }\r
+ attach(texture);\r
+ GL(glClear(GL_COLOR_BUFFER_BIT));\r
}\r
\r
-void ogl_device::blend_func(const std::array<GLint, 4>& func)\r
+void ogl_device::read_buffer(device_buffer&)\r
{\r
- if(state_.blend_func != func)\r
+ if(read_buffer_ != GL_COLOR_ATTACHMENT0)\r
{\r
- state def_state_;\r
- if(func == def_state_.blend_func)\r
- disable(GL_BLEND);\r
- else\r
- {\r
- enable(GL_BLEND);\r
- GL(glBlendFuncSeparate(func[0], func[1], func[2], func[3]));\r
- state_.blend_func = func;\r
- }\r
+ GL(glReadBuffer(GL_COLOR_ATTACHMENT0));\r
+ read_buffer_ = GL_COLOR_ATTACHMENT0;\r
}\r
}\r
\r
-void ogl_device::blend_func(int c1, int c2, int a1, int a2)\r
-{\r
- std::array<int, 4> ar = {c1, c2, a1, a2};\r
- blend_func(ar);\r
-}\r
-\r
-void ogl_device::blend_func(int c1, int c2)\r
-{\r
- blend_func(c1, c2, c1, c2);\r
-}\r
-\r
-void ogl_device::bind(const device_buffer& texture, int index)\r
-{\r
- //while(true)\r
- //{\r
- // if(!texture.ready()) \r
- // yield();\r
- // else\r
- // break;\r
- //}\r
-\r
- bind(texture.id(), index);\r
-}\r
-\r
-\r
-void ogl_device::use(GLint id)\r
+void ogl_device::use(shader& shader)\r
{\r
- if(id != state_.active_shader)\r
+ if(active_shader_ != shader.id())\r
{ \r
- GL(glUseProgramObjectARB(id)); \r
- state_.active_shader = id;\r
+ GL(glUseProgramObjectARB(shader.id())); \r
+ active_shader_ = shader.id();\r
}\r
}\r
\r
-void ogl_device::use(const shader& shader)\r
+void ogl_device::blend_func(int c1, int c2, int a1, int a2)\r
{\r
- use(shader.id());\r
-}\r
-\r
+ std::array<int, 4> func = {c1, c2, a1, a2};\r
\r
-boost::unique_future<safe_ptr<host_buffer>> ogl_device::transfer(const safe_ptr<device_buffer>& source)\r
-{ \r
- return begin_invoke([=]() -> safe_ptr<host_buffer>\r
+ if(blend_func_ != func)\r
{\r
- auto dest = create_host_buffer(source->width()*source->height()*source->stride(), host_buffer::read_only);\r
-\r
- {\r
- scoped_state scope(*this);\r
-\r
- attach(*source);\r
- \r
- dest->bind();\r
- GL(glReadPixels(0, 0, source->width(), source->height(), format(source->stride()), GL_UNSIGNED_BYTE, NULL));\r
-\r
- dest->unbind();\r
- }\r
- \r
- auto sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);\r
-\r
- flush();\r
- \r
- GLsizei length = 0;\r
- int values[] = {0};\r
- \r
- while(true)\r
- {\r
- GL(glGetSynciv(sync, GL_SYNC_STATUS, 1, &length, values));\r
-\r
- if(values[0] != GL_SIGNALED) \r
- {\r
- if(!yield())\r
- Sleep(2);\r
- }\r
- else\r
- break;\r
- }\r
-\r
- dest->map();\r
-\r
- return dest;\r
- });\r
-}\r
-\r
-boost::unique_future<safe_ptr<device_buffer>> ogl_device::transfer(const safe_ptr<host_buffer>& source, int width, int height, int stride)\r
-{ \r
- return begin_invoke([=]() -> safe_ptr<device_buffer>\r
- { \r
- auto dest = create_device_buffer(width, height, stride);\r
- \r
- scoped_state scope(*this);\r
-\r
- source->unmap();\r
- source->bind();\r
- \r
- const_cast<ogl_device*>(this)->bind(*dest, 0); // WORKAROUND: const-cast needed due to compiler bug?\r
- GL(glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, dest->width(), dest->height(), format(dest->stride()), GL_UNSIGNED_BYTE, NULL));\r
- \r
- source->unbind();\r
- \r
- return dest;\r
- });\r
-}\r
-\r
-ogl_device::state::state()\r
- : attached_texture(0)\r
- , active_shader(0)\r
-{\r
- binded_textures.assign(0);\r
- viewport.assign(std::numeric_limits<int>::max());\r
- scissor.assign(std::numeric_limits<int>::max());\r
- blend_func.assign(std::numeric_limits<int>::max());\r
- pattern.assign(0xFF);\r
-} \r
-\r
-ogl_device::state::state(const state& other)\r
-{\r
- A_memcpy(this, &other, sizeof(state));\r
+ blend_func_ = func;\r
+ GL(glBlendFuncSeparate(c1, c2, a1, a2));\r
+ }\r
}\r
\r
-ogl_device::state& ogl_device::state::operator=(const state& other)\r
+void ogl_device::blend_func(int c1, int c2)\r
{\r
- A_memcpy(this, &other, sizeof(state));\r
+ blend_func(c1, c2, c1, c2);\r
}\r
\r
}}\r
#include <boost/noncopyable.hpp>\r
\r
#include <array>\r
-#include <stack>\r
#include <unordered_map>\r
-#include <type_traits>\r
\r
FORWARD1(boost, template<typename> class unique_future);\r
\r
class ogl_device : public std::enable_shared_from_this<ogl_device>\r
, boost::noncopyable\r
{ \r
- __declspec(align(16)) struct state\r
- {\r
- std::array<GLubyte, 32*32> pattern;\r
- std::array<GLint, 16> binded_textures;\r
- std::array<GLint, 4> viewport;\r
- std::array<GLint, 4> scissor;\r
- std::array<GLint, 4> blend_func;\r
- GLint attached_texture;\r
- GLint active_shader;\r
- GLint padding[2];\r
-\r
- state(); \r
- state(const state& other);\r
- state& operator=(const state& other);\r
- };\r
- \r
- state state_;\r
- std::stack<state> state_stack_;\r
-\r
- std::map<GLenum, bool> caps_;\r
- void enable(GLenum cap);\r
- void disable(GLenum cap);\r
+ std::unordered_map<GLenum, bool> caps_;\r
+ std::array<int, 4> viewport_;\r
+ std::array<int, 4> scissor_;\r
+ const GLubyte* pattern_;\r
+ GLint attached_texture_;\r
+ GLint active_shader_;\r
+ std::array<GLint, 16> binded_textures_;\r
+ std::array<GLint, 4> blend_func_;\r
+ GLenum read_buffer_;\r
\r
std::unique_ptr<sf::Context> context_;\r
- GLuint fbo_;\r
\r
std::array<tbb::concurrent_unordered_map<int, safe_ptr<buffer_pool<device_buffer>>>, 4> device_pools_;\r
std::array<tbb::concurrent_unordered_map<int, safe_ptr<buffer_pool<host_buffer>>>, 2> host_pools_;\r
\r
+ unsigned int fbo_;\r
+\r
executor executor_;\r
\r
ogl_device();\r
-\r
- void use(GLint id);\r
- GLint attach(GLint id);\r
- void bind(GLint id, int index); \r
- void flush();\r
- \r
- friend class scoped_state;\r
- \r
public: \r
- void push_state();\r
- state pop_state();\r
- \r
static safe_ptr<ogl_device> create();\r
~ogl_device();\r
\r
// Not thread-safe, must be called inside of context\r
+ void enable(GLenum cap);\r
+ void disable(GLenum cap);\r
void viewport(int x, int y, int width, int height);\r
- void viewport(const std::array<GLint, 4>& ar);\r
void scissor(int x, int y, int width, int height);\r
- void scissor(const std::array<GLint, 4>& ar);\r
- void stipple_pattern(const std::array<GLubyte, 32*32>& pattern);\r
- \r
- void blend_func(const std::array<GLint, 4>& ar);\r
+ void stipple_pattern(const GLubyte* pattern);\r
+\r
+ void attach(device_buffer& texture);\r
+ void clear(device_buffer& texture);\r
+ \r
void blend_func(int c1, int c2, int a1, int a2);\r
void blend_func(int c1, int c2);\r
\r
- void use(const shader& shader);\r
+ void use(shader& shader);\r
\r
- void attach(const device_buffer& texture);\r
+ void read_buffer(device_buffer& texture);\r
\r
- void bind(const device_buffer& texture, int index);\r
+ void flush();\r
\r
	// thread-safe\r
template<typename Func>\r
\r
safe_ptr<device_buffer> create_device_buffer(int width, int height, int stride);\r
safe_ptr<host_buffer> create_host_buffer(int size, host_buffer::usage_t usage);\r
-\r
- boost::unique_future<safe_ptr<host_buffer>> transfer(const safe_ptr<device_buffer>& source);\r
- boost::unique_future<safe_ptr<device_buffer>> transfer(const safe_ptr<host_buffer>& source, int width, int height, int stride);\r
\r
- bool yield();\r
+ void yield();\r
boost::unique_future<void> gc();\r
\r
std::wstring version();\r
safe_ptr<host_buffer> allocate_host_buffer(int size, host_buffer::usage_t usage);\r
};\r
\r
-class scoped_state\r
-{\r
- ogl_device& context_;\r
-public:\r
- scoped_state(ogl_device& context)\r
- : context_(context)\r
- {\r
- context_.push_state();\r
- }\r
-\r
- ~scoped_state()\r
- {\r
- context_.pop_state();\r
- }\r
-};\r
-\r
}}
\ No newline at end of file
\r
namespace caspar { namespace core {\r
\r
-std::array<GLubyte, 32*32> upper_pattern = {{\r
+GLubyte upper_pattern[] = {\r
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,\r
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,\r
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,\r
- 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}};\r
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00};\r
\r
-std::array<GLubyte, 32*32> lower_pattern = {{\r
+GLubyte lower_pattern[] = {\r
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, \r
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,\r
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,\r
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff}};\r
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff};\r
\r
struct image_kernel::impl : boost::noncopyable\r
{ \r
{\r
static const double epsilon = 0.001;\r
\r
- ogl_->invoke([&]\r
- { \r
- scoped_state scope(*ogl_);\r
+ CASPAR_ASSERT(params.pix_desc.planes.size() == params.textures.size());\r
\r
- CASPAR_ASSERT(params.pix_desc.planes.size() == params.textures.size());\r
+ if(params.textures.empty() || !params.background)\r
+ return;\r
\r
- if(params.textures.empty() || !params.background)\r
- return;\r
-\r
- if(params.transform.opacity < epsilon)\r
- return;\r
- \r
- // Bind textures\r
+ if(params.transform.opacity < epsilon)\r
+ return;\r
+ \r
+ if(!std::all_of(params.textures.begin(), params.textures.end(), std::mem_fn(&device_buffer::ready)))\r
+ {\r
+ CASPAR_LOG(trace) << L"[image_mixer] Performance warning. Host to device transfer not complete, GPU will be stalled";\r
+ ogl_->yield(); // Try to give it some more time.\r
+ } \r
+ \r
+ // Bind textures\r
\r
- for(int n = 0; n < params.textures.size(); ++n)\r
- ogl_->bind(*params.textures[n], n);\r
+ for(int n = 0; n < params.textures.size(); ++n)\r
+ params.textures[n]->bind(n);\r
\r
- if(params.local_key)\r
- ogl_->bind(*params.local_key, texture_id::local_key);\r
+ if(params.local_key)\r
+ params.local_key->bind(texture_id::local_key);\r
\r
- if(params.layer_key)\r
- ogl_->bind(*params.layer_key, texture_id::layer_key);\r
+ if(params.layer_key)\r
+ params.layer_key->bind(texture_id::layer_key);\r
\r
- // Setup shader\r
+ // Setup shader\r
\r
- ogl_->use(*shader_);\r
-\r
- shader_->set("plane[0]", texture_id::plane0);\r
- shader_->set("plane[1]", texture_id::plane1);\r
- shader_->set("plane[2]", texture_id::plane2);\r
- shader_->set("plane[3]", texture_id::plane3);\r
- shader_->set("local_key", texture_id::local_key);\r
- shader_->set("layer_key", texture_id::layer_key);\r
- shader_->set("is_hd", params.pix_desc.planes.at(0).height > 700 ? 1 : 0);\r
- shader_->set("has_local_key", params.local_key);\r
- shader_->set("has_layer_key", params.layer_key);\r
- shader_->set("pixel_format", params.pix_desc.pix_fmt.value()); \r
- shader_->set("opacity", params.transform.is_key ? 1.0 : params.transform.opacity); \r
+ ogl_->use(*shader_);\r
+\r
+ shader_->set("plane[0]", texture_id::plane0);\r
+ shader_->set("plane[1]", texture_id::plane1);\r
+ shader_->set("plane[2]", texture_id::plane2);\r
+ shader_->set("plane[3]", texture_id::plane3);\r
+ shader_->set("local_key", texture_id::local_key);\r
+ shader_->set("layer_key", texture_id::layer_key);\r
+ shader_->set("is_hd", params.pix_desc.planes.at(0).height > 700 ? 1 : 0);\r
+ shader_->set("has_local_key", params.local_key);\r
+ shader_->set("has_layer_key", params.layer_key);\r
+ shader_->set("pixel_format", params.pix_desc.pix_fmt.value()); \r
+ shader_->set("opacity", params.transform.is_key ? 1.0 : params.transform.opacity); \r
\r
- // Setup blend_func\r
+ // Setup blend_func\r
\r
- if(params.transform.is_key)\r
- params.blend_mode = blend_mode::normal;\r
-\r
- if(blend_modes_)\r
+ if(params.transform.is_key)\r
+ params.blend_mode = blend_mode::normal;\r
+\r
+ if(blend_modes_)\r
+ {\r
+ params.background->bind(6);\r
+\r
+ shader_->set("background", texture_id::background);\r
+ shader_->set("blend_mode", params.blend_mode.value());\r
+ shader_->set("keyer", params.keyer.value());\r
+ }\r
+ else\r
+ {\r
+ switch(params.keyer.value())\r
{\r
- ogl_->bind(*params.background, 6);\r
-\r
- shader_->set("background", texture_id::background);\r
- shader_->set("blend_mode", params.blend_mode.value());\r
- shader_->set("keyer", params.keyer.value());\r
- }\r
- else\r
- {\r
- switch(params.keyer.value())\r
- {\r
- case keyer::additive:\r
- ogl_->blend_func(GL_ONE, GL_ONE); \r
- break;\r
- case keyer::linear:\r
- default: \r
- ogl_->blend_func(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); \r
- } \r
- }\r
-\r
- // Setup image-adjustements\r
+ case keyer::additive:\r
+ ogl_->blend_func(GL_ONE, GL_ONE); \r
+ break;\r
+ case keyer::linear:\r
+ default: \r
+ ogl_->blend_func(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); \r
+ } \r
+ }\r
+\r
+	// Setup image-adjustments
\r
- if(params.transform.levels.min_input > epsilon ||\r
- params.transform.levels.max_input < 1.0-epsilon ||\r
- params.transform.levels.min_output > epsilon ||\r
- params.transform.levels.max_output < 1.0-epsilon ||\r
- std::abs(params.transform.levels.gamma - 1.0) > epsilon)\r
- {\r
- shader_->set("levels", true); \r
- shader_->set("min_input", params.transform.levels.min_input); \r
- shader_->set("max_input", params.transform.levels.max_input);\r
- shader_->set("min_output", params.transform.levels.min_output);\r
- shader_->set("max_output", params.transform.levels.max_output);\r
- shader_->set("gamma", params.transform.levels.gamma);\r
- }\r
- else\r
- shader_->set("levels", false); \r
-\r
- if(std::abs(params.transform.brightness - 1.0) > epsilon ||\r
- std::abs(params.transform.saturation - 1.0) > epsilon ||\r
- std::abs(params.transform.contrast - 1.0) > epsilon)\r
- {\r
- shader_->set("csb", true); \r
+ if(params.transform.levels.min_input > epsilon ||\r
+ params.transform.levels.max_input < 1.0-epsilon ||\r
+ params.transform.levels.min_output > epsilon ||\r
+ params.transform.levels.max_output < 1.0-epsilon ||\r
+ std::abs(params.transform.levels.gamma - 1.0) > epsilon)\r
+ {\r
+ shader_->set("levels", true); \r
+ shader_->set("min_input", params.transform.levels.min_input); \r
+ shader_->set("max_input", params.transform.levels.max_input);\r
+ shader_->set("min_output", params.transform.levels.min_output);\r
+ shader_->set("max_output", params.transform.levels.max_output);\r
+ shader_->set("gamma", params.transform.levels.gamma);\r
+ }\r
+ else\r
+ shader_->set("levels", false); \r
+\r
+ if(std::abs(params.transform.brightness - 1.0) > epsilon ||\r
+ std::abs(params.transform.saturation - 1.0) > epsilon ||\r
+ std::abs(params.transform.contrast - 1.0) > epsilon)\r
+ {\r
+ shader_->set("csb", true); \r
\r
- shader_->set("brt", params.transform.brightness); \r
- shader_->set("sat", params.transform.saturation);\r
- shader_->set("con", params.transform.contrast);\r
- }\r
- else\r
- shader_->set("csb", false); \r
+ shader_->set("brt", params.transform.brightness); \r
+ shader_->set("sat", params.transform.saturation);\r
+ shader_->set("con", params.transform.contrast);\r
+ }\r
+ else\r
+ shader_->set("csb", false); \r
\r
- // Setup interlacing\r
- \r
+ // Setup interlacing\r
+\r
+ if(params.transform.field_mode == core::field_mode::progressive) \r
+ ogl_->disable(GL_POLYGON_STIPPLE); \r
+ else \r
+ {\r
+ ogl_->enable(GL_POLYGON_STIPPLE);\r
+\r
if(params.transform.field_mode == core::field_mode::upper)\r
ogl_->stipple_pattern(upper_pattern);\r
else if(params.transform.field_mode == core::field_mode::lower)\r
ogl_->stipple_pattern(lower_pattern);\r
+ }\r
\r
- // Setup drawing area\r
+ // Setup drawing area\r
\r
- ogl_->viewport(0, 0, params.background->width(), params.background->height());\r
+ ogl_->viewport(0, 0, params.background->width(), params.background->height());\r
\r
- auto m_p = params.transform.clip_translation;\r
- auto m_s = params.transform.clip_scale;\r
+ auto m_p = params.transform.clip_translation;\r
+ auto m_s = params.transform.clip_scale;\r
\r
- bool scissor = m_p[0] > std::numeric_limits<double>::epsilon() || m_p[1] > std::numeric_limits<double>::epsilon() ||\r
- m_s[0] < (1.0 - std::numeric_limits<double>::epsilon()) || m_s[1] < (1.0 - std::numeric_limits<double>::epsilon());\r
+ bool scissor = m_p[0] > std::numeric_limits<double>::epsilon() || m_p[1] > std::numeric_limits<double>::epsilon() ||\r
+ m_s[0] < (1.0 - std::numeric_limits<double>::epsilon()) || m_s[1] < (1.0 - std::numeric_limits<double>::epsilon());\r
\r
- if(scissor)\r
- {\r
- double w = static_cast<double>(params.background->width());\r
- double h = static_cast<double>(params.background->height());\r
+ if(scissor)\r
+ {\r
+ double w = static_cast<double>(params.background->width());\r
+ double h = static_cast<double>(params.background->height());\r
\r
- ogl_->scissor(static_cast<int>(m_p[0]*w), static_cast<int>(m_p[1]*h), static_cast<int>(m_s[0]*w), static_cast<int>(m_s[1]*h));\r
- }\r
+ ogl_->enable(GL_SCISSOR_TEST);\r
+ ogl_->scissor(static_cast<int>(m_p[0]*w), static_cast<int>(m_p[1]*h), static_cast<int>(m_s[0]*w), static_cast<int>(m_s[1]*h));\r
+ }\r
\r
- auto f_p = params.transform.fill_translation;\r
- auto f_s = params.transform.fill_scale;\r
+ auto f_p = params.transform.fill_translation;\r
+ auto f_s = params.transform.fill_scale;\r
\r
- // Set render target\r
+ // Set render target\r
\r
- ogl_->attach(*params.background);\r
- \r
- // Draw \r
-\r
- glBegin(GL_QUADS);\r
- glMultiTexCoord2d(GL_TEXTURE0, 0.0, 0.0); glMultiTexCoord2d(GL_TEXTURE1, f_p[0] , f_p[1] ); glVertex2d( f_p[0] *2.0-1.0, f_p[1] *2.0-1.0);\r
- glMultiTexCoord2d(GL_TEXTURE0, 1.0, 0.0); glMultiTexCoord2d(GL_TEXTURE1, (f_p[0]+f_s[0]), f_p[1] ); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, f_p[1] *2.0-1.0);\r
- glMultiTexCoord2d(GL_TEXTURE0, 1.0, 1.0); glMultiTexCoord2d(GL_TEXTURE1, (f_p[0]+f_s[0]), (f_p[1]+f_s[1])); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
- glMultiTexCoord2d(GL_TEXTURE0, 0.0, 1.0); glMultiTexCoord2d(GL_TEXTURE1, f_p[0] , (f_p[1]+f_s[1])); glVertex2d( f_p[0] *2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
- glEnd();\r
-\r
- // Sync background\r
-\r
- if(blend_modes_)\r
- {\r
- // http://www.opengl.org/registry/specs/NV/texture_barrier.txt\r
- // This allows us to use framebuffer (background) both as source and target while blending.\r
- glTextureBarrierNV(); \r
- }\r
- });\r
+ ogl_->attach(*params.background);\r
+ \r
+ // Draw\r
+ \r
+ glBegin(GL_QUADS);\r
+ glMultiTexCoord2d(GL_TEXTURE0, 0.0, 0.0); glMultiTexCoord2d(GL_TEXTURE1, f_p[0] , f_p[1] ); glVertex2d( f_p[0] *2.0-1.0, f_p[1] *2.0-1.0);\r
+ glMultiTexCoord2d(GL_TEXTURE0, 1.0, 0.0); glMultiTexCoord2d(GL_TEXTURE1, (f_p[0]+f_s[0]), f_p[1] ); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, f_p[1] *2.0-1.0);\r
+ glMultiTexCoord2d(GL_TEXTURE0, 1.0, 1.0); glMultiTexCoord2d(GL_TEXTURE1, (f_p[0]+f_s[0]), (f_p[1]+f_s[1])); glVertex2d((f_p[0]+f_s[0])*2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
+ glMultiTexCoord2d(GL_TEXTURE0, 0.0, 1.0); glMultiTexCoord2d(GL_TEXTURE1, f_p[0] , (f_p[1]+f_s[1])); glVertex2d( f_p[0] *2.0-1.0, (f_p[1]+f_s[1])*2.0-1.0);\r
+ glEnd();\r
+ \r
+ // Cleanup\r
+\r
+ ogl_->disable(GL_SCISSOR_TEST); \r
+ \r
+ params.textures.clear();\r
+ ogl_->yield(); // Return resources to pool as early as possible.\r
+\r
+ if(blend_modes_)\r
+ {\r
+ // http://www.opengl.org/registry/specs/NV/texture_barrier.txt\r
+ // This allows us to use framebuffer (background) both as source and target while blending.\r
+ glTextureBarrierNV(); \r
+ }\r
}\r
};\r
\r
#include "image_mixer.h"\r
\r
#include "image_kernel.h"\r
-#include "../device_frame.h"\r
+#include "../write_frame.h"\r
#include "../gpu/ogl_device.h"\r
#include "../gpu/host_buffer.h"\r
#include "../gpu/device_buffer.h"\r
#include <core/producer/frame/pixel_format.h>\r
#include <core/video_format.h>\r
\r
+#include <gl/glew.h>\r
+\r
#include <boost/foreach.hpp>\r
#include <boost/range/algorithm_ext/erase.hpp>\r
#include <boost/thread/future.hpp>\r
\r
struct item\r
{\r
- pixel_format_desc pix_desc;\r
- std::vector<boost::shared_future<safe_ptr<device_buffer>>> textures;\r
- frame_transform transform;\r
+ pixel_format_desc pix_desc;\r
+ std::vector<safe_ptr<device_buffer>> textures;\r
+ frame_transform transform;\r
};\r
\r
typedef std::pair<blend_mode, std::vector<item>> layer;\r
{\r
safe_ptr<ogl_device> ogl_;\r
image_kernel kernel_; \r
+ std::shared_ptr<device_buffer> transferring_buffer_;\r
public:\r
image_renderer(const safe_ptr<ogl_device>& ogl)\r
: ogl_(ogl)\r
\r
boost::unique_future<safe_ptr<host_buffer>> operator()(std::vector<layer>&& layers, const video_format_desc& format_desc)\r
{ \r
- auto draw_buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
- \r
+ auto layers2 = make_move_on_copy(std::move(layers));\r
+ return ogl_->begin_invoke([=]\r
+ {\r
+ return do_render(std::move(layers2.value), format_desc);\r
+ });\r
+ }\r
+\r
+private:\r
+ safe_ptr<host_buffer> do_render(std::vector<layer>&& layers, const video_format_desc& format_desc)\r
+ {\r
+ auto draw_buffer = create_mixer_buffer(4, format_desc);\r
+\r
if(format_desc.field_mode != field_mode::progressive)\r
{\r
auto upper = layers;\r
{\r
draw(std::move(layers), draw_buffer, format_desc);\r
}\r
- \r
- return ogl_->transfer(draw_buffer); \r
- }\r
\r
-private:\r
+ auto host_buffer = ogl_->create_host_buffer(static_cast<int>(format_desc.size), host_buffer::read_only);\r
+ ogl_->attach(*draw_buffer);\r
+ ogl_->read_buffer(*draw_buffer);\r
+ host_buffer->begin_read(draw_buffer->width(), draw_buffer->height(), format(draw_buffer->stride()));\r
+ \r
+ transferring_buffer_ = std::move(draw_buffer);\r
+\r
+ ogl_->flush(); // NOTE: This is important, otherwise fences will deadlock.\r
+ \r
+ return host_buffer;\r
+ }\r
\r
void draw(std::vector<layer>&& layers, \r
safe_ptr<device_buffer>& draw_buffer, \r
\r
if(layer.first != blend_mode::normal)\r
{\r
- auto layer_draw_buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
+ auto layer_draw_buffer = create_mixer_buffer(4, format_desc);\r
\r
BOOST_FOREACH(auto& item, layer.second)\r
draw_item(std::move(item), layer_draw_buffer, layer_key_buffer, local_key_buffer, local_mix_buffer, format_desc); \r
{ \r
draw_params draw_params;\r
draw_params.pix_desc = std::move(item.pix_desc);\r
-\r
- BOOST_FOREACH(auto& tex, item.textures)\r
- draw_params.textures.push_back(tex.get());\r
-\r
+ draw_params.textures = std::move(item.textures);\r
draw_params.transform = std::move(item.transform);\r
\r
if(item.transform.is_key)\r
{\r
- local_key_buffer = local_key_buffer ? local_key_buffer : ogl_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
+ local_key_buffer = local_key_buffer ? local_key_buffer : create_mixer_buffer(1, format_desc);\r
\r
draw_params.background = local_key_buffer;\r
draw_params.local_key = nullptr;\r
}\r
else if(item.transform.is_mix)\r
{\r
- local_mix_buffer = local_mix_buffer ? local_mix_buffer : ogl_->create_device_buffer(format_desc.width, format_desc.height, 4);\r
+ local_mix_buffer = local_mix_buffer ? local_mix_buffer : create_mixer_buffer(4, format_desc);\r
\r
draw_params.background = local_mix_buffer;\r
draw_params.local_key = std::move(local_key_buffer);\r
\r
kernel_.draw(std::move(draw_params));\r
}\r
+ \r
+ safe_ptr<device_buffer> create_mixer_buffer(int stride, const video_format_desc& format_desc)\r
+ {\r
+ auto buffer = ogl_->create_device_buffer(format_desc.width, format_desc.height, stride);\r
+ ogl_->clear(*buffer);\r
+ return buffer;\r
+ }\r
};\r
\r
struct image_mixer::impl : boost::noncopyable\r
transform_stack_.push_back(transform_stack_.back()*frame.get_frame_transform());\r
}\r
\r
- void visit(device_frame& frame)\r
+ void visit(write_frame& frame)\r
{ \r
item item;\r
item.pix_desc = frame.get_pixel_format_desc();\r
\r
boost::unique_future<safe_ptr<host_buffer>> render(const video_format_desc& format_desc)\r
{\r
- auto result = renderer_(std::move(layers_), format_desc);\r
- layers_.clear();\r
- return std::move(result);\r
+ return renderer_(std::move(layers_), format_desc);\r
}\r
};\r
\r
image_mixer::image_mixer(const safe_ptr<ogl_device>& ogl) : impl_(new impl(ogl)){}\r
void image_mixer::begin(basic_frame& frame){impl_->begin(frame);}\r
-void image_mixer::visit(device_frame& frame){impl_->visit(frame);}\r
+void image_mixer::visit(write_frame& frame){impl_->visit(frame);}\r
void image_mixer::end(){impl_->end();}\r
boost::unique_future<safe_ptr<host_buffer>> image_mixer::operator()(const video_format_desc& format_desc){return impl_->render(format_desc);}\r
void image_mixer::begin_layer(blend_mode blend_mode){impl_->begin_layer(blend_mode);}\r
image_mixer(const safe_ptr<class ogl_device>& ogl);\r
\r
virtual void begin(class basic_frame& frame);\r
- virtual void visit(class device_frame& frame);\r
+ virtual void visit(class write_frame& frame);\r
virtual void end();\r
\r
void begin_layer(blend_mode blend_mode);\r
g_shader.reset(new shader(get_vertex(), get_fragment(g_blend_modes)));\r
}\r
\r
+ ogl.enable(GL_TEXTURE_2D);\r
+\r
if(!g_blend_modes)\r
{\r
- ogl.blend_func(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE);\r
+ ogl.enable(GL_BLEND);\r
+ glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE);\r
CASPAR_LOG(info) << L"[shader] Blend-modes are disabled.";\r
}\r
\r
\r
#include "mixer.h"\r
\r
-#include "data_frame.h"\r
-#include "device_frame.h"\r
+#include "read_frame.h"\r
+#include "write_frame.h"\r
\r
#include "audio/audio_mixer.h"\r
#include "image/image_mixer.h"\r
#include <common/gl/gl_check.h>\r
#include <common/utility/tweener.h>\r
\r
-#include <core/mixer/data_frame.h>\r
-#include <core/mixer/device_frame.h>\r
+#include <core/mixer/read_frame.h>\r
+#include <core/mixer/write_frame.h>\r
#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/producer/frame/frame_transform.h>\r
\r
auto image = image_mixer_(format_desc_);\r
auto audio = audio_mixer_(format_desc_);\r
+ image.wait();\r
\r
graph_->set_value("mix-time", mix_timer_.elapsed()*format_desc_.fps*0.5);\r
\r
- target_->send(std::make_pair(make_safe<data_frame>(format_desc_.width, format_desc_.height, std::move(image), std::move(audio)), packet.second)); \r
+ target_->send(std::make_pair(make_safe<read_frame>(ogl_, format_desc_.width, format_desc_.height, std::move(image.get()), std::move(audio)), packet.second)); \r
}\r
catch(...)\r
{\r
, boost::noncopyable\r
{\r
public: \r
- typedef target<std::pair<safe_ptr<class data_frame>, std::shared_ptr<void>>> target_t;\r
+ typedef target<std::pair<safe_ptr<class read_frame>, std::shared_ptr<void>>> target_t;\r
\r
explicit mixer(const safe_ptr<target_t>& target, const safe_ptr<diagnostics::graph>& graph, const struct video_format_desc& format_desc, const safe_ptr<class ogl_device>& ogl);\r
\r
\r
#include "../stdafx.h"\r
\r
-#include "data_frame.h"\r
+#include "read_frame.h"\r
\r
#include "gpu/fence.h"\r
#include "gpu/host_buffer.h" \r
#include "gpu/ogl_device.h"\r
\r
-#include <boost/thread/future.hpp>\r
+#include <tbb/mutex.h>\r
\r
namespace caspar { namespace core {\r
\r
-struct data_frame::impl : boost::noncopyable\r
+struct read_frame::impl : boost::noncopyable\r
{\r
- int width_;\r
- int height_;\r
- boost::unique_future<safe_ptr<host_buffer>> image_data_;\r
- audio_buffer audio_data_;\r
+ safe_ptr<ogl_device> ogl_;\r
+ int width_;\r
+ int height_;\r
+ safe_ptr<host_buffer> image_data_;\r
+ tbb::mutex mutex_;\r
+ audio_buffer audio_data_;\r
\r
public:\r
- impl(int width, int height, boost::unique_future<safe_ptr<host_buffer>>&& image_data, audio_buffer&& audio_data) \r
- : width_(width)\r
+ impl(const safe_ptr<ogl_device>& ogl, int width, int height, safe_ptr<host_buffer>&& image_data, audio_buffer&& audio_data) \r
+ : ogl_(ogl)\r
+ , width_(width)\r
, height_(height)\r
, image_data_(std::move(image_data))\r
, audio_data_(std::move(audio_data)){} \r
\r
const boost::iterator_range<const uint8_t*> image_data()\r
{\r
- auto ptr = static_cast<const uint8_t*>(image_data_.get()->data());\r
- return boost::iterator_range<const uint8_t*>(ptr, ptr + image_data_.get()->size());\r
+ {\r
+ tbb::mutex::scoped_lock lock(mutex_);\r
+\r
+ if(!image_data_->data())\r
+ {\r
+ image_data_.get()->wait(*ogl_);\r
+ ogl_->invoke([=]{image_data_.get()->map();}, high_priority);\r
+ }\r
+ }\r
+\r
+ auto ptr = static_cast<const uint8_t*>(image_data_->data());\r
+ return boost::iterator_range<const uint8_t*>(ptr, ptr + image_data_->size());\r
}\r
const boost::iterator_range<const int32_t*> audio_data()\r
{\r
}\r
};\r
\r
-data_frame::data_frame(int width, int height, boost::unique_future<safe_ptr<host_buffer>>&& image_data, audio_buffer&& audio_data) \r
- : impl_(new impl(width, height, std::move(image_data), std::move(audio_data))){}\r
-data_frame::data_frame(){}\r
-const boost::iterator_range<const uint8_t*> data_frame::image_data()\r
+read_frame::read_frame(const safe_ptr<ogl_device>& ogl, int width, int height, safe_ptr<host_buffer>&& image_data, audio_buffer&& audio_data) \r
+ : impl_(new impl(ogl, width, height, std::move(image_data), std::move(audio_data))){}\r
+read_frame::read_frame(){}\r
+const boost::iterator_range<const uint8_t*> read_frame::image_data()\r
{\r
return impl_ ? impl_->image_data() : boost::iterator_range<const uint8_t*>();\r
}\r
\r
-const boost::iterator_range<const int32_t*> data_frame::audio_data()\r
+const boost::iterator_range<const int32_t*> read_frame::audio_data()\r
{\r
return impl_ ? impl_->audio_data() : boost::iterator_range<const int32_t*>();\r
}\r
\r
-int data_frame::width() const{return impl_ ? impl_->width_ : 0;}\r
-int data_frame::height() const{return impl_ ? impl_->height_ : 0;}\r
+int read_frame::width() const{return impl_ ? impl_->width_ : 0;}\r
+int read_frame::height() const{return impl_ ? impl_->height_ : 0;}\r
\r
//#include <tbb/scalable_allocator.h>\r
//#include <tbb/parallel_for.h>\r
//\r
//void CopyFrame( void * pSrc, void * pDest, UINT width, UINT height, UINT pitch );\r
//\r
-//void* copy_frame(void* dest, const safe_ptr<data_frame>& frame)\r
+//void* copy_frame(void* dest, const safe_ptr<read_frame>& frame)\r
//{\r
// auto src = frame->image_data().begin();\r
// auto height = 720;\r
#pragma once\r
\r
#include <common/memory/safe_ptr.h>\r
-#include <common/forward.h>\r
\r
#include <core/mixer/audio/audio_mixer.h>\r
\r
#include <memory>\r
#include <vector>\r
\r
-FORWARD1(boost, template<typename> class unique_future);\r
-\r
namespace caspar { namespace core {\r
\r
-class data_frame sealed : boost::noncopyable\r
+class read_frame sealed : boost::noncopyable\r
{\r
public:\r
- data_frame();\r
- data_frame(int width, int height, boost::unique_future<safe_ptr<class host_buffer>>&& image_data, audio_buffer&& audio_data);\r
+ read_frame();\r
+ read_frame(const safe_ptr<class ogl_device>& ogl, int width, int height, safe_ptr<class host_buffer>&& image_data, audio_buffer&& audio_data);\r
\r
const boost::iterator_range<const uint8_t*> image_data();\r
const boost::iterator_range<const int32_t*> audio_data();\r
--- /dev/null
+/*\r
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>\r
+*\r
+* This file is part of CasparCG (www.casparcg.com).\r
+*\r
+* CasparCG is free software: you can redistribute it and/or modify\r
+* it under the terms of the GNU General Public License as published by\r
+* the Free Software Foundation, either version 3 of the License, or\r
+* (at your option) any later version.\r
+*\r
+* CasparCG is distributed in the hope that it will be useful,\r
+* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+* GNU General Public License for more details.\r
+*\r
+* You should have received a copy of the GNU General Public License\r
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
+*\r
+* Author: Robert Nagy, ronag89@gmail.com\r
+*/\r
+\r
+#include "../stdafx.h"\r
+\r
+#include "write_frame.h"\r
+\r
+#include "gpu/ogl_device.h"\r
+#include "gpu/host_buffer.h"\r
+#include "gpu/device_buffer.h"\r
+\r
+#include <core/producer/frame/frame_visitor.h>\r
+#include <core/producer/frame/pixel_format.h>\r
+\r
+#include <boost/lexical_cast.hpp>\r
+\r
+namespace caspar { namespace core {\r
+ \r
+struct write_frame::impl : boost::noncopyable\r
+{ \r
+ std::shared_ptr<ogl_device> ogl_;\r
+ std::vector<std::shared_ptr<host_buffer>> buffers_;\r
+ std::vector<safe_ptr<device_buffer>> textures_;\r
+ audio_buffer audio_data_;\r
+ const core::pixel_format_desc desc_;\r
+ const void* tag_;\r
+ core::field_mode mode_;\r
+\r
+ impl(const void* tag)\r
+ : tag_(tag)\r
+ , mode_(core::field_mode::empty)\r
+ {\r
+ }\r
+\r
+ impl(const safe_ptr<ogl_device>& ogl, const void* tag, const core::pixel_format_desc& desc) \r
+ : ogl_(ogl)\r
+ , desc_(desc)\r
+ , tag_(tag)\r
+ , mode_(core::field_mode::progressive)\r
+ {\r
+ std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(buffers_), [&](const core::pixel_format_desc::plane& plane)\r
+ {\r
+ return ogl_->create_host_buffer(plane.size, host_buffer::write_only);\r
+ });\r
+ std::transform(desc.planes.begin(), desc.planes.end(), std::back_inserter(textures_), [&](const core::pixel_format_desc::plane& plane)\r
+ {\r
+ return ogl_->create_device_buffer(plane.width, plane.height, plane.channels); \r
+ });\r
+ }\r
+ \r
+ void accept(write_frame& self, core::frame_visitor& visitor)\r
+ {\r
+ visitor.begin(self);\r
+ visitor.visit(self);\r
+ visitor.end();\r
+ }\r
+\r
+ boost::iterator_range<uint8_t*> image_data(int index)\r
+ {\r
+ if(index >= buffers_.size() || !buffers_[index]->data())\r
+ return boost::iterator_range<uint8_t*>();\r
+ auto ptr = static_cast<uint8_t*>(buffers_[index]->data());\r
+ return boost::iterator_range<uint8_t*>(ptr, ptr+buffers_[index]->size());\r
+ }\r
+ \r
+ void commit()\r
+ {\r
+ for(int n = 0; n < buffers_.size(); ++n)\r
+ commit(n);\r
+ }\r
+\r
+ void commit(int plane_index)\r
+ {\r
+ if(plane_index >= buffers_.size())\r
+ return;\r
+ \r
+ auto buffer = std::move(buffers_[plane_index]); // Release buffer once done.\r
+\r
+ if(!buffer)\r
+ return;\r
+\r
+ auto texture = textures_.at(plane_index);\r
+ \r
+ ogl_->begin_invoke([=]\r
+ { \r
+ buffer->unmap();\r
+ buffer->bind();\r
+ texture->begin_read();\r
+ buffer->unbind();\r
+ }, high_priority);\r
+ }\r
+};\r
+ \r
+write_frame::write_frame(const void* tag) : impl_(new impl(tag)){}\r
+write_frame::write_frame(const safe_ptr<ogl_device>& ogl, const void* tag, const core::pixel_format_desc& desc) \r
+ : impl_(new impl(ogl, tag, desc)){}\r
+write_frame::write_frame(write_frame&& other) : impl_(std::move(other.impl_)){}\r
+write_frame& write_frame::operator=(write_frame&& other)\r
+{\r
+ write_frame temp(std::move(other));\r
+ temp.swap(*this);\r
+ return *this;\r
+}\r
+void write_frame::swap(write_frame& other){impl_.swap(other.impl_);}\r
+boost::iterator_range<uint8_t*> write_frame::image_data(int index){return impl_->image_data(index);}\r
+audio_buffer& write_frame::audio_data() { return impl_->audio_data_; }\r
+const void* write_frame::tag() const {return impl_->tag_;}\r
+const core::pixel_format_desc& write_frame::get_pixel_format_desc() const{return impl_->desc_;}\r
+const std::vector<safe_ptr<device_buffer>>& write_frame::get_textures() const{return impl_->textures_;}\r
+void write_frame::commit(int plane_index){impl_->commit(plane_index);}\r
+void write_frame::commit(){impl_->commit();}\r
+void write_frame::set_type(const field_mode& mode){impl_->mode_ = mode;}\r
+core::field_mode write_frame::get_type() const{return impl_->mode_;}\r
+void write_frame::accept(core::frame_visitor& visitor){impl_->accept(*this, visitor);}\r
+\r
+}}
\ No newline at end of file
#include <core/mixer/audio/audio_mixer.h>\r
\r
#include <boost/range/iterator_range.hpp>\r
-#include <boost/thread/future.hpp>\r
\r
#include <stdint.h>\r
#include <vector>\r
\r
namespace caspar { namespace core {\r
\r
-class device_frame sealed : public core::basic_frame\r
+class write_frame sealed : public core::basic_frame\r
{\r
public: \r
- typedef boost::unique_future<safe_ptr<class device_buffer>> future_texture;\r
+ explicit write_frame(const void* tag);\r
+ explicit write_frame(const safe_ptr<class ogl_device>& ogl, const void* tag, const struct pixel_format_desc& desc);\r
\r
- explicit device_frame(const void* tag);\r
- explicit device_frame(std::vector<future_texture>&& textures, const void* tag, const struct pixel_format_desc& desc, field_mode type);\r
- explicit device_frame(future_texture&& texture, const void* tag, const struct pixel_format_desc& desc, field_mode type);\r
-\r
- device_frame(device_frame&& other);\r
- device_frame& operator=(device_frame&& other);\r
+ write_frame(write_frame&& other);\r
+ write_frame& operator=(write_frame&& other);\r
\r
// basic_frame\r
\r
\r
// write _frame\r
\r
- void swap(device_frame& other);\r
+ void swap(write_frame& other);\r
\r
+ boost::iterator_range<uint8_t*> image_data(int plane_index = 0); \r
audio_buffer& audio_data();\r
- \r
+ \r
+ void commit(int plane_index);\r
+ void commit();\r
+ \r
+ void set_type(const field_mode& mode);\r
field_mode get_type() const;\r
\r
const void* tag() const;\r
private:\r
friend class image_mixer;\r
\r
- const std::vector<boost::shared_future<safe_ptr<class device_buffer>>>& get_textures() const;\r
+ const std::vector<safe_ptr<class device_buffer>>& get_textures() const;\r
\r
struct impl;\r
safe_ptr<impl> impl_;\r
#include "../frame/basic_frame.h"\r
#include "../frame/frame_factory.h"\r
#include "../frame/pixel_format.h"\r
-#include "../../mixer/device_frame.h"\r
-#include "../../mixer/data_frame.h"\r
+#include "../../mixer/write_frame.h"\r
+#include "../../mixer/read_frame.h"\r
\r
#include <common/exception/exceptions.h>\r
\r
\r
class channel_consumer : public frame_consumer\r
{ \r
- tbb::concurrent_bounded_queue<std::shared_ptr<data_frame>> frame_buffer_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<read_frame>> frame_buffer_;\r
core::video_format_desc format_desc_;\r
int channel_index_;\r
tbb::atomic<bool> is_running_;\r
\r
// frame_consumer\r
\r
- virtual bool send(const safe_ptr<data_frame>& frame) override\r
+ virtual bool send(const safe_ptr<read_frame>& frame) override\r
{\r
frame_buffer_.try_push(frame);\r
return is_running_;\r
void stop()\r
{\r
is_running_ = false;\r
- frame_buffer_.try_push(make_safe<data_frame>());\r
+ frame_buffer_.try_push(make_safe<read_frame>());\r
}\r
\r
const core::video_format_desc& get_video_format_desc()\r
return format_desc_;\r
}\r
\r
- std::shared_ptr<data_frame> receive()\r
+ std::shared_ptr<read_frame> receive()\r
{\r
if(!is_running_)\r
- return make_safe<data_frame>();\r
- std::shared_ptr<data_frame> frame;\r
+ return make_safe<read_frame>();\r
+ std::shared_ptr<read_frame> frame;\r
frame_buffer_.try_pop(frame);\r
return frame;\r
}\r
return last_frame_ = frame;\r
}\r
\r
- auto data_frame = consumer_->receive();\r
- if(!data_frame || data_frame->image_data().empty())\r
+ auto read_frame = consumer_->receive();\r
+ if(!read_frame || read_frame->image_data().empty())\r
return basic_frame::late(); \r
\r
frame_number_++;\r
\r
desc.pix_fmt = core::pixel_format::bgra;\r
desc.planes.push_back(core::pixel_format_desc::plane(format_desc.width, format_desc.height, 4));\r
- auto frame = frame_factory_->create_frame(this, desc, [&](const frame_factory::range_vector_type& ranges)\r
- {\r
- A_memcpy(ranges.at(0).begin(), data_frame->image_data().begin(), data_frame->image_data().size());\r
- });\r
- \r
+ auto frame = frame_factory_->create_frame(this, desc);\r
+\r
+ A_memcpy(frame->image_data().begin(), read_frame->image_data().begin(), read_frame->image_data().size());\r
+ frame->commit();\r
+\r
frame_buffer_.push(frame); \r
\r
if(double_speed) \r
#include "../frame/basic_frame.h"\r
#include "../frame/frame_factory.h"\r
#include "../frame/pixel_format.h"\r
-#include "../../mixer/device_frame.h"\r
+#include "../../mixer/write_frame.h"\r
\r
#include <common/exception/exceptions.h>\r
\r
return create_producer_print_proxy(\r
make_safe<color_producer>(frame_factory, color2));\r
}\r
-safe_ptr<device_frame> create_color_frame(void* tag, const safe_ptr<frame_factory>& frame_factory, const std::wstring& color)\r
+safe_ptr<write_frame> create_color_frame(void* tag, const safe_ptr<frame_factory>& frame_factory, const std::wstring& color)\r
{\r
auto color2 = get_hex_color(color);\r
if(color2.length() != 9 || color2[0] != '#')\r
core::pixel_format_desc desc;\r
desc.pix_fmt = pixel_format::bgra;\r
desc.planes.push_back(core::pixel_format_desc::plane(1, 1, 4));\r
- return frame_factory->create_frame(tag, desc, [&](const frame_factory::range_vector_type& ranges)\r
- { \r
- auto& value = *reinterpret_cast<uint32_t*>(ranges.at(0).begin());\r
- std::wstringstream str(color2.substr(1));\r
- if(!(str >> std::hex >> value) || !str.eof())\r
- BOOST_THROW_EXCEPTION(invalid_argument() << arg_name_info("color") << arg_value_info(color2) << msg_info("Invalid color."));\r
- });\r
+ auto frame = frame_factory->create_frame(tag, desc);\r
+ \r
+ // Read color from hex-string and write to frame pixel.\r
+\r
+ auto& value = *reinterpret_cast<uint32_t*>(frame->image_data().begin());\r
+ std::wstringstream str(color2.substr(1));\r
+ if(!(str >> std::hex >> value) || !str.eof())\r
+ BOOST_THROW_EXCEPTION(invalid_argument() << arg_name_info("color") << arg_value_info(color2) << msg_info("Invalid color."));\r
+\r
+ frame->commit();\r
+ \r
+ return frame;\r
}\r
\r
}}
\ No newline at end of file
namespace caspar { namespace core {\r
\r
safe_ptr<struct frame_producer> create_color_producer(const safe_ptr<struct frame_factory>& frame_factory, const std::vector<std::wstring>& params);\r
-safe_ptr<class device_frame> create_color_frame(void* tag, const safe_ptr<struct frame_factory>& frame_factory, const std::wstring& color);\r
+safe_ptr<class write_frame> create_color_frame(void* tag, const safe_ptr<struct frame_factory>& frame_factory, const std::wstring& color);\r
\r
}}\r
#include <common/memory/safe_ptr.h>\r
\r
#include <boost/noncopyable.hpp>\r
-#include <boost/range.hpp>\r
\r
namespace caspar { namespace core {\r
\r
struct frame_factory : boost::noncopyable\r
{\r
- typedef boost::iterator_range<uint8_t*> range_type;\r
- typedef std::vector<range_type> range_vector_type;\r
-\r
frame_factory(){}\r
- \r
-\r
- virtual safe_ptr<class device_frame> create_frame(const void* video_stream_tag, \r
- const struct pixel_format_desc& desc, \r
- const std::function<void(const range_vector_type&)>& func, \r
- core::field_mode type = core::field_mode::progressive) = 0; \r
\r
+ virtual safe_ptr<class write_frame> create_frame(const void* video_stream_tag, const struct pixel_format_desc& desc) = 0; \r
virtual struct video_format_desc get_video_format_desc() const = 0; // nothrow\r
};\r
\r
{\r
virtual void begin(class basic_frame& frame) = 0;\r
virtual void end() = 0;\r
- virtual void visit(class device_frame& frame) = 0;\r
+ virtual void visit(class write_frame& frame) = 0;\r
};\r
\r
}}
\ No newline at end of file
#include "consumer/output.h"\r
#include "mixer/mixer.h"\r
#include "mixer/gpu/ogl_device.h"\r
-#include "mixer/device_frame.h"\r
+#include "mixer/write_frame.h"\r
#include "producer/stage.h"\r
#include "producer/frame/frame_factory.h"\r
-#include "producer/frame/pixel_format.h"\r
\r
#include <common/diagnostics/graph.h>\r
#include <common/env.h>\r
\r
// frame_factory\r
\r
- virtual safe_ptr<device_frame> create_frame(const void* tag, const core::pixel_format_desc& desc, const std::function<void(const frame_factory::range_vector_type&)>& func, core::field_mode type) override\r
+ virtual safe_ptr<write_frame> create_frame(const void* tag, const core::pixel_format_desc& desc) override\r
{ \r
- std::vector<safe_ptr<host_buffer>> buffers;\r
- BOOST_FOREACH(auto& plane, desc.planes)\r
- buffers.push_back(ogl_->create_host_buffer(plane.size, host_buffer::write_only));\r
-\r
- std::vector<boost::iterator_range<uint8_t*>> dest;\r
- boost::range::transform(buffers, std::back_inserter(dest), [](const safe_ptr<host_buffer>& buffer) -> boost::iterator_range<uint8_t*>\r
- {\r
- auto ptr = reinterpret_cast<uint8_t*>(buffer->data());\r
- return boost::iterator_range<uint8_t*>(ptr, ptr + buffer->size());\r
- });\r
-\r
- func(dest);\r
-\r
- std::vector<boost::unique_future<safe_ptr<device_buffer>>> textures;\r
- for(std::size_t n = 0; n < desc.planes.size(); ++n)\r
- {\r
- auto texture = ogl_->transfer(buffers.at(n), desc.planes[n].width, desc.planes[n].height, desc.planes[n].channels);\r
- textures.push_back(std::move(texture));\r
- }\r
- \r
- return make_safe<device_frame>(std::move(textures), tag, desc, type);\r
+ return make_safe<write_frame>(ogl_, tag, desc);\r
}\r
\r
virtual core::video_format_desc get_video_format_desc() const override\r
#include "../util/memory.h"\r
\r
#include <core/video_format.h>\r
-#include <core/mixer/data_frame.h>\r
+#include <core/mixer/read_frame.h>\r
\r
#include <common/concurrency/executor.h>\r
#include <common/diagnostics/graph.h>\r
unsigned int vid_fmt_;\r
\r
std::array<blue_dma_buffer_ptr, 4> reserved_frames_; \r
- tbb::concurrent_bounded_queue<std::shared_ptr<core::data_frame>> frame_buffer_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<core::read_frame>> frame_buffer_;\r
\r
const bool embedded_audio_;\r
const bool key_only_;\r
CASPAR_LOG(error)<< print() << TEXT(" Failed to disable video output."); \r
}\r
\r
- void send(const safe_ptr<core::data_frame>& frame)\r
+ void send(const safe_ptr<core::read_frame>& frame)\r
{ \r
executor_.begin_invoke([=]\r
{\r
});\r
}\r
\r
- void display_frame(const safe_ptr<core::data_frame>& frame)\r
+ void display_frame(const safe_ptr<core::read_frame>& frame)\r
{\r
// Sync\r
\r
CASPAR_LOG(info) << print() << L" Successfully Initialized."; \r
}\r
\r
- virtual bool send(const safe_ptr<core::data_frame>& frame) override\r
+ virtual bool send(const safe_ptr<core::read_frame>& frame) override\r
{\r
CASPAR_VERIFY(audio_cadence_.front() == static_cast<size_t>(frame->audio_data().size()));\r
boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);\r
\r
#include "../interop/DeckLinkAPI_h.h"\r
\r
-#include <core/mixer/data_frame.h>\r
+#include <core/mixer/read_frame.h>\r
\r
#include <common/concurrency/executor.h>\r
#include <common/concurrency/lock.h>\r
class decklink_frame : public IDeckLinkVideoFrame\r
{\r
tbb::atomic<int> ref_count_;\r
- std::shared_ptr<core::data_frame> frame_;\r
+ std::shared_ptr<core::read_frame> frame_;\r
const core::video_format_desc format_desc_;\r
\r
const bool key_only_;\r
std::vector<uint8_t, tbb::cache_aligned_allocator<uint8_t>> data_;\r
public:\r
- decklink_frame(const safe_ptr<core::data_frame>& frame, const core::video_format_desc& format_desc, bool key_only)\r
+ decklink_frame(const safe_ptr<core::read_frame>& frame, const core::video_format_desc& format_desc, bool key_only)\r
: frame_(frame)\r
, format_desc_(format_desc)\r
, key_only_(key_only)\r
\r
boost::circular_buffer<std::vector<int32_t>> audio_container_;\r
\r
- tbb::concurrent_bounded_queue<std::shared_ptr<core::data_frame>> video_frame_buffer_;\r
- tbb::concurrent_bounded_queue<std::shared_ptr<core::data_frame>> audio_frame_buffer_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<core::read_frame>> video_frame_buffer_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<core::read_frame>> audio_frame_buffer_;\r
\r
safe_ptr<diagnostics::graph> graph_;\r
boost::timer tick_timer_;\r
output_->BeginAudioPreroll(); \r
\r
for(int n = 0; n < buffer_size_; ++n)\r
- schedule_next_video(make_safe<core::data_frame>());\r
+ schedule_next_video(make_safe<core::read_frame>());\r
\r
if(!config.embedded_audio)\r
start_playback();\r
~decklink_consumer()\r
{ \r
is_running_ = false;\r
- video_frame_buffer_.try_push(std::make_shared<core::data_frame>());\r
- audio_frame_buffer_.try_push(std::make_shared<core::data_frame>());\r
+ video_frame_buffer_.try_push(std::make_shared<core::read_frame>());\r
+ audio_frame_buffer_.try_push(std::make_shared<core::read_frame>());\r
\r
if(output_ != nullptr) \r
{\r
else if(result == bmdOutputFrameFlushed)\r
graph_->set_tag("flushed-frame");\r
\r
- std::shared_ptr<core::data_frame> frame; \r
+ std::shared_ptr<core::read_frame> frame; \r
video_frame_buffer_.pop(frame); \r
schedule_next_video(make_safe_ptr(frame)); \r
\r
}\r
else\r
{\r
- std::shared_ptr<core::data_frame> frame;\r
+ std::shared_ptr<core::read_frame> frame;\r
audio_frame_buffer_.pop(frame);\r
schedule_next_audio(frame->audio_data());\r
}\r
audio_scheduled_ += sample_frame_count;\r
}\r
\r
- void schedule_next_video(const safe_ptr<core::data_frame>& frame)\r
+ void schedule_next_video(const safe_ptr<core::read_frame>& frame)\r
{\r
CComPtr<IDeckLinkVideoFrame> frame2(new decklink_frame(frame, format_desc_, config_.key_only));\r
if(FAILED(output_->ScheduleVideoFrame(frame2, video_scheduled_, format_desc_.duration, format_desc_.time_scale)))\r
tick_timer_.restart();\r
}\r
\r
- void send(const safe_ptr<core::data_frame>& frame)\r
+ void send(const safe_ptr<core::read_frame>& frame)\r
{\r
auto exception = lock(exception_mutex_, [&]\r
{\r
});\r
}\r
\r
- virtual bool send(const safe_ptr<core::data_frame>& frame) override\r
+ virtual bool send(const safe_ptr<core::read_frame>& frame) override\r
{\r
CASPAR_VERIFY(audio_cadence_.front() == static_cast<int>(frame->audio_data().size()));\r
boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);\r
#include <common/log.h>\r
#include <common/utility/param.h>\r
\r
-#include <core/mixer/device_frame.h>\r
+#include <core/mixer/write_frame.h>\r
#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/frame_factory.h>\r
\r
\r
#include "ffmpeg_consumer.h"\r
\r
-#include <core/mixer/data_frame.h>\r
+#include <core/mixer/read_frame.h>\r
#include <core/mixer/audio/audio_util.h>\r
#include <core/consumer/frame_consumer.h>\r
#include <core/video_format.h>\r
});\r
}\r
\r
- std::shared_ptr<AVFrame> convert_video_frame(const safe_ptr<core::data_frame>& frame, AVCodecContext* c)\r
+ std::shared_ptr<AVFrame> convert_video_frame(const safe_ptr<core::read_frame>& frame, AVCodecContext* c)\r
{\r
if(!sws_) \r
{\r
return local_av_frame;\r
}\r
\r
- std::shared_ptr<AVPacket> encode_video_frame(const safe_ptr<core::data_frame>& frame)\r
+ std::shared_ptr<AVPacket> encode_video_frame(const safe_ptr<core::read_frame>& frame)\r
{ \r
auto c = video_st_->codec;\r
\r
return nullptr;\r
}\r
\r
- std::shared_ptr<AVPacket> encode_audio_frame(const safe_ptr<core::data_frame>& frame)\r
+ std::shared_ptr<AVPacket> encode_audio_frame(const safe_ptr<core::read_frame>& frame)\r
{ \r
auto c = audio_st_->codec;\r
\r
return pkt;\r
}\r
\r
- void send(const safe_ptr<core::data_frame>& frame)\r
+ void send(const safe_ptr<core::read_frame>& frame)\r
{\r
executor_.begin_invoke([=]\r
{ \r
consumer_.reset(new ffmpeg_consumer(filename_, format_desc, codec_, options_));\r
}\r
\r
- virtual bool send(const safe_ptr<core::data_frame>& frame) override\r
+ virtual bool send(const safe_ptr<core::read_frame>& frame) override\r
{\r
consumer_->send(frame);\r
return true;\r
#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/pixel_format.h>\r
#include <core/producer/frame/frame_factory.h>\r
-#include <core/mixer/device_frame.h>\r
+#include <core/mixer/write_frame.h>\r
\r
#include <common/env.h>\r
#include <common/exception/exceptions.h>\r
\r
struct frame_muxer::impl : boost::noncopyable\r
{ \r
- std::queue<std::queue<safe_ptr<device_frame>>> video_streams_;\r
+ std::queue<std::queue<safe_ptr<write_frame>>> video_streams_;\r
std::queue<core::audio_buffer> audio_streams_;\r
std::queue<safe_ptr<basic_frame>> frame_buffer_;\r
display_mode display_mode_;\r
, filter_str_(filter_str)\r
, force_deinterlacing_(false)\r
{\r
- video_streams_.push(std::queue<safe_ptr<device_frame>>());\r
+ video_streams_.push(std::queue<safe_ptr<write_frame>>());\r
audio_streams_.push(core::audio_buffer());\r
\r
// Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)\r
\r
if(video_frame == flush_video())\r
{ \r
- video_streams_.push(std::queue<safe_ptr<device_frame>>());\r
+ video_streams_.push(std::queue<safe_ptr<write_frame>>());\r
}\r
else if(video_frame == empty_video())\r
{\r
- video_streams_.back().push(make_safe<core::device_frame>(this));\r
+ video_streams_.back().push(make_safe<core::write_frame>(this));\r
display_mode_ = display_mode::simple;\r
}\r
else\r
if(video_frame->format == PIX_FMT_GRAY8 && format == CASPAR_PIX_FMT_LUMA)\r
av_frame->format = format;\r
\r
- video_streams_.back().push(make_device_frame(this, av_frame, frame_factory_, flags));\r
+ video_streams_.back().push(make_write_frame(this, av_frame, frame_factory_, flags));\r
}\r
}\r
\r
}\r
case display_mode::duplicate: \r
{\r
- auto frame2 = make_safe<core::device_frame>(*frame1);\r
+ auto frame2 = make_safe<core::write_frame>(*frame1);\r
frame2->audio_data() = pop_audio();\r
\r
frame_buffer_.push(frame1);\r
return frame_buffer_.empty() ? nullptr : poll();\r
}\r
\r
- safe_ptr<core::device_frame> pop_video()\r
+ safe_ptr<core::write_frame> pop_video()\r
{\r
auto frame = video_streams_.front().front();\r
video_streams_.front().pop(); \r
filter_.push(frame);\r
auto av_frame = filter_.poll();\r
if(av_frame) \r
- video_streams_.back().push(make_device_frame(this, make_safe_ptr(av_frame), frame_factory_, 0));\r
+ video_streams_.back().push(make_write_frame(this, make_safe_ptr(av_frame), frame_factory_, 0));\r
}\r
filter_ = filter(filter_str);\r
CASPAR_LOG(info) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps_, frame->interlaced_frame > 0);\r
\r
namespace core {\r
\r
-class device_frame;\r
+class write_frame;\r
class basic_frame;\r
struct frame_factory;\r
\r
#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/producer/frame_producer.h>\r
-#include <core/mixer/device_frame.h>\r
+#include <core/mixer/write_frame.h>\r
\r
#include <common/exception/exceptions.h>\r
\r
}\r
}\r
\r
-safe_ptr<core::device_frame> make_device_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int flags)\r
+safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int flags)\r
{ \r
static tbb::concurrent_unordered_map<int, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contexts_;\r
\r
if(decoded_frame->width < 1 || decoded_frame->height < 1)\r
- return make_safe<core::device_frame>(tag);\r
+ return make_safe<core::write_frame>(tag);\r
\r
const auto width = decoded_frame->width;\r
const auto height = decoded_frame->height;\r
if(flags & core::frame_producer::flags::alpha_only)\r
desc = get_pixel_format_desc(static_cast<PixelFormat>(make_alpha_format(decoded_frame->format)), width, height);\r
\r
- std::shared_ptr<core::device_frame> write;\r
+ std::shared_ptr<core::write_frame> write;\r
\r
if(desc.pix_fmt == core::pixel_format::invalid)\r
{\r
\r
auto target_desc = get_pixel_format_desc(target_pix_fmt, width, height);\r
\r
- write = frame_factory->create_frame(tag, target_desc, [&](const std::vector<boost::iterator_range<uint8_t*>>& dest)\r
- {\r
- std::shared_ptr<SwsContext> sws_context;\r
+ write = frame_factory->create_frame(tag, target_desc);\r
+ write->set_type(get_mode(*decoded_frame));\r
+\r
+ std::shared_ptr<SwsContext> sws_context;\r
\r
- //CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
+ //CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
\r
- int key = ((width << 22) & 0xFFC00000) | ((height << 6) & 0x003FC000) | ((pix_fmt << 7) & 0x00007F00) | ((target_pix_fmt << 0) & 0x0000007F);\r
+ int key = ((width << 22) & 0xFFC00000) | ((height << 6) & 0x003FC000) | ((pix_fmt << 7) & 0x00007F00) | ((target_pix_fmt << 0) & 0x0000007F);\r
\r
- auto& pool = sws_contexts_[key];\r
+ auto& pool = sws_contexts_[key];\r
\r
- if(!pool.try_pop(sws_context))\r
- {\r
- double param;\r
- sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, target_pix_fmt, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);\r
- }\r
+ if(!pool.try_pop(sws_context))\r
+ {\r
+ double param;\r
+ sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, target_pix_fmt, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);\r
+ }\r
\r
- if(!sws_context)\r
- {\r
- BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") << \r
- boost::errinfo_api_function("sws_getContext"));\r
- } \r
+ if(!sws_context)\r
+ {\r
+ BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") << \r
+ boost::errinfo_api_function("sws_getContext"));\r
+ } \r
\r
- safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); \r
- avcodec_get_frame_defaults(av_frame.get()); \r
- if(target_pix_fmt == PIX_FMT_BGRA)\r
- {\r
- avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), dest.at(0).begin(), PIX_FMT_BGRA, width, height);\r
- }\r
- else\r
+ safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); \r
+ avcodec_get_frame_defaults(av_frame.get()); \r
+ if(target_pix_fmt == PIX_FMT_BGRA)\r
+ {\r
+ auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);\r
+ CASPAR_VERIFY(size == write->image_data().size()); \r
+ }\r
+ else\r
+ {\r
+ av_frame->width = width;\r
+ av_frame->height = height;\r
+ for(int n = 0; n < target_desc.planes.size(); ++n)\r
{\r
- av_frame->width = width;\r
- av_frame->height = height;\r
- for(int n = 0; n < target_desc.planes.size(); ++n)\r
- {\r
- av_frame->data[n] = dest.at(n).begin();\r
- av_frame->linesize[n] = target_desc.planes[n].linesize;\r
- }\r
+ av_frame->data[n] = write->image_data(n).begin();\r
+ av_frame->linesize[n] = target_desc.planes[n].linesize;\r
}\r
+ }\r
+\r
+ sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize); \r
+ pool.push(sws_context);\r
\r
- sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize); \r
- pool.push(sws_context); \r
- }, get_mode(*decoded_frame));\r
+ write->commit(); \r
}\r
else\r
{\r
- write = frame_factory->create_frame(tag, desc, [&](const std::vector<boost::iterator_range<uint8_t*>>& dest)\r
+ write = frame_factory->create_frame(tag, desc);\r
+ write->set_type(get_mode(*decoded_frame));\r
+\r
+ for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)\r
{\r
- for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)\r
- {\r
- auto plane = desc.planes[n];\r
- auto result = dest.at(n).begin();\r
- auto decoded = decoded_frame->data[n];\r
- auto decoded_linesize = decoded_frame->linesize[n];\r
+ auto plane = desc.planes[n];\r
+ auto result = write->image_data(n).begin();\r
+ auto decoded = decoded_frame->data[n];\r
+ auto decoded_linesize = decoded_frame->linesize[n];\r
\r
- CASPAR_ASSERT(decoded);\r
- CASPAR_ASSERT(dest.at(n).begin());\r
+ CASPAR_ASSERT(decoded);\r
+ CASPAR_ASSERT(write->image_data(n).begin());\r
\r
- // Copy line by line since ffmpeg sometimes pads each line.\r
- tbb::affinity_partitioner ap;\r
- tbb::parallel_for(tbb::blocked_range<int>(0, desc.planes[n].height), [&](const tbb::blocked_range<int>& r)\r
- {\r
- for(int y = r.begin(); y != r.end(); ++y)\r
- A_memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
- }, ap);\r
- }\r
- }, get_mode(*decoded_frame));\r
+ // Copy line by line since ffmpeg sometimes pads each line.\r
+ tbb::affinity_partitioner ap;\r
+ tbb::parallel_for(tbb::blocked_range<int>(0, desc.planes[n].height), [&](const tbb::blocked_range<int>& r)\r
+ {\r
+ for(int y = r.begin(); y != r.end(); ++y)\r
+ A_memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
+ }, ap);\r
+\r
+ write->commit(n);\r
+ }\r
}\r
\r
if(decoded_frame->height == 480) // NTSC DV\r
namespace core {\r
\r
struct pixel_format_desc;\r
-class device_frame;\r
+class write_frame;\r
struct frame_factory;\r
\r
}\r
\r
core::field_mode get_mode(const AVFrame& frame);\r
int make_alpha_format(int format); // NOTE: Be careful about CASPAR_PIX_FMT_LUMA, change it to PIX_FMT_GRAY8 if you want to use the frame inside some ffmpeg function.\r
-safe_ptr<core::device_frame> make_device_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int flags);\r
+safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int flags);\r
\r
safe_ptr<AVPacket> create_packet();\r
\r
\r
namespace core {\r
struct frame_factory;\r
- class device_frame;\r
+ class write_frame;\r
}\r
\r
namespace ffmpeg {\r
#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/producer/frame/pixel_format.h>\r
-#include <core/mixer/device_frame.h>\r
+#include <core/mixer/write_frame.h>\r
\r
#include <common/env.h>\r
#include <common/concurrency/executor.h>\r
core::pixel_format_desc desc;\r
desc.pix_fmt = core::pixel_format::bgra;\r
desc.planes.push_back(core::pixel_format_desc::plane(width_, height_, 4));\r
- head_ = frame_factory_->create_frame(this, desc, [&](const core::frame_factory::range_vector_type& ranges)\r
- {\r
- A_memcpy(ranges.at(0).begin(), bmp_.data(), width_*height_*4);\r
- });\r
+ auto frame = frame_factory_->create_frame(this, desc);\r
+\r
+ A_memcpy(frame->image_data().begin(), bmp_.data(), width_*height_*4);\r
+ frame->commit();\r
+ head_ = frame;\r
} \r
\r
graph_->set_value("frame-time", static_cast<float>(frame_timer_.elapsed()/frame_time)*0.5f);\r
\r
#include <core/consumer/frame_consumer.h>\r
#include <core/video_format.h>\r
-#include <core/mixer/data_frame.h>\r
+#include <core/mixer/read_frame.h>\r
\r
#include <boost/date_time/posix_time/posix_time.hpp>\r
#include <boost/thread.hpp>\r
{\r
}\r
\r
- virtual bool send(const safe_ptr<core::data_frame>& frame) override\r
+ virtual bool send(const safe_ptr<core::read_frame>& frame) override\r
{ \r
boost::thread async([frame]\r
{\r
#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/producer/frame/pixel_format.h>\r
-#include <core/mixer/device_frame.h>\r
+#include <core/mixer/write_frame.h>\r
\r
#include <common/env.h>\r
#include <common/log.h>\r
core::pixel_format_desc desc;\r
desc.pix_fmt = core::pixel_format::bgra;\r
desc.planes.push_back(core::pixel_format_desc::plane(FreeImage_GetWidth(bitmap.get()), FreeImage_GetHeight(bitmap.get()), 4));\r
+ auto frame = frame_factory->create_frame(this, desc);\r
\r
- frame_ = frame_factory->create_frame(this, desc, [&](const core::frame_factory::range_vector_type& ranges)\r
- {\r
- std::copy_n(FreeImage_GetBits(bitmap.get()), ranges[0].size(), ranges[0].begin());\r
- });\r
+ std::copy_n(FreeImage_GetBits(bitmap.get()), frame->image_data().size(), frame->image_data().begin());\r
+ frame->commit();\r
+ frame_ = std::move(frame);\r
}\r
\r
// frame_producer\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/pixel_format.h>\r
-#include <core/mixer/device_frame.h>\r
+#include <core/mixer/write_frame.h>\r
\r
#include <common/env.h>\r
#include <common/log.h>\r
core::pixel_format_desc desc;\r
desc.pix_fmt = core::pixel_format::bgra;\r
desc.planes.push_back(core::pixel_format_desc::plane(width_, format_desc_.height, 4));\r
- auto frame = frame_factory->create_frame(reinterpret_cast<void*>(rand()), desc, [&](const core::frame_factory::range_vector_type& ranges)\r
- { \r
- if(count >= ranges.at(0).size())\r
- { \r
- std::copy_n(bytes + count - ranges.at(0).size(), ranges.at(0).size(), ranges.at(0).begin());\r
- count -= static_cast<int>(ranges.at(0).size());\r
- }\r
- else\r
- {\r
- memset(ranges.at(0).begin(), 0, ranges.at(0).size()); \r
- std::copy_n(bytes, count, ranges.at(0).begin() + format_desc_.size - count);\r
- count = 0;\r
- } \r
- });\r
+ auto frame = frame_factory->create_frame(reinterpret_cast<void*>(rand()), desc);\r
\r
+ if(count >= frame->image_data().size())\r
+ { \r
+ std::copy_n(bytes + count - frame->image_data().size(), frame->image_data().size(), frame->image_data().begin());\r
+ count -= static_cast<int>(frame->image_data().size());\r
+ }\r
+ else\r
+ {\r
+ memset(frame->image_data().begin(), 0, frame->image_data().size()); \r
+ std::copy_n(bytes, count, frame->image_data().begin() + format_desc_.size - count);\r
+ count = 0;\r
+ }\r
+ \r
+ frame->commit();\r
frames_.push_back(frame);\r
}\r
\r
core::pixel_format_desc desc;\r
desc.pix_fmt = core::pixel_format::bgra;\r
desc.planes.push_back(core::pixel_format_desc::plane(format_desc_.width, height_, 4));\r
- auto frame = frame_factory->create_frame(reinterpret_cast<void*>(rand()), desc, [&](const core::frame_factory::range_vector_type& ranges)\r
+ auto frame = frame_factory->create_frame(reinterpret_cast<void*>(rand()), desc);\r
+ if(count >= frame->image_data().size())\r
{ \r
- if(count >= ranges.at(0).size())\r
- { \r
- for(int y = 0; y < height_; ++y)\r
- std::copy_n(bytes + i * format_desc_.width*4 + y * width_*4, format_desc_.width*4, ranges.at(0).begin() + y * format_desc_.width*4);\r
+ for(int y = 0; y < height_; ++y)\r
+ std::copy_n(bytes + i * format_desc_.width*4 + y * width_*4, format_desc_.width*4, frame->image_data().begin() + y * format_desc_.width*4);\r
\r
- ++i;\r
- count -= static_cast<int>(ranges.at(0).size());\r
- }\r
- else\r
- {\r
- memset(ranges.at(0).begin(), 0, ranges.at(0).size()); \r
- int width2 = width_ % format_desc_.width;\r
- for(int y = 0; y < height_; ++y)\r
- std::copy_n(bytes + i * format_desc_.width*4 + y * width_*4, width2*4, ranges.at(0).begin() + y * format_desc_.width*4);\r
-\r
- count = 0;\r
- }\r
- });\r
+ ++i;\r
+ count -= static_cast<int>(frame->image_data().size());\r
+ }\r
+ else\r
+ {\r
+ memset(frame->image_data().begin(), 0, frame->image_data().size()); \r
+ int width2 = width_ % format_desc_.width;\r
+ for(int y = 0; y < height_; ++y)\r
+ std::copy_n(bytes + i * format_desc_.width*4 + y * width_*4, width2*4, frame->image_data().begin() + y * format_desc_.width*4);\r
+\r
+ count = 0;\r
+ }\r
\r
+ frame->commit();\r
frames_.push_back(frame);\r
}\r
\r
#include <core/mixer/audio/audio_util.h>\r
#include <core/video_format.h>\r
\r
-#include <core/mixer/data_frame.h>\r
+#include <core/mixer/read_frame.h>\r
\r
#include <SFML/Audio/SoundStream.hpp>\r
\r
CASPAR_LOG(info) << print() << " Sucessfully Initialized.";\r
}\r
\r
- virtual bool send(const safe_ptr<core::data_frame>& frame) override\r
+ virtual bool send(const safe_ptr<core::read_frame>& frame) override\r
{ \r
input_.push(std::make_shared<audio_buffer_16>(core::audio_32_to_16(frame->audio_data())));\r
return true;\r
#include <ffmpeg/producer/filter/filter.h>\r
\r
#include <core/video_format.h>\r
-#include <core/mixer/data_frame.h>\r
+#include <core/mixer/read_frame.h>\r
#include <core/consumer/frame_consumer.h>\r
\r
#include <boost/timer.hpp>\r
boost::timer perf_timer_;\r
boost::timer tick_timer_;\r
\r
- tbb::concurrent_bounded_queue<safe_ptr<core::data_frame>> frame_buffer_;\r
+ tbb::concurrent_bounded_queue<safe_ptr<core::read_frame>> frame_buffer_;\r
\r
boost::thread thread_;\r
tbb::atomic<bool> is_running_;\r
~ogl_consumer()\r
{\r
is_running_ = false;\r
- frame_buffer_.try_push(make_safe<core::data_frame>());\r
+ frame_buffer_.try_push(make_safe<core::read_frame>());\r
thread_.join();\r
}\r
\r
is_running_ = false;\r
}\r
\r
- safe_ptr<core::data_frame> frame;\r
+ safe_ptr<core::read_frame> frame;\r
frame_buffer_.pop(frame);\r
\r
perf_timer_.restart();\r
return av_frame;\r
}\r
\r
- void render(const safe_ptr<core::data_frame>& frame)\r
+ void render(const safe_ptr<core::read_frame>& frame)\r
{ \r
if(static_cast<int>(frame->image_data().size()) != format_desc_.size)\r
return;\r
std::rotate(pbos_.begin(), pbos_.begin() + 1, pbos_.end());\r
}\r
\r
- bool send(const safe_ptr<core::data_frame>& frame)\r
+ bool send(const safe_ptr<core::read_frame>& frame)\r
{\r
if(!frame_buffer_.try_push(frame))\r
graph_->set_tag("dropped-frame");\r
CASPAR_LOG(info) << print() << L" Successfully Initialized."; \r
}\r
\r
- virtual bool send(const safe_ptr<core::data_frame>& frame) override\r
+ virtual bool send(const safe_ptr<core::read_frame>& frame) override\r
{\r
return consumer_->send(frame);\r
}\r
<?xml version="1.0" encoding="utf-8"?>\r
<configuration>\r
<paths>\r
- <media-path>D:\casparcg\_media\</media-path>\r
- <log-path>D:\casparcg\_log\</log-path>\r
- <data-path>D:\casparcg\_data\</data-path>\r
- <template-path>D:\casparcg\_templates\</template-path>\r
+ <media-path>M:\caspar\_media\</media-path>\r
+ <log-path>M:\caspar\_log\</log-path>\r
+ <data-path>M:\caspar\_data\</data-path>\r
+ <template-path>M:\caspar\_templates\</template-path>\r
</paths>\r
<log-level>trace</log-level>\r
<channel-grid>true</channel-grid>\r
<channel>\r
<video-mode>720p5000</video-mode>\r
<consumers>\r
- </consumers>\r
- </channel>\r
- <channel>\r
- <video-mode>720p5000</video-mode>\r
- <consumers>\r
- </consumers>\r
- </channel>\r
- <channel>\r
- <video-mode>720p5000</video-mode>\r
- <consumers>\r
+ <screen>\r
+ <device>1</device>\r
+ </screen>\r
</consumers>\r
</channel>\r
</channels>\r
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
<MultiProcessorCompilation>true</MultiProcessorCompilation>\r
<PreprocessorDefinitions>TBB_USE_CAPTURED_EXCEPTION=0;NDEBUG;COMPILE_RELEASE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
- <WholeProgramOptimization>false</WholeProgramOptimization>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
<TreatWarningAsError>true</TreatWarningAsError>\r
<OmitFramePointers>true</OmitFramePointers>\r
<FloatingPointModel>Fast</FloatingPointModel>\r