2 * Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
4 * This file is part of CasparCG (www.casparcg.com).
6 * CasparCG is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
11 * CasparCG is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
19 * Author: Robert Nagy, ronag89@gmail.com
24 #include "../../StdAfx.h"
32 #include <common/assert.h>
33 #include <common/except.h>
34 #include <common/future.h>
35 #include <common/array.h>
36 #include <common/memory.h>
37 #include <common/gl/gl_check.h>
38 #include <common/timer.h>
42 #include <SFML/Window/Context.hpp>
44 #include <tbb/concurrent_unordered_map.h>
45 #include <tbb/concurrent_hash_map.h>
46 #include <tbb/concurrent_queue.h>
48 #include <boost/utility/declval.hpp>
49 #include <boost/property_tree/ptree.hpp>
52 #include <unordered_map>
54 #include <tbb/parallel_for.h>
56 namespace caspar { namespace accelerator { namespace ogl {
// Pimpl for the OpenGL device: owns the GL context, recycling pools for
// textures and host buffers, and a cache mapping host buffers to textures.
// enable_shared_from_this lets buffer deleters keep the device alive while
// buffers are still in flight outside the GL thread (see create_buffer).
58 struct device::impl : public std::enable_shared_from_this<impl>
// Compile-time check that the public device class stores this impl as
// spl::shared_ptr<impl>, which the facade code at the bottom relies on.
60 	static_assert(std::is_same<decltype(boost::declval<device>().impl_), spl::shared_ptr<impl>>::value, "impl_ must be shared_ptr");
// Maps a host buffer to the texture last uploaded from it, so repeated
// copy_async calls with the same buffer can reuse the upload (see copy_async).
62 	tbb::concurrent_hash_map<buffer*, std::shared_ptr<texture>> texture_cache_;
// The actual OpenGL context (SFML-owned); created/activated in the ctor.
64 	std::unique_ptr<sf::Context> device_;
// 8 texture pools: index = (stride - 1) + (mipmapped ? 4 : 0); within a pool
// the key packs (width << 16) | height — see create_texture and info().
66 	std::array<tbb::concurrent_unordered_map<std::size_t, tbb::concurrent_bounded_queue<std::shared_ptr<texture>>>, 8>	device_pools_;
// 2 host-buffer pools, indexed by buffer::usage (read_only / write_only),
// keyed by buffer size in bytes — see create_buffer and info().
67 	std::array<tbb::concurrent_unordered_map<std::size_t, tbb::concurrent_bounded_queue<std::shared_ptr<buffer>>>, 2>	host_pools_;
// Constructor: creates and activates the GL context, initializes GLEW,
// and sets up the single FBO all rendering binds to. Throws if GLEW fails
// or the hardware does not support the required OpenGL version.
73 	impl(executor& executor)
// Bound the task queue so producers block instead of growing unboundedly.
76 		executor_.set_capacity(256);
78 		CASPAR_LOG(info) << L"Initializing OpenGL Device.";
// Create the SFML-managed GL context and make it current on this thread.
82 			device_.reset(new sf::Context());
83 			device_->setActive(true);
85 			if (glewInit() != GLEW_OK)
86 				CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize GLEW."));
// NOTE(review): the guard condition for this throw is not visible in this
// excerpt — presumably a !GLEW_VERSION_3_0 check; confirm against upstream.
89 			CASPAR_THROW_EXCEPTION(not_supported() << msg_info("Your graphics card does not meet the minimum hardware requirements since it does not support OpenGL 3.0 or higher."));
// One shared framebuffer object, kept bound for the device's lifetime.
91 			glGenFramebuffers(1, &fbo_);
92 			glBindFramebuffer(GL_FRAMEBUFFER, fbo_);
95 		CASPAR_LOG(info) << L"Successfully initialized OpenGL " << version();
// Destructor body fragment (the ~impl signature / executor-invoke wrapper is
// not visible in this excerpt). Tears down GL resources on the GL thread:
// clears the texture cache and both pool arrays, then deletes the FBO.
100 		auto context = executor_.is_current() ? std::string() : get_context();
104 			CASPAR_SCOPED_CONTEXT_MSG(context);
105 			texture_cache_.clear();
// Drain pooled host buffers and pooled textures while the context is alive.
107 			for (auto& pool : host_pools_)
110 			for (auto& pool : device_pools_)
113 			glDeleteFramebuffers(1, &fbo_);
// Builds a diagnostics tree describing current pool occupancy: per-pool
// entries plus aggregate counts/sizes for device (texture) and host buffers.
119 	boost::property_tree::wptree info() const
121 		boost::property_tree::wptree info;
123 		boost::property_tree::wptree pooled_device_buffers;
124 		size_t total_pooled_device_buffer_size	= 0;
125 		size_t total_pooled_device_buffer_count	= 0;
127 		for (size_t i = 0; i < device_pools_.size(); ++i)
129 			auto& pools		= device_pools_.at(i);
// Inverse of create_texture's pool index (stride - 1 + (mipmapped ? 4 : 0)):
// indices 4..7 are the mipmapped pools, so stride = i - 3 there, else i + 1.
130 			bool mipmapping = i > 3;
131 			auto stride		= mipmapping ? i - 3 : i + 1;
133 			for (auto& pool : pools)
// Unpack create_texture's (width << 16) | height pool key.
135 				auto width	= pool.first >> 16;
136 				auto height	= pool.first & 0x0000FFFF;
// Bytes per texture: width * height * bytes-per-pixel (stride).
137 				auto size	= width * height * stride;
138 				auto count	= pool.second.size();
143 				boost::property_tree::wptree pool_info;
145 				pool_info.add(L"stride", stride);
146 				pool_info.add(L"mipmapping", mipmapping);
147 				pool_info.add(L"width", width);
148 				pool_info.add(L"height", height);
149 				pool_info.add(L"size", size);
150 				pool_info.add(L"count", count);
152 				total_pooled_device_buffer_size		+= size * count;
153 				total_pooled_device_buffer_count	+= count;
155 				pooled_device_buffers.add_child(L"device_buffer_pool", pool_info);
159 		info.add_child(L"gl.details.pooled_device_buffers", pooled_device_buffers);
// Host-buffer pools: indexed by usage, keyed directly by buffer size.
161 		boost::property_tree::wptree pooled_host_buffers;
162 		size_t total_read_size		= 0;
163 		size_t total_write_size		= 0;
164 		size_t total_read_count		= 0;
165 		size_t total_write_count	= 0;
167 		for (size_t i = 0; i < host_pools_.size(); ++i)
169 			auto& pools	= host_pools_.at(i);
170 			auto usage	= static_cast<buffer::usage>(i);
172 			for (auto& pool : pools)
174 				auto size	= pool.first;
175 				auto count	= pool.second.size();
180 				boost::property_tree::wptree pool_info;
182 				pool_info.add(L"usage", usage == buffer::usage::read_only ? L"read_only" : L"write_only");
183 				pool_info.add(L"size", size);
184 				pool_info.add(L"count", count);
186 				pooled_host_buffers.add_child(L"host_buffer_pool", pool_info);
// Accumulate into the read or write totals depending on pool usage.
188 				(usage == buffer::usage::read_only ? total_read_count : total_write_count)	+= count;
189 				(usage == buffer::usage::read_only ? total_read_size : total_write_size)	+= size * count;
193 		info.add_child(L"gl.details.pooled_host_buffers", pooled_host_buffers);
194 		info.add(L"gl.summary.pooled_device_buffers.total_count", total_pooled_device_buffer_count);
195 		info.add(L"gl.summary.pooled_device_buffers.total_size", total_pooled_device_buffer_size);
// Global (not just pooled) allocation stats come from the static trackers.
196 		info.add_child(L"gl.summary.all_device_buffers", texture::info());
197 		info.add(L"gl.summary.pooled_host_buffers.total_read_count", total_read_count);
198 		info.add(L"gl.summary.pooled_host_buffers.total_write_count", total_write_count);
199 		info.add(L"gl.summary.pooled_host_buffers.total_read_size", total_read_size);
200 		info.add(L"gl.summary.pooled_host_buffers.total_write_size", total_write_size);
201 		info.add_child(L"gl.summary.all_host_buffers", buffer::info());
206 std::wstring version()
210 return executor_.invoke([]
212 return u16(reinterpret_cast<const char*>(GL2(glGetString(GL_VERSION)))) + L" " + u16(reinterpret_cast<const char*>(GL2(glGetString(GL_VENDOR))));
217 return L"Not found";;
// Allocates (or recycles from the matching pool) a texture of the given
// dimensions, stride (bytes per pixel, 1..4) and mipmapping. Must be called
// on the GL thread. The returned shared_ptr's deleter returns the texture to
// its pool instead of destroying it.
221 	spl::shared_ptr<texture> create_texture(int width, int height, int stride, bool mipmapped, bool clear)
223 		CASPAR_VERIFY(stride > 0 && stride < 5);
224 		CASPAR_VERIFY(width > 0 && height > 0);
226 		if(!executor_.is_current())
227 			CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Operation only valid in an OpenGL Context."));
// Pool index encodes stride and mipmapping; pool key packs width/height
// into 16 bits each (decoded by info()). Heights/widths must fit 16 bits.
229 		auto pool = &device_pools_[stride - 1 + (mipmapped ? 4 : 0)][((width << 16) & 0xFFFF0000) | (height & 0x0000FFFF)];
231 		std::shared_ptr<texture> tex;
232 		if(!pool->try_pop(tex))
233 			tex = spl::make_shared<texture>(width, height, stride, mipmapped);
// Aliasing shared_ptr: deleter captures tex + pool and recycles the texture
// (deleter body elided in this excerpt; presumably clears and pushes back).
238 		return spl::shared_ptr<texture>(tex.get(), [tex, pool](texture*) mutable
// Allocates (or recycles) a host buffer of the given size/usage. Safe to call
// from any thread: actual GL allocation is marshalled onto the executor.
// The returned shared_ptr's deleter returns the buffer to its pool and evicts
// any cached texture uploaded from it; a weak_ptr keeps this from touching a
// destroyed device, since buffers may outlive it.
244 	spl::shared_ptr<buffer> create_buffer(std::size_t size, buffer::usage usage)
246 		CASPAR_VERIFY(size > 0);
// Pool index is the usage enum value; key is the exact byte size.
248 		auto pool = &host_pools_[static_cast<int>(usage)][size];
250 		std::shared_ptr<buffer> buf;
251 		if(!pool->try_pop(buf))
255 			auto context = executor_.is_current() ? std::string() : get_context();
// Allocation must happen on the GL thread; high priority to minimize stalls.
257 			buf = executor_.invoke([&]
259 				CASPAR_SCOPED_CONTEXT_MSG(context);
260 				return std::make_shared<buffer>(size, usage);
261 			}, task_priority::high_priority);
// Warn when a caller was blocked noticeably (> 20 ms) waiting for allocation.
263 			if(timer.elapsed() > 0.02)
264 				CASPAR_LOG(warning) << L"[ogl-device] Performance warning. Buffer allocation blocked: " << timer.elapsed();
267 		std::weak_ptr<impl> self = shared_from_this(); // buffers can leave the device context, take a hold on life-time.
268 		return spl::shared_ptr<buffer>(buf.get(), [=](buffer*) mutable
270 			auto strong = self.lock();
274 				auto context = executor_.is_current() ? std::string() : get_context();
276 				strong->executor_.invoke([&]
278 					CASPAR_SCOPED_CONTEXT_MSG(context);
// Drop the stale cache entry so future uploads don't reuse an old texture.
279 					strong->texture_cache_.erase(buf.get());
280 				}, task_priority::high_priority);
// Device already destroyed: nothing to recycle into; buffer just dies.
286 				CASPAR_LOG(info) << L"Buffer outlived ogl device";
// Wraps a pooled write-only host buffer in a caspar array view of its bytes;
// the buffer is kept alive as the array's storage (last argument).
291 	array<std::uint8_t> create_array(std::size_t size)
293 		auto buf = create_buffer(size, buffer::usage::write_only);
294 		return array<std::uint8_t>(buf->data(), buf->size(), false, buf);
// Template helper (template header line not visible in this excerpt): obtains
// a device-pool buffer holding the source bytes. If the source array already
// carries a pooled buffer as its storage, that buffer is reused directly;
// otherwise a new write-only buffer is allocated and filled with a parallel
// memcpy over chunked ranges.
298 	std::shared_ptr<buffer> copy_to_buf(const T& source)
300 		std::shared_ptr<buffer> buf;
// storage<T>() probes the array's type-erased backing store for a buffer.
302 		auto tmp = source.template storage<spl::shared_ptr<buffer>>();
307 			buf = create_buffer(source.size(), buffer::usage::write_only);
308 			tbb::parallel_for(tbb::blocked_range<std::size_t>(0, source.size()), [&](const tbb::blocked_range<std::size_t>& r)
310 				std::memcpy(buf->data() + r.begin(), source.data() + r.begin(), r.size());
317 	// TODO: Since the returned texture is cached it SHOULD NOT be modified.
// Uploads an immutable host array to a texture, memoized per backing buffer:
// repeated uploads from the same buffer return the cached texture instead of
// re-copying. Runs on the executor; returns a future to the texture.
318 	std::future<std::shared_ptr<texture>> copy_async(const array<const std::uint8_t>& source, int width, int height, int stride, bool mipmapped)
320 		std::shared_ptr<buffer> buf = copy_to_buf(source);
321 		auto context = executor_.is_current() ? std::string() : get_context();
323 		return executor_.begin_invoke([=]() -> std::shared_ptr<texture>
325 			CASPAR_SCOPED_CONTEXT_MSG(context);
// Cache hit: reuse the texture previously uploaded from this exact buffer.
326 			tbb::concurrent_hash_map<buffer*, std::shared_ptr<texture>>::const_accessor a;
327 			if(texture_cache_.find(a, buf.get()))
328 				return spl::make_shared_ptr(a->second);
330 			auto texture = create_texture(width, height, stride, mipmapped, false);
331 			texture->copy_from(*buf);
// Remember the upload; the entry is evicted by create_buffer's deleter when
// the backing buffer is returned to its pool.
333 			texture_cache_.insert(std::make_pair(buf.get(), texture));
336 		}, task_priority::high_priority);
// Uploads a mutable host array to a fresh texture — no caching here, since
// the caller may modify the source bytes afterwards (contrast with the
// const-array overload above).
339 	std::future<std::shared_ptr<texture>> copy_async(const array<std::uint8_t>& source, int width, int height, int stride, bool mipmapped)
341 		std::shared_ptr<buffer> buf = copy_to_buf(source);
342 		auto context = executor_.is_current() ? std::string() : get_context();
344 		return executor_.begin_invoke([=]() -> std::shared_ptr<texture>
346 			CASPAR_SCOPED_CONTEXT_MSG(context);
347 			auto texture = create_texture(width, height, stride, mipmapped, false);
348 			texture->copy_from(*buf);
351 		}, task_priority::high_priority);
// Starts an asynchronous GPU -> host readback of a texture. Must be called
// on the GL thread (the copy_to is issued immediately); the returned future
// is deferred, so the blocking map of the buffer only happens when the caller
// first asks for the data.
354 	std::future<array<const std::uint8_t>> copy_async(const spl::shared_ptr<texture>& source)
356 		if(!executor_.is_current())
357 			CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Operation only valid in an OpenGL Context."));
359 		auto buffer = create_buffer(source->size(), buffer::usage::read_only);
360 		source->copy_to(*buffer);
// Capture a strong self-reference so the device survives until readback.
362 		auto self	= shared_from_this();
363 		auto context	= get_context();
364 		auto cmd	= [self, buffer, context]() mutable -> array<const std::uint8_t>
366 			self->executor_.invoke([&buffer, &context] // Defer blocking "map" call until data is needed.
368 				CASPAR_LOG_CALL(trace) << "Readback <- " << context;
// Hand out a read-only byte view whose storage keeps the buffer alive.
371 			return array<const std::uint8_t>(buffer->data(), buffer->size(), true, buffer);
// Deferred: cmd runs on the first wait()/get(), not eagerly.
373 		return std::async(std::launch::deferred, std::move(cmd));
// Garbage collection: schedules a task on the GL thread that empties every
// texture and host-buffer pool, releasing all currently-idle GL resources.
// Buffers/textures in flight are unaffected (their deleters re-pool later).
376 	std::future<void> gc()
378 		return executor_.begin_invoke([=]
380 			CASPAR_LOG(info) << " ogl: Running GC.";
384 				for (auto& pools : device_pools_)
386 					for (auto& pool : pools)
389 				for (auto& pools : host_pools_)
391 					for (auto& pool : pools)
// Best-effort: failures during cleanup are logged, never propagated.
397 				CASPAR_LOG_CURRENT_EXCEPTION();
399 		}, task_priority::high_priority);
// Public device facade: the constructor initializer (its signature line is
// not visible in this excerpt) names the executor thread and builds the impl;
// the remaining one-line definitions simply forward to impl_.
404 	: executor_(L"OpenGL Rendering Context")
405 	, impl_(new impl(executor_)){}
// Public create_texture always clears the recycled texture (clear = true).
407 spl::shared_ptr<texture>				device::create_texture(int width, int height, int stride, bool mipmapped){ return impl_->create_texture(width, height, stride, mipmapped, true); }
408 array<std::uint8_t>						device::create_array(int size){return impl_->create_array(size);}
409 std::future<std::shared_ptr<texture>>	device::copy_async(const array<const std::uint8_t>& source, int width, int height, int stride, bool mipmapped){return impl_->copy_async(source, width, height, stride, mipmapped);}
410 std::future<std::shared_ptr<texture>>	device::copy_async(const array<std::uint8_t>& source, int width, int height, int stride, bool mipmapped){ return impl_->copy_async(source, width, height, stride, mipmapped); }
411 std::future<array<const std::uint8_t>>	device::copy_async(const spl::shared_ptr<texture>& source){return impl_->copy_async(source);}
412 std::future<void>						device::gc() { return impl_->gc(); }
413 boost::property_tree::wptree			device::info() const { return impl_->info(); }
414 std::wstring							device::version() const{return impl_->version();}