]> git.sesse.net Git - casparcg/blob - accelerator/ogl/util/device.cpp
1e41ceb7a0eb637fd0571b9a73fe16d66d86f97b
[casparcg] / accelerator / ogl / util / device.cpp
1 /*
2 * Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
3 *
4 * This file is part of CasparCG (www.casparcg.com).
5 *
6 * CasparCG is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
10 *
11 * CasparCG is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * Author: Robert Nagy, ronag89@gmail.com
20 */
21
22 // TODO: Smart GC
23
24 #include "../../StdAfx.h"
25
26 #include "device.h"
27
28 #include "buffer.h"
29 #include "texture.h"
30 #include "shader.h"
31
32 #include <common/assert.h>
33 #include <common/except.h>
34 #include <common/future.h>
35 #include <common/array.h>
36 #include <common/memory.h>
37 #include <common/gl/gl_check.h>
38 #include <common/timer.h>
39
40 #include <GL/glew.h>
41
42 #include <SFML/Window/Context.hpp>
43
44 #include <tbb/concurrent_unordered_map.h>
45 #include <tbb/concurrent_hash_map.h>
46 #include <tbb/concurrent_queue.h>
47
48 #include <boost/utility/declval.hpp>
49 #include <boost/property_tree/ptree.hpp>
50
51 #include <array>
52 #include <unordered_map>
53
54 #include <asmlib.h>
55 #include <tbb/parallel_for.h>
56
57 namespace caspar { namespace accelerator { namespace ogl {
58                 
struct device::impl : public std::enable_shared_from_this<impl>
{	
	// device::impl_ must be a spl::shared_ptr so enable_shared_from_this /
	// weak_ptr lifetime tracking (used by the buffer deleters below) works.
	static_assert(std::is_same<decltype(boost::declval<device>().impl_), spl::shared_ptr<impl>>::value, "impl_ must be shared_ptr");

	// Textures already uploaded from a given host buffer, keyed by the raw
	// buffer pointer. An entry is erased when its buffer is returned to the
	// host pool (see the deleter in create_buffer), so a recycled buffer can
	// never hit a stale cache entry.
	tbb::concurrent_hash_map<buffer*, std::shared_ptr<texture>> texture_cache_;

	// The OpenGL context. Created, activated and destroyed on the executor
	// thread only.
	std::unique_ptr<sf::Context> device_;
	
	// Texture pools. Index = (stride - 1) + (mipmapped ? 4 : 0), i.e. four
	// pixel strides (1..4) times with/without mipmapping = 8 pools. Each pool
	// maps a packed (width << 16 | height) key to a queue of free textures.
	std::array<tbb::concurrent_unordered_map<std::size_t, tbb::concurrent_bounded_queue<std::shared_ptr<texture>>>, 8>	device_pools_;
	// Host buffer pools. Index = buffer::usage (read_only / write_only),
	// keyed by buffer size in bytes.
	std::array<tbb::concurrent_unordered_map<std::size_t, tbb::concurrent_bounded_queue<std::shared_ptr<buffer>>>, 2>	host_pools_;
	
	// Framebuffer object generated at init and bound for the lifetime of the
	// context.
	GLuint fbo_;

	// Dedicated rendering thread: all OpenGL calls are marshalled through it.
	executor& executor_;
				
	// Creates the GL context on the executor thread, initializes GLEW,
	// verifies OpenGL 3.0 support and binds the FBO. Throws (from within the
	// invoked task) if GLEW fails or the GPU is below the minimum requirement.
	impl(executor& executor) 
		: executor_(executor)
	{
		executor_.set_capacity(256);

		CASPAR_LOG(info) << L"Initializing OpenGL Device.";
		
		executor_.invoke([=]
		{
			device_.reset(new sf::Context());
			device_->setActive(true);		
						
			if (glewInit() != GLEW_OK)
				CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize GLEW."));
		
			if(!GLEW_VERSION_3_0)
				CASPAR_THROW_EXCEPTION(not_supported() << msg_info("Your graphics card does not meet the minimum hardware requirements since it does not support OpenGL 3.0 or higher."));
	
			glGenFramebuffers(1, &fbo_);				
			glBindFramebuffer(GL_FRAMEBUFFER, fbo_);
		});
				
		CASPAR_LOG(info) << L"Successfully initialized OpenGL " << version();
	}

	// Tears down all GL resources on the executor thread: pooled objects are
	// released before the FBO is deleted and the context itself is destroyed.
	~impl()
	{
		executor_.invoke([=]
		{
			texture_cache_.clear();

			for (auto& pool : host_pools_)
				pool.clear();

			for (auto& pool : device_pools_)
				pool.clear();

			glDeleteFramebuffers(1, &fbo_);

			device_.reset();
		});
	}

	// Builds a diagnostic property tree describing current pool occupancy:
	// per-pool entries plus aggregate counts/sizes for device (texture) and
	// host (buffer) pools. Read-only; safe to call from any thread since the
	// pool containers are concurrent.
	boost::property_tree::wptree info() const
	{
		boost::property_tree::wptree info;

		boost::property_tree::wptree pooled_device_buffers;
		size_t total_pooled_device_buffer_size	= 0;
		size_t total_pooled_device_buffer_count	= 0;

		for (size_t i = 0; i < device_pools_.size(); ++i)
		{
			auto& pools		= device_pools_.at(i);
			// Inverse of the index formula in create_texture:
			// indices 0..3 are strides 1..4 without mipmapping, 4..7 with.
			bool mipmapping	= i > 3;
			auto stride		= mipmapping ? i - 3 : i + 1;

			for (auto& pool : pools)
			{
				// Unpack the (width << 16 | height) pool key.
				auto width	= pool.first >> 16;
				auto height	= pool.first & 0x0000FFFF;
				auto size	= width * height * stride;
				auto count	= pool.second.size();

				if (count == 0)
					continue;

				boost::property_tree::wptree pool_info;

				pool_info.add(L"stride",		stride);
				pool_info.add(L"mipmapping",	mipmapping);
				pool_info.add(L"width",			width);
				pool_info.add(L"height",		height);
				pool_info.add(L"size",			size);
				pool_info.add(L"count",			count);

				total_pooled_device_buffer_size		+= size * count;
				total_pooled_device_buffer_count	+= count;

				pooled_device_buffers.add_child(L"device_buffer_pool", pool_info);
			}
		}

		info.add_child(L"gl.details.pooled_device_buffers", pooled_device_buffers);

		boost::property_tree::wptree pooled_host_buffers;
		size_t total_read_size		= 0;
		size_t total_write_size		= 0;
		size_t total_read_count		= 0;
		size_t total_write_count	= 0;

		for (size_t i = 0; i < host_pools_.size(); ++i)
		{
			auto& pools	= host_pools_.at(i);
			auto usage	= static_cast<buffer::usage>(i);

			for (auto& pool : pools)
			{
				auto size	= pool.first;
				auto count	= pool.second.size();

				if (count == 0)
					continue;

				boost::property_tree::wptree pool_info;

				pool_info.add(L"usage", usage == buffer::usage::read_only ? L"read_only" : L"write_only");
				pool_info.add(L"size",	size);
				pool_info.add(L"count", count);

				pooled_host_buffers.add_child(L"host_buffer_pool", pool_info);

				(usage == buffer::usage::read_only ? total_read_count : total_write_count) += count;
				(usage == buffer::usage::read_only ? total_read_size : total_write_size) += size * count;
			}
		}

		info.add_child(L"gl.details.pooled_host_buffers",						pooled_host_buffers);
		info.add(L"gl.summary.pooled_device_buffers.total_count",		total_pooled_device_buffer_count);
		info.add(L"gl.summary.pooled_device_buffers.total_size",		total_pooled_device_buffer_size);
		info.add_child(L"gl.summary.all_device_buffers",						texture::info());
		info.add(L"gl.summary.pooled_host_buffers.total_read_count",	total_read_count);
		info.add(L"gl.summary.pooled_host_buffers.total_write_count",	total_write_count);
		info.add(L"gl.summary.pooled_host_buffers.total_read_size",		total_read_size);
		info.add(L"gl.summary.pooled_host_buffers.total_write_size",	total_write_size);
		info.add_child(L"gl.summary.all_host_buffers",							buffer::info());

		return info;
	}
		
	// Returns "<GL_VERSION> <GL_VENDOR>" queried on the executor thread, or
	// L"Not found" if the query throws (e.g. no context yet). Deliberately
	// best-effort: never propagates the exception.
	std::wstring version()
	{	
		try
		{
			return executor_.invoke([]
			{
				return u16(reinterpret_cast<const char*>(GL2(glGetString(GL_VERSION)))) + L" " + u16(reinterpret_cast<const char*>(GL2(glGetString(GL_VENDOR))));
			});	
		}
		catch(...)
		{
			return L"Not found";;
		}
	}
							
	// Pops a texture of the requested dimensions/stride from the matching
	// pool, or allocates a new one. Must be called on the executor (GL)
	// thread. The returned shared_ptr's deleter pushes the texture back into
	// its pool instead of destroying it.
	//
	// width/height are packed into a 16+16 bit pool key, so each is assumed
	// to fit in 16 bits — NOTE(review): not enforced here; verify upstream.
	spl::shared_ptr<texture> create_texture(int width, int height, int stride, bool mipmapped, bool clear)
	{
		CASPAR_VERIFY(stride > 0 && stride < 5);
		CASPAR_VERIFY(width > 0 && height > 0);

		if(!executor_.is_current())
			CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Operation only valid in an OpenGL Context."));
					
		auto pool = &device_pools_[stride - 1 + (mipmapped ? 4 : 0)][((width << 16) & 0xFFFF0000) | (height & 0x0000FFFF)];
		
		std::shared_ptr<texture> tex;
		if(!pool->try_pop(tex))		
			tex = spl::make_shared<texture>(width, height, stride, mipmapped);
	
		if(clear)
			tex->clear();

		// Aliasing constructor + recycling deleter: when the last reference
		// drops, the texture goes back to the pool rather than being freed.
		return spl::shared_ptr<texture>(tex.get(), [tex, pool](texture*) mutable
		{		
			pool->push(tex);	
		});
	}
		
	// Pops a host buffer of the given size/usage from the pool, or allocates
	// one on the GL thread (logged as a performance warning if that blocks
	// for more than 20 ms). May be called from any thread.
	spl::shared_ptr<buffer> create_buffer(std::size_t size, buffer::usage usage)
	{
		CASPAR_VERIFY(size > 0);
		
		auto pool = &host_pools_[static_cast<int>(usage)][size];
		
		std::shared_ptr<buffer> buf;
		if(!pool->try_pop(buf))	
		{
			caspar::timer timer;

			// Buffer creation needs the GL context; run it at high priority
			// so callers aren't starved behind queued rendering work.
			buf = executor_.invoke([&]
			{
				return std::make_shared<buffer>(size, usage);
			}, task_priority::high_priority);
			
			if(timer.elapsed() > 0.02)
				CASPAR_LOG(warning) << L"[ogl-device] Performance warning. Buffer allocation blocked: " << timer.elapsed();
		}
		
		std::weak_ptr<impl> self = shared_from_this(); // buffers can leave the device context, take a hold on life-time.
		return spl::shared_ptr<buffer>(buf.get(), [=](buffer*) mutable
		{
			auto strong = self.lock();

			if (strong)
			{
				// Drop any cached texture keyed on this buffer before the
				// buffer is recycled for a new upload.
				strong->texture_cache_.erase(buf.get());
				pool->push(buf);
			}
			else
			{
				// Device already destroyed: let the buffer die with us.
				CASPAR_LOG(info) << L"Buffer outlived ogl device";
			}
		});
	}

	// Wraps a freshly pooled write-only host buffer in a caspar array whose
	// storage keeps the buffer alive.
	array<std::uint8_t> create_array(std::size_t size)
	{		
		auto buf = create_buffer(size, buffer::usage::write_only);
		return array<std::uint8_t>(buf->data(), buf->size(), false, buf);
	}

	// Returns the host buffer backing `source` if it already has one;
	// otherwise allocates a write-only buffer and copies the bytes in,
	// parallelized across TBB worker threads.
	template<typename T>
	std::shared_ptr<buffer> copy_to_buf(const T& source)
	{
		std::shared_ptr<buffer> buf;

		auto tmp = source.template storage<spl::shared_ptr<buffer>>();
		if(tmp)
			buf = *tmp;
		else
		{			
			buf = create_buffer(source.size(), buffer::usage::write_only);
			tbb::parallel_for(tbb::blocked_range<std::size_t>(0, source.size()), [&](const tbb::blocked_range<std::size_t>& r)
			{
				A_memcpy(buf->data() + r.begin(), source.data() + r.begin(), r.size());
			});
		}

		return buf;
	}

	// TODO: Since the returned texture is cached it SHOULD NOT be modified.
	// Uploads `source` into a texture on the GL thread. Results are cached
	// per host buffer, so repeated uploads of the same (immutable) source
	// reuse the previous texture.
	std::future<std::shared_ptr<texture>> copy_async(const array<const std::uint8_t>& source, int width, int height, int stride, bool mipmapped)
	{
		std::shared_ptr<buffer> buf = copy_to_buf(source);
				
		return executor_.begin_invoke([=]() -> std::shared_ptr<texture>
		{
			tbb::concurrent_hash_map<buffer*, std::shared_ptr<texture>>::const_accessor a;
			if(texture_cache_.find(a, buf.get()))
				return spl::make_shared_ptr(a->second);

			auto texture = create_texture(width, height, stride, mipmapped, false);
			texture->copy_from(*buf);

			texture_cache_.insert(std::make_pair(buf.get(), texture));
			
			return texture;
		}, task_priority::high_priority);
	}
	
	// Mutable-source overload: same upload path but no caching, since the
	// caller may change the bytes after this call.
	std::future<std::shared_ptr<texture>> copy_async(const array<std::uint8_t>& source, int width, int height, int stride, bool mipmapped)
	{
		std::shared_ptr<buffer> buf = copy_to_buf(source);

		return executor_.begin_invoke([=]() -> std::shared_ptr<texture>
		{
			auto texture = create_texture(width, height, stride, mipmapped, false);
			texture->copy_from(*buf);	
			
			return texture;
		}, task_priority::high_priority);
	}

	// Downloads a texture into a read-only host buffer. Must be called on the
	// GL thread. Returns a deferred future: the blocking buffer::map happens
	// only when the caller first asks for the data. The command holds a
	// strong ref to this impl so the executor outlives the pending read.
	std::future<array<const std::uint8_t>> copy_async(const spl::shared_ptr<texture>& source)
	{
		if(!executor_.is_current())
			CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Operation only valid in an OpenGL Context."));

		auto buffer = create_buffer(source->size(), buffer::usage::read_only); 
		source->copy_to(*buffer);	

		auto self = shared_from_this();
		auto cmd = [self, buffer]() mutable -> array<const std::uint8_t>
		{
			self->executor_.invoke(std::bind(&buffer::map, std::ref(buffer))); // Defer blocking "map" call until data is needed.
			return array<const std::uint8_t>(buffer->data(), buffer->size(), true, buffer);
		};
		return std::async(std::launch::deferred, std::move(cmd));
	}

	// Empties every texture and host-buffer pool on the GL thread, freeing
	// all pooled (idle) GPU resources. Best-effort: failures are logged, not
	// propagated.
	std::future<void> gc()
	{
		return executor_.begin_invoke([=]
		{
			CASPAR_LOG(info) << " ogl: Running GC.";

			try
			{
				for (auto& pools : device_pools_)
				{
					for (auto& pool : pools)
						pool.second.clear();
				}
				for (auto& pools : host_pools_)
				{
					for (auto& pool : pools)
						pool.second.clear();
				}
			}
			catch (...)
			{
				CASPAR_LOG_CURRENT_EXCEPTION();
			}
		}, task_priority::high_priority);
	}
};
381
382 device::device() 
383         : executor_(L"OpenGL Rendering Context")
384         , impl_(new impl(executor_)){}
385 device::~device(){}
386 spl::shared_ptr<texture>                                        device::create_texture(int width, int height, int stride, bool mipmapped){ return impl_->create_texture(width, height, stride, mipmapped, true); }
387 array<std::uint8_t>                                                     device::create_array(int size){return impl_->create_array(size);}
388 std::future<std::shared_ptr<texture>>           device::copy_async(const array<const std::uint8_t>& source, int width, int height, int stride, bool mipmapped){return impl_->copy_async(source, width, height, stride, mipmapped);}
389 std::future<std::shared_ptr<texture>>           device::copy_async(const array<std::uint8_t>& source, int width, int height, int stride, bool mipmapped){ return impl_->copy_async(source, width, height, stride, mipmapped); }
390 std::future<array<const std::uint8_t>>          device::copy_async(const spl::shared_ptr<texture>& source){return impl_->copy_async(source);}
391 std::future<void>                                                       device::gc() { return impl_->gc(); }
392 boost::property_tree::wptree                            device::info() const { return impl_->info(); }
393 std::wstring                                                            device::version() const{return impl_->version();}
394
395
396 }}}
397
398