/*
* Copyright (c) 2010 Sveriges Television AB <info@casparcg.com>
*
* This file is part of CasparCG.
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once

#include <cstddef>
#include <new>
#include <unordered_map>

#include <tbb/mutex.h>

#include <windows.h>

namespace caspar {
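// STL-compatible allocator that hands out page-locked (non-pageable)
// memory via VirtualAlloc/VirtualLock, so that the pages backing
// performance-critical buffers are never swapped out by the OS.
// Bookkeeping (live allocation sizes, remaining locked-byte budget and
// the guarding mutex) is shared process-wide through the impl singleton
// at the bottom of this class.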
template <typename T>
class page_locked_allocator
{
public:
    typedef size_t      size_type;
    typedef ptrdiff_t   difference_type;
    typedef T*          pointer;
    typedef const T*    const_pointer;
    typedef T&          reference;
    typedef const T&    const_reference;
    typedef T           value_type;
    page_locked_allocator() {}
    page_locked_allocator(const page_locked_allocator&) {}
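    // Allocates page-locked memory for n objects of type T. If the remaining
    // locked-byte budget is too small, the process working set is grown first
    // (see allocate_store). Throws std::bad_alloc on failure.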
    pointer allocate(size_type n, const void* = 0)
    {
        tbb::mutex::scoped_lock lock(get().mutex);

        size_type size = n * sizeof(T);
        if(get().free < size)
            allocate_store(size);

        auto p = ::VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
        if(!p)
            throw std::bad_alloc();

        if(::VirtualLock(p, size) == 0)
        {
            ::VirtualFree(p, 0, MEM_RELEASE);
            throw std::bad_alloc();
        }

        get().free -= size;
        get().map[p] = size;
        return reinterpret_cast<T*>(p);
    }
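    // Releases page-locked memory and credits its size back to the shared
    // budget. Pointers that this allocator did not hand out are ignored.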
    void deallocate(void* p, size_type)
    {
        tbb::mutex::scoped_lock lock(get().mutex);

        if(!p || get().map.find(p) == get().map.end())
            return;

        ::VirtualFree(p, 0, MEM_RELEASE);
        get().free += get().map[p];
        get().map.erase(p);
    }
    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }
    page_locked_allocator<T>& operator=(const page_locked_allocator&) { return *this; }
    void construct(pointer p, const T& val) { new (p) T(val); }
    void destroy(pointer p) { p->~T(); }

    size_type max_size() const { return size_t(-1); }
    template <class U>
    struct rebind { typedef page_locked_allocator<U> other; };

    template <class U>
    page_locked_allocator(const page_locked_allocator<U>&) {}

    template <class U>
    page_locked_allocator& operator=(const page_locked_allocator<U>&) { return *this; }

private:
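    // VirtualLock fails once the total locked bytes approach the process
    // working set minimum, so both working set bounds are grown by the
    // requested amount before more memory may be locked.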
    void allocate_store(size_type size)
    {
        SIZE_T workingSetMinSize = 0, workingSetMaxSize = 0;
        if(::GetProcessWorkingSetSize(::GetCurrentProcess(), &workingSetMinSize, &workingSetMaxSize))
        {
            workingSetMinSize += size;
            workingSetMaxSize += size;

            if(!::SetProcessWorkingSetSize(::GetCurrentProcess(), workingSetMinSize, workingSetMaxSize))
                throw std::bad_alloc();

            get().free += size;
        }
    }
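    // Process-wide state shared by every instantiation of this allocator:
    // the size of each live allocation, how many more bytes may be locked
    // without growing the working set again, and the mutex guarding both.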
    struct impl
    {
        impl() : free(0) {}

        std::unordered_map<void*, size_type> map;
        size_type free;
        tbb::mutex mutex;
    };

    static impl& get()
    {
        static impl instance;
        return instance;
    }
};

} // namespace caspar
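// A minimal usage sketch (illustrative, not part of the original header):
// the allocator plugs into any standard container, e.g. a frame buffer
// whose backing pages must never be paged out:
//
//   #include <cstdint>
//   #include <vector>
//
//   std::vector<uint8_t, caspar::page_locked_allocator<uint8_t>>
//       frame_buffer(1920 * 1080 * 4); // pages committed and VirtualLock'ed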