/*
* Copyright 2013 Sveriges Television AB http://casparcg.com/
*
* This file is part of CasparCG (www.casparcg.com).
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Robert Nagy, ronag89@gmail.com
*/
#include <cstddef>
#include <new>
#include <unordered_map>

#include <windows.h>

#include <tbb/mutex.h>
31 class page_locked_allocator
\r
34 typedef size_t size_type;
\r
35 typedef ptrdiff_t difference_type;
\r
37 typedef const T* const_pointer;
\r
38 typedef T& reference;
\r
39 typedef const T& const_reference;
\r
40 typedef T value_type;
\r
42 page_locked_allocator() {}
\r
43 page_locked_allocator(const page_locked_allocator&) {}
\r
45 pointer allocate(size_type n, const void * = 0)
\r
47 tbb::mutex::scoped_lock lock(get().mutex);
\r
49 size_type size = n * sizeof(T);
\r
50 if(get().free < size)
\r
51 allocate_store(size);
\r
53 auto p = ::VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
\r
55 throw std::bad_alloc();
\r
57 if(::VirtualLock(p, size) == 0)
\r
59 ::VirtualFree(p, 0, MEM_RELEASE);
\r
60 throw std::bad_alloc();
\r
64 get().map[p] = size;
\r
65 return reinterpret_cast<T*>(p);
\r
68 void deallocate(void* p, size_type)
\r
70 tbb::mutex::scoped_lock lock(get().mutex);
\r
72 if(!p || get().map.find(p) == get().map.end())
\r
77 ::VirtualFree(p, 0, MEM_RELEASE);
\r
78 get().free += get().map[p];
\r
84 pointer address(reference x) const { return &x; }
\r
85 const_pointer address(const_reference x) const { return &x; }
\r
86 page_locked_allocator<T>& operator=(const page_locked_allocator&) { return *this; }
\r
87 bool operator!=(const page_locked_allocator&) const { return false; }
\r
88 bool operator==(const page_locked_allocator&) const { return true; }
\r
89 void construct(pointer p, const T& val) { new ((T*) p) T(val); }
\r
90 void destroy(pointer p) { p->~T(); }
\r
92 size_type max_size() const { return size_t(-1); }
\r
95 struct rebind { typedef page_locked_allocator<U> other; };
\r
98 page_locked_allocator(const page_locked_allocator<U>&) {}
\r
101 page_locked_allocator& operator=(const page_locked_allocator<U>&) { return *this; }
\r
105 void allocate_store(size_type size)
\r
107 SIZE_T workingSetMinSize = 0, workingSetMaxSize = 0;
\r
108 if(::GetProcessWorkingSetSize(::GetCurrentProcess(), &workingSetMinSize, &workingSetMaxSize))
\r
110 workingSetMinSize += size;
\r
111 workingSetMaxSize += size;
\r
113 if(!::SetProcessWorkingSetSize(::GetCurrentProcess(), workingSetMinSize, workingSetMaxSize))
\r
114 throw std::bad_alloc();
\r
116 get().free += size;
\r
123 std::unordered_map<void*, size_type> map;
\r