1 /**********************************************************************************************
3 * rmem - raylib memory pool and objects pool
5 * A quick, efficient, and minimal free list and arena-based allocator
*   - A quicker, more efficient memory allocator alternative to 'malloc' and friends.
9 * - Reduce the possibilities of memory leaks for beginner developers using Raylib.
10 * - Being able to flexibly range check memory if necessary.
14 * #define RMEM_IMPLEMENTATION
15 * Generates the implementation of the library into the included file.
16 * If not defined, the library is in header only mode and can be included in other headers
17 * or source files without problems. But only ONE file should hold the implementation.
20 * LICENSE: zlib/libpng
22 * Copyright (c) 2019 Kevin 'Assyrianic' Yonan (@assyrianic) and reviewed by Ramon Santamaria (@raysan5)
24 * This software is provided "as-is", without any express or implied warranty. In no event
25 * will the authors be held liable for any damages arising from the use of this software.
27 * Permission is granted to anyone to use this software for any purpose, including commercial
28 * applications, and to alter it and redistribute it freely, subject to the following restrictions:
30 * 1. The origin of this software must not be misrepresented; you must not claim that you
31 * wrote the original software. If you use this software in a product, an acknowledgment
32 * in the product documentation would be appreciated but is not required.
34 * 2. Altered source versions must be plainly marked as such, and must not be misrepresented
35 * as being the original software.
37 * 3. This notice may not be removed or altered from any source distribution.
39 **********************************************************************************************/
47 //----------------------------------------------------------------------------------
49 //----------------------------------------------------------------------------------
// Function attribute for shared-library builds on Windows; empty otherwise.
#if defined(_WIN32) && defined(BUILD_LIBTYPE_SHARED)
    #define RMEMAPI __declspec(dllexport)   // We are building library as a Win32 shared library (.dll)
#elif defined(_WIN32) && defined(USE_LIBTYPE_SHARED)
    #define RMEMAPI __declspec(dllimport)   // We are using library as a Win32 shared library (.dll)
#else
    #define RMEMAPI                         // We are building or using library as a static library (or Linux shared library)
#endif

#define RMEM_VERSION    "v1.3"              // changelog at bottom of header.
60 //----------------------------------------------------------------------------------
61 // Types and Structures Definition
62 //----------------------------------------------------------------------------------
// Header stored directly in front of every mempool allocation;
// doubles as a doubly-linked freelist node once the block is freed.
typedef struct MemNode MemNode;
struct MemNode {
    size_t size;            // total block size, header included
    MemNode *next, *prev;   // freelist links (NULL while the block is live)
};
71 // Freelist implementation
72 typedef struct AllocList {
// Bump-down arena: allocations carve space from 'offs' toward 'mem'.
typedef struct Arena {
    uintptr_t mem, offs;    // buffer base address and current (downward) offset
    size_t size;            // total buffer size in bytes
} Arena;
// Tuning constants for the mempool's size-class buckets.
enum {
    MEMPOOL_BUCKET_SIZE = 8,                            // number of small-size freelist buckets
    MEMPOOL_BUCKET_BITS = (sizeof(uintptr_t) >> 1) + 1, // shift mapping an alloc size to a bucket slot
    MEM_SPLIT_THRESHOLD = sizeof(uintptr_t) * 4         // don't split a free block this close in size (reduces fragmentation)
};
90 typedef struct MemPool {
91 AllocList large, buckets[MEMPOOL_BUCKET_SIZE];
// Fixed-size object pool: free slots form an implicit index-linked stack
// ('offs' is the current head slot; each free slot stores the next index).
typedef struct ObjPool {
    uintptr_t mem, offs;                    // buffer base and current free-slot head (0 when exhausted)
    size_t objSize, freeBlocks, memSize;    // aligned slot size, free count, total slot count
} ObjPool;
// Double-Ended Stack aka Deque: one buffer, two bump pointers
// growing toward each other ('front' up, 'back' down).
typedef struct BiStack {
    uintptr_t mem, front, back; // buffer base, front cursor, back cursor
    size_t size;                // total buffer size in bytes
} BiStack;
110 #if defined(__cplusplus)
111 extern "C" { // Prevents name mangling of functions
114 //------------------------------------------------------------------------------------
115 // Functions Declaration - Memory Pool
116 //------------------------------------------------------------------------------------
117 RMEMAPI MemPool CreateMemPool(size_t bytes);
118 RMEMAPI MemPool CreateMemPoolFromBuffer(void *buf, size_t bytes);
119 RMEMAPI void DestroyMemPool(MemPool *mempool);
121 RMEMAPI void *MemPoolAlloc(MemPool *mempool, size_t bytes);
122 RMEMAPI void *MemPoolRealloc(MemPool *mempool, void *ptr, size_t bytes);
123 RMEMAPI void MemPoolFree(MemPool *mempool, void *ptr);
124 RMEMAPI void MemPoolCleanUp(MemPool *mempool, void **ptrref);
125 RMEMAPI void MemPoolReset(MemPool *mempool);
126 RMEMAPI size_t GetMemPoolFreeMemory(const MemPool mempool);
128 //------------------------------------------------------------------------------------
129 // Functions Declaration - Object Pool
130 //------------------------------------------------------------------------------------
131 RMEMAPI ObjPool CreateObjPool(size_t objsize, size_t len);
132 RMEMAPI ObjPool CreateObjPoolFromBuffer(void *buf, size_t objsize, size_t len);
133 RMEMAPI void DestroyObjPool(ObjPool *objpool);
135 RMEMAPI void *ObjPoolAlloc(ObjPool *objpool);
136 RMEMAPI void ObjPoolFree(ObjPool *objpool, void *ptr);
137 RMEMAPI void ObjPoolCleanUp(ObjPool *objpool, void **ptrref);
139 //------------------------------------------------------------------------------------
140 // Functions Declaration - Double-Ended Stack
141 //------------------------------------------------------------------------------------
142 RMEMAPI BiStack CreateBiStack(size_t len);
143 RMEMAPI BiStack CreateBiStackFromBuffer(void *buf, size_t len);
144 RMEMAPI void DestroyBiStack(BiStack *destack);
146 RMEMAPI void *BiStackAllocFront(BiStack *destack, size_t len);
147 RMEMAPI void *BiStackAllocBack(BiStack *destack, size_t len);
149 RMEMAPI void BiStackResetFront(BiStack *destack);
150 RMEMAPI void BiStackResetBack(BiStack *destack);
151 RMEMAPI void BiStackResetAll(BiStack *destack);
153 RMEMAPI intptr_t BiStackMargins(BiStack destack);
161 /***********************************************************************************
163 * RMEM IMPLEMENTATION
165 ************************************************************************************/
167 #if defined(RMEM_IMPLEMENTATION)
169 #include <stdio.h> // Required for:
170 #include <stdlib.h> // Required for:
171 #include <string.h> // Required for:
173 //----------------------------------------------------------------------------------
174 // Defines and Macros
175 //----------------------------------------------------------------------------------
177 // Make sure restrict type qualifier for pointers is defined
178 // NOTE: Not supported by C++, it is a C only keyword
// Make sure restrict type qualifier for pointers is defined
// NOTE: Not supported by C++, it is a C only keyword
#if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__) || defined(_MSC_VER)
    #define restrict __restrict
#endif
185 //----------------------------------------------------------------------------------
186 // Global Variables Definition
187 //----------------------------------------------------------------------------------
190 //----------------------------------------------------------------------------------
191 // Module specific Functions Declaration
192 //----------------------------------------------------------------------------------
// Round 'size' up to the next multiple of 'align'.
// 'align' must be a power of two: (-align) in size_t arithmetic is the
// complement mask, so the classic (x + a-1) & ~(a-1) trick applies.
// NOTE(review): the double-underscore name is reserved to the
// implementation (C11 7.1.3) — kept for API/file consistency.
static inline size_t __AlignSize(const size_t size, const size_t align)
{
    return (size + (align - 1)) & -align;
}
198 static MemNode *__SplitMemNode(MemNode *const node, const size_t bytes)
200 uintptr_t n = ( uintptr_t )node;
201 MemNode *const r = ( MemNode* )(n + (node->size - bytes));
207 static void __InsertMemNodeBefore(AllocList *const list, MemNode *const insert, MemNode *const curr)
210 if (curr->prev==NULL) list->head = insert;
213 insert->prev = curr->prev;
214 curr->prev->next = insert;
219 static void __ReplaceMemNode(MemNode *const old, MemNode *const replace)
221 replace->prev = old->prev;
222 replace->next = old->next;
223 if( old->prev != NULL )
224 old->prev->next = replace;
225 if( old->next != NULL )
226 old->next->prev = replace;
230 static MemNode *__RemoveMemNode(AllocList *const list, MemNode *const node)
232 if (node->prev != NULL) node->prev->next = node->next;
235 list->head = node->next;
236 if (list->head != NULL) list->head->prev = NULL;
237 else list->tail = NULL;
240 if (node->next != NULL) node->next->prev = node->prev;
243 list->tail = node->prev;
244 if (list->tail != NULL) list->tail->next = NULL;
245 else list->head = NULL;
251 static MemNode *__FindMemNode(AllocList *const list, const size_t bytes)
253 for (MemNode *node = list->head; node != NULL; node = node->next)
255 if (node->size < bytes) continue;
256 // close in size - reduce fragmentation by not splitting.
257 else if (node->size <= bytes + MEM_SPLIT_THRESHOLD) return __RemoveMemNode(list, node);
258 else return __SplitMemNode(node, bytes);
263 static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNode *const node, const bool is_bucket)
265 if (list->head == NULL)
272 for (MemNode *iter = list->head; iter != NULL; iter = iter->next)
274 if (( uintptr_t )iter == mempool->arena.offs)
276 mempool->arena.offs += iter->size;
277 __RemoveMemNode(list, iter);
280 const uintptr_t inode = ( uintptr_t )node;
281 const uintptr_t iiter = ( uintptr_t )iter;
282 const uintptr_t iter_end = iiter + iter->size;
283 const uintptr_t node_end = inode + node->size;
284 if (iter==node) return;
285 else if (iter < node)
287 // node was coalesced prior.
288 if (iter_end > inode) return;
289 else if (iter_end==inode && !is_bucket)
291 // if we can coalesce, do so.
292 iter->size += node->size;
296 else if (iter > node)
298 // Address sort, lowest to highest aka ascending order.
299 if (iiter < node_end) return;
300 else if (iter==list->head && !is_bucket)
302 if (iter_end==inode) iter->size += node->size;
303 else if (node_end==iiter)
305 node->size += list->head->size;
306 node->next = list->head->next;
320 else if (iter_end==inode && !is_bucket)
322 // if we can coalesce, do so.
323 iter->size += node->size;
328 __InsertMemNodeBefore(list, iter, node);
337 //----------------------------------------------------------------------------------
338 // Module Functions Definition - Memory Pool
339 //----------------------------------------------------------------------------------
341 MemPool CreateMemPool(const size_t size)
343 MemPool mempool = { 0 };
345 if (size == 0) return mempool;
348 // Align the mempool size to at least the size of an alloc node.
349 uint8_t *const restrict buf = malloc(size*sizeof *buf);
350 if (buf==NULL) return mempool;
353 mempool.arena.size = size;
354 mempool.arena.mem = ( uintptr_t )buf;
355 mempool.arena.offs = mempool.arena.mem + mempool.arena.size;
361 MemPool CreateMemPoolFromBuffer(void *const restrict buf, const size_t size)
363 MemPool mempool = { 0 };
364 if ((size == 0) || (buf == NULL) || (size <= sizeof(MemNode))) return mempool;
367 mempool.arena.size = size;
368 mempool.arena.mem = ( uintptr_t )buf;
369 mempool.arena.offs = mempool.arena.mem + mempool.arena.size;
374 void DestroyMemPool(MemPool *const restrict mempool)
376 if (mempool->arena.mem == 0) return;
379 void *const restrict ptr = ( void* )mempool->arena.mem;
381 *mempool = (MemPool){ 0 };
385 void *MemPoolAlloc(MemPool *const mempool, const size_t size)
387 if ((size == 0) || (size > mempool->arena.size)) return NULL;
390 MemNode *new_mem = NULL;
391 const size_t ALLOC_SIZE = __AlignSize(size + sizeof *new_mem, sizeof(intptr_t));
392 const size_t BUCKET_SLOT = (ALLOC_SIZE >> MEMPOOL_BUCKET_BITS) - 1;
394 // If the size is small enough, let's check if our buckets has a fitting memory block.
395 if (BUCKET_SLOT < MEMPOOL_BUCKET_SIZE)
397 new_mem = __FindMemNode(&mempool->buckets[BUCKET_SLOT], ALLOC_SIZE);
399 else if (mempool->large.head != NULL)
401 new_mem = __FindMemNode(&mempool->large, ALLOC_SIZE);
406 // not enough memory to support the size!
407 if ((mempool->arena.offs - ALLOC_SIZE) < mempool->arena.mem) return NULL;
410 // Couldn't allocate from a freelist, allocate from available mempool.
411 // Subtract allocation size from the mempool.
412 mempool->arena.offs -= ALLOC_SIZE;
414 // Use the available mempool space as the new node.
415 new_mem = ( MemNode* )mempool->arena.offs;
416 new_mem->size = ALLOC_SIZE;
420 // Visual of the allocation block.
422 // | mem size | lowest addr of block
423 // | next node | 12 byte (32-bit) header
424 // | prev node | 24 byte (64-bit) header
428 // | space | highest addr of block
430 new_mem->next = new_mem->prev = NULL;
431 uint8_t *const restrict final_mem = ( uint8_t* )new_mem + sizeof *new_mem;
432 return memset(final_mem, 0, new_mem->size - sizeof *new_mem);
436 void *MemPoolRealloc(MemPool *const restrict mempool, void *const ptr, const size_t size)
438 if (size > mempool->arena.size) return NULL;
439 // NULL ptr should make this work like regular Allocation.
440 else if (ptr == NULL) return MemPoolAlloc(mempool, size);
441 else if ((uintptr_t)ptr - sizeof(MemNode) < mempool->arena.mem) return NULL;
444 MemNode *const node = ( MemNode* )(( uint8_t* )ptr - sizeof *node);
445 const size_t NODE_SIZE = sizeof *node;
446 uint8_t *const resized_block = MemPoolAlloc(mempool, size);
447 if (resized_block == NULL) return NULL;
450 MemNode *const resized = ( MemNode* )(resized_block - sizeof *resized);
451 memmove(resized_block, ptr, (node->size > resized->size)? (resized->size - NODE_SIZE) : (node->size - NODE_SIZE));
452 MemPoolFree(mempool, ptr);
453 return resized_block;
458 void MemPoolFree(MemPool *const restrict mempool, void *const ptr)
460 const uintptr_t p = ( uintptr_t )ptr;
461 if ((ptr == NULL) || (p - sizeof(MemNode) < mempool->arena.mem)) return;
464 // Behind the actual pointer data is the allocation info.
465 const uintptr_t block = p - sizeof(MemNode);
466 MemNode *const mem_node = ( MemNode* )block;
467 const size_t BUCKET_SLOT = (mem_node->size >> MEMPOOL_BUCKET_BITS) - 1;
469 // Make sure the pointer data is valid.
470 if ((block < mempool->arena.offs) ||
471 ((block - mempool->arena.mem) > mempool->arena.size) ||
472 (mem_node->size == 0) ||
473 (mem_node->size > mempool->arena.size)) return;
474 // If the mem_node is right at the arena offs, then merge it back to the arena.
475 else if (block == mempool->arena.offs)
477 mempool->arena.offs += mem_node->size;
481 // try to place it into bucket or large freelist.
482 struct AllocList *const l = (BUCKET_SLOT < MEMPOOL_BUCKET_SIZE) ? &mempool->buckets[BUCKET_SLOT] : &mempool->large;
483 __InsertMemNode(mempool, l, mem_node, (BUCKET_SLOT < MEMPOOL_BUCKET_SIZE));
488 void MemPoolCleanUp(MemPool *const restrict mempool, void **const ptrref)
490 if ((ptrref == NULL) || (*ptrref == NULL)) return;
493 MemPoolFree(mempool, *ptrref);
498 size_t GetMemPoolFreeMemory(const MemPool mempool)
500 size_t total_remaining = mempool.arena.offs - mempool.arena.mem;
502 for (MemNode *n=mempool.large.head; n != NULL; n = n->next) total_remaining += n->size;
504 for (size_t i=0; i<MEMPOOL_BUCKET_SIZE; i++) for (MemNode *n = mempool.buckets[i].head; n != NULL; n = n->next) total_remaining += n->size;
506 return total_remaining;
509 void MemPoolReset(MemPool *const mempool)
511 mempool->large.head = mempool->large.tail = NULL;
512 mempool->large.len = 0;
513 for (size_t i = 0; i < MEMPOOL_BUCKET_SIZE; i++)
515 mempool->buckets[i].head = mempool->buckets[i].tail = NULL;
516 mempool->buckets[i].len = 0;
518 mempool->arena.offs = mempool->arena.mem + mempool->arena.size;
521 //----------------------------------------------------------------------------------
522 // Module Functions Definition - Object Pool
523 //----------------------------------------------------------------------------------
525 ObjPool CreateObjPool(const size_t objsize, const size_t len)
527 ObjPool objpool = { 0 };
528 if ((len == 0) || (objsize == 0)) return objpool;
531 const size_t aligned_size = __AlignSize(objsize, sizeof(size_t));
532 uint8_t *const restrict buf = calloc(len, aligned_size);
533 if (buf == NULL) return objpool;
534 objpool.objSize = aligned_size;
535 objpool.memSize = objpool.freeBlocks = len;
536 objpool.mem = ( uintptr_t )buf;
538 for (size_t i=0; i<objpool.freeBlocks; i++)
540 size_t *const restrict index = ( size_t* )(objpool.mem + (i*aligned_size));
544 objpool.offs = objpool.mem;
549 ObjPool CreateObjPoolFromBuffer(void *const restrict buf, const size_t objsize, const size_t len)
551 ObjPool objpool = { 0 };
553 // If the object size isn't large enough to align to a size_t, then we can't use it.
554 const size_t aligned_size = __AlignSize(objsize, sizeof(size_t));
555 if ((buf == NULL) || (len == 0) || (objsize < sizeof(size_t)) || (objsize*len != aligned_size*len)) return objpool;
558 objpool.objSize = aligned_size;
559 objpool.memSize = objpool.freeBlocks = len;
560 objpool.mem = (uintptr_t)buf;
562 for (size_t i=0; i<objpool.freeBlocks; i++)
564 size_t *const restrict index = ( size_t* )(objpool.mem + (i*aligned_size));
568 objpool.offs = objpool.mem;
573 void DestroyObjPool(ObjPool *const restrict objpool)
575 if (objpool->mem == 0) return;
578 void *const restrict ptr = ( void* )objpool->mem;
580 *objpool = (ObjPool){0};
584 void *ObjPoolAlloc(ObjPool *const objpool)
586 if (objpool->freeBlocks > 0)
588 // For first allocation, head points to the very first index.
590 // ret = Head == ret = &pool[0];
591 size_t *const restrict block = ( size_t* )objpool->offs;
592 objpool->freeBlocks--;
594 // after allocating, we set head to the address of the index that *Head holds.
595 // Head = &pool[*Head * pool.objsize];
596 objpool->offs = (objpool->freeBlocks != 0)? objpool->mem + (*block*objpool->objSize) : 0;
597 return memset(block, 0, objpool->objSize);
602 void ObjPoolFree(ObjPool *const restrict objpool, void *const ptr)
604 uintptr_t block = (uintptr_t)ptr;
605 if ((ptr == NULL) || (block < objpool->mem) || (block > objpool->mem + objpool->memSize*objpool->objSize)) return;
608 // When we free our pointer, we recycle the pointer space to store the previous index and then we push it as our new head.
609 // *p = index of Head in relation to the buffer;
611 size_t *const restrict index = ( size_t* )block;
612 *index = (objpool->offs != 0)? (objpool->offs - objpool->mem)/objpool->objSize : objpool->memSize;
613 objpool->offs = block;
614 objpool->freeBlocks++;
618 void ObjPoolCleanUp(ObjPool *const restrict objpool, void **const restrict ptrref)
620 if (ptrref == NULL) return;
623 ObjPoolFree(objpool, *ptrref);
629 //----------------------------------------------------------------------------------
630 // Module Functions Definition - Double-Ended Stack
631 //----------------------------------------------------------------------------------
632 BiStack CreateBiStack(const size_t len)
634 BiStack destack = { 0 };
635 if (len == 0) return destack;
637 uint8_t *const buf = malloc(len*sizeof *buf);
638 if (buf==NULL) return destack;
640 destack.mem = ( uintptr_t )buf;
641 destack.front = destack.mem;
642 destack.back = destack.mem + len;
646 BiStack CreateBiStackFromBuffer(void *const buf, const size_t len)
648 BiStack destack = { 0 };
649 if (len == 0 || buf == NULL) return destack;
653 destack.mem = destack.front = ( uintptr_t )buf;
654 destack.back = destack.mem + len;
659 void DestroyBiStack(BiStack *const restrict destack)
661 if (destack->mem == 0) return;
664 uint8_t *const restrict buf = ( uint8_t* )destack->mem;
666 *destack = (BiStack){0};
670 void *BiStackAllocFront(BiStack *const restrict destack, const size_t len)
672 if (destack->mem == 0) return NULL;
675 const size_t ALIGNED_LEN = __AlignSize(len, sizeof(uintptr_t));
676 // front end arena is too high!
677 if (destack->front + ALIGNED_LEN >= destack->back) return NULL;
680 uint8_t *const restrict ptr = ( uint8_t* )destack->front;
681 destack->front += ALIGNED_LEN;
687 void *BiStackAllocBack(BiStack *const restrict destack, const size_t len)
689 if (destack->mem == 0) return NULL;
692 const size_t ALIGNED_LEN = __AlignSize(len, sizeof(uintptr_t));
693 // back end arena is too low
694 if (destack->back - ALIGNED_LEN <= destack->front) return NULL;
697 destack->back -= ALIGNED_LEN;
698 uint8_t *const restrict ptr = ( uint8_t* )destack->back;
704 void BiStackResetFront(BiStack *const destack)
706 if (destack->mem == 0) return;
707 else destack->front = destack->mem;
710 void BiStackResetBack(BiStack *const destack)
712 if (destack->mem == 0) return;
713 else destack->back = destack->mem + destack->size;
716 void BiStackResetAll(BiStack *const destack)
718 BiStackResetBack(destack);
719 BiStackResetFront(destack);
722 inline intptr_t BiStackMargins(const BiStack destack)
724 return destack.back - destack.front;
727 #endif // RMEM_IMPLEMENTATION
731 * v1.0: First Creation.
732 * v1.1: bug patches for the mempool and addition of object pool.
733 * v1.2: addition of bidirectional arena.
735 * optimizations of allocators.
736 * renamed 'Stack' to 'Arena'.
737 * replaced certain define constants with an anonymous enum.
738 * refactored MemPool to no longer require active or deferred defragging.