1 #ifndef __TOOLS_LINUX_SLAB_H
2 #define __TOOLS_LINUX_SLAB_H
8 #include <linux/kernel.h>
9 #include <linux/log2.h>
10 #include <linux/page.h>
11 #include <linux/shrinker.h>
12 #include <linux/types.h>
/* Minimum alignment kmalloc() guarantees in this userspace shim. */
14 #define ARCH_KMALLOC_MINALIGN 16
/* Userspace malloc has no kmalloc-style size class cap. */
15 #define KMALLOC_MAX_SIZE SIZE_MAX
/*
 * kmalloc() shim: kernel-style allocation on top of posix_memalign().
 * NOTE(review): interior body lines are missing from this extract (no
 * braces, no declaration of `p`, no return statement) — consult the full
 * file before editing; documented here from the visible lines only.
 */
17 static inline void *kmalloc(size_t size, gfp_t flags)
/* Align to the largest power of two <= size, capped at one page. */
24 size_t alignment = min(rounddown_pow_of_two(size), (size_t)PAGE_SIZE);
/* posix_memalign() requires alignment to be a multiple of sizeof(void *). */
25 alignment = max(sizeof(void *), alignment);
26 if (posix_memalign(&p, alignment, size))
/* Emulate the kernel's __GFP_ZERO: zero the buffer on request. */
31 if (p && (flags & __GFP_ZERO))
/*
 * krealloc() shim. NOTE(review): interior lines are missing from this
 * extract. Visible logic: honors __GFP_ZERO and copies at most
 * min(usable size of old, usable size of new) bytes between buffers
 * (malloc_usable_size() is a glibc extension).
 */
37 static inline void *krealloc(void *old, size_t size, gfp_t flags)
47 if (flags & __GFP_ZERO)
/* Copy only what both allocations can actually hold. */
51 min(malloc_usable_size(old),
52 malloc_usable_size(new)));
58 #define kzalloc(size, flags) kmalloc(size, flags|__GFP_ZERO)
/*
 * kmalloc_array - allocate an array of @n elements of @size bytes each.
 * Returns NULL (instead of a short allocation) when n * size would
 * overflow size_t. All arguments are parenthesized: with the original
 * unparenthesized `n * size`, an argument like `a + b` would multiply
 * with the wrong precedence and bypass the overflow check.
 */
#define kmalloc_array(n, size, flags)				\
	((size) != 0 && (n) > SIZE_MAX / (size)			\
	 ? NULL : kmalloc((n) * (size), (flags)))
63 #define kcalloc(n, size, flags) kmalloc_array(n, size, flags|__GFP_ZERO)
65 #define kfree(p) free(p)
/*
 * NOTE(review): kernel kzfree()/kfree_sensitive() zeroes the buffer before
 * freeing; this shim only frees, so sensitive data is NOT wiped — confirm
 * no caller relies on the wipe semantics.
 */
66 #define kzfree(p) free(p)
/* kvmalloc/kvfree: no vmalloc fallback is needed in userspace, so these
 * degrade to plain kmalloc()/kfree(). */
68 #define kvmalloc(size, flags) kmalloc(size, flags)
69 #define kvfree(p) kfree(p)
/*
 * alloc_pages() shim: one page-aligned heap allocation of 2^order pages.
 * The returned "struct page *" is the memory itself (see vmalloc_to_page
 * below). NOTE(review): body lines are missing from this extract (no
 * braces, no declaration of `p`, no return) — consult the full file.
 */
71 static inline struct page *alloc_pages(gfp_t flags, unsigned int order)
73 size_t size = PAGE_SIZE << order;
/* aligned_alloc() requires size to be a multiple of the alignment; a
 * power-of-two multiple of PAGE_SIZE satisfies that. */
78 p = aligned_alloc(PAGE_SIZE, size);
79 if (p && (flags & __GFP_ZERO))
85 #define alloc_page(gfp) alloc_pages(gfp, 0)
/* "Pages" are plain heap pointers in this shim, so casting through
 * unsigned long round-trips safely back to free_pages(). */
87 #define __get_free_pages(gfp, order) ((unsigned long) alloc_pages(gfp, order))
88 #define __get_free_page(gfp) __get_free_pages(gfp, 0)
/*
 * __free_pages()/free_pages(): the order argument is irrelevant here —
 * the whole region came from a single allocation, so one free() suffices.
 * NOTE(review): the macro continuation lines are missing from this
 * extract; comments are placed before the macros only, since inserting
 * text between `\`-continued lines would alter the macro bodies.
 */
90 #define __free_pages(page, order) \
96 #define free_pages(addr, order) \
99 free((void *) (addr)); \
/* Single-page (order-0) convenience wrappers. */
102 #define __free_page(page) __free_pages((page), 0)
103 #define free_page(addr) free_pages((addr), 0)
/* vmalloc area flags — presumably kept only so kernel code compiles
 * unchanged; nothing in this shim reads them (verify against full file). */
105 #define VM_IOREMAP 0x00000001 /* ioremap() and friends */
106 #define VM_ALLOC 0x00000002 /* vmalloc() */
107 #define VM_MAP 0x00000004 /* vmap()ed pages */
108 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
109 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
110 #define VM_NO_GUARD 0x00000040 /* don't add guard page */
111 #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
113 static inline void vunmap(const void *addr) {}
/*
 * vmap() shim signature. NOTE(review): the function body is missing from
 * this extract, so its return value cannot be documented from here —
 * consult the full file before relying on it.
 */
115 static inline void *vmap(struct page **pages, unsigned int count,
116 unsigned long flags, unsigned prot)
/* Nothing is ever vmalloc'd in this shim, so no address ever qualifies. */
121 #define is_vmalloc_addr(page) 0
/* A "struct page *" is the backing memory itself here, so the conversion
 * is a plain cast (see alloc_pages above). */
123 #define vmalloc_to_page(addr) ((struct page *) (addr))
/*
 * kmemdup() shim: allocate @len bytes with @gfp and copy from @src.
 * NOTE(review): the copy and return lines are missing from this extract —
 * consult the full file; documented from the visible lines only.
 */
125 static inline void *kmemdup(const void *src, size_t len, gfp_t gfp)
129 p = kmalloc(len, gfp);
135 #endif /* __TOOLS_LINUX_SLAB_H */