1 #ifndef __TOOLS_LINUX_SLAB_H
2 #define __TOOLS_LINUX_SLAB_H
8 #include <linux/kernel.h>
9 #include <linux/log2.h>
10 #include <linux/overflow.h>
11 #include <linux/page.h>
12 #include <linux/shrinker.h>
13 #include <linux/types.h>
/* Allocation profiling is compiled out in userspace: alloc_hooks() just
 * expands to the underlying *_noprof call. */
#define alloc_hooks(_do, ...) _do

/* Mirror the kernel's minimum kmalloc() alignment guarantee. */
#define ARCH_KMALLOC_MINALIGN 16
/* Userspace malloc has no kmalloc-style size cap. */
#define KMALLOC_MAX_SIZE SIZE_MAX
23 static inline void *kmalloc_noprof(size_t size, gfp_t flags)
28 for (i = 0; i < 10; i++) {
30 size_t alignment = min_t(size_t, PAGE_SIZE,
31 rounddown_pow_of_two(size));
32 alignment = max(sizeof(void *), alignment);
33 if (posix_memalign(&p, alignment, size))
40 if (flags & __GFP_ZERO)
45 run_shrinkers(flags, true);
50 #define kmalloc kmalloc_noprof
52 static inline void *krealloc(void *old, size_t size, gfp_t flags)
56 new = kmalloc(size, flags);
60 if (flags & __GFP_ZERO)
65 min(malloc_usable_size(old),
66 malloc_usable_size(new)));
73 static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
77 if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
80 return krealloc(p, bytes, flags);
/* kzalloc(): kmalloc() with __GFP_ZERO, i.e. a zeroed allocation. */
#define kzalloc(size, flags) kmalloc(size, flags|__GFP_ZERO)
85 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
89 if (unlikely(check_mul_overflow(n, size, &bytes)))
91 return kmalloc(bytes, flags);
/*
 * kvmalloc_array() - overflow-checked array allocation.
 *
 * Delegates to kmalloc_array(), which performs the same n * size overflow
 * check (via check_mul_overflow()) and — being a function — evaluates each
 * argument exactly once, unlike the previous open-coded macro which
 * expanded @n and @size multiple times.
 */
#define kvmalloc_array(n, size, flags)	kmalloc_array(n, size, flags)
/* Zeroing, overflow-checked array allocation (calloc() equivalent). */
#define kcalloc(n, size, flags) kmalloc_array(n, size, flags|__GFP_ZERO)

#define kfree(p) free(p)
/* NOTE(review): the kernel's kzfree()/kfree_sensitive() zeroes the buffer
 * before freeing; this shim does not — confirm no caller relies on the
 * zeroing for sensitive data. */
#define kzfree(p) free(p)

/* Userspace has no vmalloc/kmalloc distinction: kv* map straight to k*. */
#define kvmalloc(size, flags) kmalloc(size, flags)
#define kvzalloc(size, flags) kzalloc(size, flags)
#define kvfree(p) kfree(p)
107 static inline struct page *alloc_pages_noprof(gfp_t flags, unsigned int order)
109 size_t size = PAGE_SIZE << order;
113 for (i = 0; i < 10; i++) {
114 p = aligned_alloc(PAGE_SIZE, size);
117 if (flags & __GFP_ZERO)
122 run_shrinkers(flags, true);
#define alloc_pages alloc_pages_noprof

/* Single-page convenience wrapper (order 0). */
#define alloc_page(gfp) alloc_pages(gfp, 0)

/* "Free pages" are plain heap allocations; the address doubles as the
 * struct page pointer, so the cast to unsigned long is the whole mapping. */
#define _get_free_pages(gfp, order) ((unsigned long) alloc_pages(gfp, order))
#define __get_free_pages(gfp, order) ((unsigned long) alloc_pages(gfp, order))
#define get_free_pages_noprof(gfp, order) \
((unsigned long) alloc_pages(gfp, order))
#define __get_free_page(gfp) __get_free_pages(gfp, 0)
/* Releasing "pages" is just free(); @order is irrelevant to the heap but is
 * evaluated (void-cast) so callers get no unused-argument surprises.
 * do/while(0) keeps the macros statement-safe in if/else bodies. */
#define __free_pages(page, order)		\
do {						\
	(void) (order);				\
	free(page);				\
} while (0)

#define free_pages(addr, order)			\
do {						\
	(void) (order);				\
	free((void *) (addr));			\
} while (0)

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
/* vmalloc() area flag bits — accepted for API compatibility but ignored by
 * this userspace shim (there is no real vmalloc address space here). */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
/* No-op: vmap() below never establishes a mapping, so there is nothing to undo. */
static inline void vunmap(const void *addr) {}
162 static inline void *vmap(struct page **pages, unsigned int count,
163 unsigned long flags, unsigned prot)
/* Everything comes from the heap here, so nothing is a "vmalloc address". */
#define is_vmalloc_addr(page) 0

/* Identity mapping: a "struct page *" is just the allocation's address. */
#define vmalloc_to_page(addr) ((struct page *) (addr))
172 static inline void *kmemdup(const void *src, size_t len, gfp_t gfp)
176 p = kmalloc(len, gfp);
186 static inline void *kmem_cache_alloc(struct kmem_cache *c, gfp_t gfp)
188 return kmalloc(c->obj_size, gfp);
191 static inline void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t gfp)
193 return kzalloc(c->obj_size, gfp);
/* Return object @p to cache @c.  There is no per-cache freelist in this
 * shim, so the cache pointer is unused and the object is simply freed. */
static inline void kmem_cache_free(struct kmem_cache *c, void *p)
{
	free(p);
}
/* Destroy a cache created by kmem_cache_create(): just free the descriptor.
 * Caller is responsible for having freed all outstanding objects. */
static inline void kmem_cache_destroy(struct kmem_cache *p)
{
	free(p);
}
206 static inline struct kmem_cache *kmem_cache_create(size_t obj_size)
208 struct kmem_cache *p = kmalloc(sizeof(*p), GFP_KERNEL);
212 p->obj_size = obj_size;
/* Kernel KMEM_CACHE() helper: flags/ctors are ignored, only sizeof matters. */
#define KMEM_CACHE(_struct, _flags) kmem_cache_create(sizeof(struct _struct))

/* Page-protection selectors; see vmalloc_exec() for the only consumer of
 * executable mappings in this shim. */
#define PAGE_KERNEL 0
#define PAGE_KERNEL_EXEC 1

#define vfree(p) free(p)
223 static inline void *__vmalloc_noprof(unsigned long size, gfp_t flags)
228 size = round_up(size, PAGE_SIZE);
230 for (i = 0; i < 10; i++) {
231 p = aligned_alloc(PAGE_SIZE, size);
234 if (flags & __GFP_ZERO)
239 run_shrinkers(flags, true);
244 #define __vmalloc __vmalloc_noprof
246 static inline void *vmalloc_exec(unsigned long size, gfp_t gfp_mask)
250 p = __vmalloc(size, gfp_mask);
254 if (mprotect(p, size, PROT_READ|PROT_WRITE|PROT_EXEC)) {
262 static inline void *vmalloc(unsigned long size)
264 return __vmalloc(size, GFP_KERNEL);
267 static inline void *vzalloc(unsigned long size)
269 return __vmalloc(size, GFP_KERNEL|__GFP_ZERO);
272 #endif /* __TOOLS_LINUX_SLAB_H */