X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=include%2Flinux%2Fslab.h;h=cf48570c1580e0ea092a4380bbc2a650829b496c;hb=e2670a38d1ad6038d64687cb1d585349508e06d7;hp=c19f190b1fb0d95cb73fee10ad3697419939df21;hpb=17e2f2775be6e10b966cd958bc0461aab662571a;p=bcachefs-tools-debian

diff --git a/include/linux/slab.h b/include/linux/slab.h
index c19f190..cf48570 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -6,22 +6,41 @@
 #include
 #include
+#include
+#include
 #include
 #include
 #include
+#include
+#include
+
 #define ARCH_KMALLOC_MINALIGN	16
 #define KMALLOC_MAX_SIZE	SIZE_MAX
 
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
+	unsigned i;
 	void *p;
 
-	run_shrinkers();
-
-	p = malloc(size);
-	if (p && (flags & __GFP_ZERO))
-		memset(p, 0, size);
+	for (i = 0; i < 10; i++) {
+		if (size) {
+			size_t alignment = min(rounddown_pow_of_two(size), (size_t)PAGE_SIZE);
+			alignment = max(sizeof(void *), alignment);
+			if (posix_memalign(&p, alignment, size))
+				p = NULL;
+		} else {
+			p = malloc(0);
+		}
+
+		if (p) {
+			if (flags & __GFP_ZERO)
+				memset(p, 0, size);
+			break;
+		}
+
+		run_shrinkers(flags, true);
+	}
 
 	return p;
 }
@@ -30,44 +49,68 @@ static inline void *krealloc(void *old, size_t size, gfp_t flags)
 {
 	void *new;
 
-	run_shrinkers();
-
-	new = malloc(size);
+	new = kmalloc(size, flags);
 	if (!new)
 		return NULL;
 
 	if (flags & __GFP_ZERO)
 		memset(new, 0, size);
 
-	memcpy(new, old,
-	       min(malloc_usable_size(old),
-		   malloc_usable_size(new)));
-	free(old);
+	if (old) {
+		memcpy(new, old,
+		       min(malloc_usable_size(old),
+			   malloc_usable_size(new)));
+		free(old);
+	}
 
 	return new;
 }
 
+static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+	size_t bytes;
+
+	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+		return NULL;
+
+	return krealloc(p, bytes, flags);
+}
+
 #define kzalloc(size, flags)		kmalloc(size, flags|__GFP_ZERO)
 #define kmalloc_array(n, size, flags)					\
 	((size) != 0 && (n) > SIZE_MAX / (size)				\
-	 ? NULL : kmalloc(n * size, flags))
+	 ? NULL : kmalloc((n) * (size), flags))
+
+#define kvmalloc_array(n, size, flags)					\
+	((size) != 0 && (n) > SIZE_MAX / (size)				\
+	 ? NULL : kmalloc((n) * (size), flags))
 
 #define kcalloc(n, size, flags)		kmalloc_array(n, size, flags|__GFP_ZERO)
 
 #define kfree(p)			free(p)
-#define kvfree(p)			free(p)
 #define kzfree(p)			free(p)
 
+#define kvmalloc(size, flags)		kmalloc(size, flags)
+#define kvzalloc(size, flags)		kzalloc(size, flags)
+#define kvfree(p)			kfree(p)
+
 static inline struct page *alloc_pages(gfp_t flags, unsigned int order)
 {
 	size_t size = PAGE_SIZE << order;
+	unsigned i;
 	void *p;
 
-	run_shrinkers();
+	for (i = 0; i < 10; i++) {
+		p = aligned_alloc(PAGE_SIZE, size);
+
+		if (p) {
+			if (flags & __GFP_ZERO)
+				memset(p, 0, size);
+			break;
+		}
 
-	p = aligned_alloc(PAGE_SIZE, size);
-	if (p && (flags & __GFP_ZERO))
-		memset(p, 0, size);
+		run_shrinkers(flags, true);
+	}
 
 	return p;
 }
@@ -122,4 +165,88 @@ static inline void *kmemdup(const void *src, size_t len, gfp_t gfp)
 	return p;
 }
 
+struct kmem_cache {
+	size_t		obj_size;
+};
+
+static inline void *kmem_cache_alloc(struct kmem_cache *c, gfp_t gfp)
+{
+	return kmalloc(c->obj_size, gfp);
+}
+
+static inline void kmem_cache_free(struct kmem_cache *c, void *p)
+{
+	kfree(p);
+}
+
+static inline void kmem_cache_destroy(struct kmem_cache *p)
+{
+	kfree(p);
+}
+
+static inline struct kmem_cache *kmem_cache_create(size_t obj_size)
+{
+	struct kmem_cache *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return NULL;
+
+	p->obj_size = obj_size;
+	return p;
+}
+
+#define KMEM_CACHE(_struct, _flags)	kmem_cache_create(sizeof(struct _struct))
+
+#define PAGE_KERNEL		0
+#define PAGE_KERNEL_EXEC	1
+
+#define vfree(p)		free(p)
+
+static inline void *__vmalloc(unsigned long size, gfp_t flags)
+{
+	unsigned i;
+	void *p;
+
+	size = round_up(size, PAGE_SIZE);
+
+	for (i = 0; i < 10; i++) {
+		p = aligned_alloc(PAGE_SIZE, size);
+
+		if (p) {
+			if (flags & __GFP_ZERO)
+				memset(p, 0, size);
+			break;
+		}
+
+		run_shrinkers(flags, true);
+	}
+
+	return p;
+}
+
+static inline void *vmalloc_exec(unsigned long size, gfp_t gfp_mask)
+{
+	void *p;
+
+	p = __vmalloc(size, gfp_mask);
+	if (!p)
+		return NULL;
+
+	if (mprotect(p, size, PROT_READ|PROT_WRITE|PROT_EXEC)) {
+		vfree(p);
+		return NULL;
+	}
+
+	return p;
+}
+
+static inline void *vmalloc(unsigned long size)
+{
+	return __vmalloc(size, GFP_KERNEL);
+}
+
+static inline void *vzalloc(unsigned long size)
+{
+	return __vmalloc(size, GFP_KERNEL|__GFP_ZERO);
+}
+
 #endif /* __TOOLS_LINUX_SLAB_H */
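
Usage sketch: the header above replaces the kernel's slab allocator with thin wrappers around libc, so the shims can be exercised from an ordinary userspace program. The test below is hypothetical (not part of this commit) and assumes the bcachefs-tools include/ directory is on the compiler's include path, so that linux/slab.h, gfp_t and GFP_KERNEL resolve to the shims shown in this diff.

/* Hypothetical test, not from this commit: exercises the userspace
 * slab shims defined above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/slab.h>

int main(void)
{
	/* kzalloc() is kmalloc() with __GFP_ZERO, so memory comes back zeroed. */
	unsigned *counters = kzalloc(16 * sizeof(*counters), GFP_KERNEL);
	assert(counters && counters[0] == 0);

	/* krealloc_array() rejects requests whose byte count would overflow... */
	assert(!krealloc_array(NULL, SIZE_MAX, 2, GFP_KERNEL));

	/* ...and otherwise behaves like krealloc(): old contents are copied
	 * via malloc_usable_size() and the old buffer is freed. */
	counters = krealloc_array(counters, 32, sizeof(*counters), GFP_KERNEL);
	assert(counters);

	/* A kmem_cache here is just a recorded object size around kmalloc(). */
	struct kmem_cache *cache = kmem_cache_create(sizeof(double));
	double *d = kmem_cache_alloc(cache, GFP_KERNEL);
	assert(d);
	kmem_cache_free(cache, d);
	kmem_cache_destroy(cache);

	kfree(counters);
	printf("slab shims OK\n");
	return 0;
}

On allocation failure each shim calls run_shrinkers() and retries up to ten times before returning NULL, mirroring the kernel's reclaim-then-retry behaviour; the sketch above only exercises the success paths.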
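
The vmalloc shims added at the end of the diff behave the same way: __vmalloc() rounds the request up to whole pages and retries with run_shrinkers() on failure, and vmalloc_exec() additionally flips the mapping to PROT_READ|PROT_WRITE|PROT_EXEC via mprotect(). A second hypothetical sketch, under the same include-path assumption:

/* Hypothetical test, not from this commit: __vmalloc() hands back
 * page-aligned memory, and vmalloc_exec() returns NULL if either the
 * allocation or the mprotect() call fails. Actually executing code
 * from the buffer would be architecture-specific and is omitted. */
#include <assert.h>
#include <stdint.h>
#include <linux/slab.h>

int main(void)
{
	/* Odd sizes are fine: __vmalloc() rounds up to PAGE_SIZE, and
	 * vzalloc() passes __GFP_ZERO, so the tail is zeroed too. */
	unsigned char *v = vzalloc(100);
	assert(v && ((uintptr_t)v % PAGE_SIZE) == 0 && v[50] == 0);
	vfree(v);

	void *x = vmalloc_exec(100, GFP_KERNEL);
	assert(x);	/* pages are now readable, writable and executable */
	vfree(x);
	return 0;
}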