From e2670a38d1ad6038d64687cb1d585349508e06d7 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Mon, 19 Dec 2022 14:47:42 -0500
Subject: [PATCH] Change memory reclaim

- Spin up a background thread to call the shrinkers every 1 second
- Memory allocations will only call reclaim after a failed allocation,
  not every single time

This will be a major performance boost on allocation intensive workloads.

Signed-off-by: Kent Overstreet
---
 include/linux/slab.h | 56 +++++++++++++++++++++++++++-----------------
 linux/shrinker.c     | 29 +++++++++++++++++++++++
 2 files changed, 63 insertions(+), 22 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 17fe235..cf48570 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,12 +20,10 @@
 
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
-	unsigned i = 0;
+	unsigned i;
 	void *p;
 
-	do {
-		run_shrinkers(flags, i != 0);
-
+	for (i = 0; i < 10; i++) {
 		if (size) {
 			size_t alignment = min(rounddown_pow_of_two(size), (size_t)PAGE_SIZE);
 			alignment = max(sizeof(void *), alignment);
@@ -34,9 +32,15 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 		} else {
 			p = malloc(0);
 		}
-		if (p && (flags & __GFP_ZERO))
-			memset(p, 0, size);
-	} while (!p && i++ < 10);
+
+		if (p) {
+			if (flags & __GFP_ZERO)
+				memset(p, 0, size);
+			break;
+		}
+
+		run_shrinkers(flags, true);
+	}
 
 	return p;
 }
@@ -93,16 +97,20 @@ static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t
 static inline struct page *alloc_pages(gfp_t flags, unsigned int order)
 {
 	size_t size = PAGE_SIZE << order;
-	unsigned i = 0;
+	unsigned i;
 	void *p;
 
-	do {
-		run_shrinkers(flags, i != 0);
-
+	for (i = 0; i < 10; i++) {
 		p = aligned_alloc(PAGE_SIZE, size);
-		if (p && (flags & __GFP_ZERO))
-			memset(p, 0, size);
-	} while (!p && i++ < 10);
+
+		if (p) {
+			if (flags & __GFP_ZERO)
+				memset(p, 0, size);
+			break;
+		}
+
+		run_shrinkers(flags, true);
+	}
 
 	return p;
 }
@@ -193,20 +201,24 @@ static inline struct kmem_cache *kmem_cache_create(size_t obj_size)
 
 #define vfree(p) free(p)
 
-static inline void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+static inline void *__vmalloc(unsigned long size, gfp_t flags)
 {
-	unsigned i = 0;
+	unsigned i;
 	void *p;
 
 	size = round_up(size, PAGE_SIZE);
 
-	do {
-		run_shrinkers(gfp_mask, i != 0);
-
+	for (i = 0; i < 10; i++) {
 		p = aligned_alloc(PAGE_SIZE, size);
-		if (p && gfp_mask & __GFP_ZERO)
-			memset(p, 0, size);
-	} while (!p && i++ < 10);
+
+		if (p) {
+			if (flags & __GFP_ZERO)
+				memset(p, 0, size);
+			break;
+		}
+
+		run_shrinkers(flags, true);
+	}
 
 	return p;
 }
diff --git a/linux/shrinker.c b/linux/shrinker.c
index 23e288d..0b5715b 100644
--- a/linux/shrinker.c
+++ b/linux/shrinker.c
@@ -1,6 +1,7 @@
 #include <stdio.h>
 
+#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/shrinker.h>
 
@@ -126,3 +127,31 @@ void run_shrinkers(gfp_t gfp_mask, bool allocation_failed)
 	}
 	mutex_unlock(&shrinker_lock);
 }
+
+static int shrinker_thread(void *arg)
+{
+	while (!kthread_should_stop()) {
+		sleep(1);
+		run_shrinkers(GFP_KERNEL, false);
+	}
+
+	return 0;
+}
+
+struct task_struct *shrinker_task;
+
+__attribute__((constructor(103)))
+static void shrinker_thread_init(void)
+{
+	shrinker_task = kthread_run(shrinker_thread, NULL, "shrinkers");
+	BUG_ON(IS_ERR(shrinker_task));
+}
+
+__attribute__((destructor(103)))
+static void shrinker_thread_exit(void)
+{
+	int ret = kthread_stop(shrinker_task);
+	BUG_ON(ret);
+
+	shrinker_task = NULL;
+}
-- 
2.39.5
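
Note (not part of the patch): the reclaim model above (a background thread that
reclaims once per second, and allocation paths that reclaim synchronously only
after a failed allocation, then retry) can be sketched in plain userspace C.
All names below are illustrative stand-ins: reclaim_cb for run_shrinkers(),
try_alloc for the reworked kmalloc(), and a pthread in place of the kthread
shim; none of this code is from the tree.

/*
 * Illustrative userspace sketch of the reclaim model; hypothetical names,
 * not code from the repository.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static atomic_bool stop_reclaiming;

/* Stand-in for run_shrinkers(): trim whatever caches the program keeps. */
static void reclaim_cb(bool allocation_failed)
{
	(void) allocation_failed;
}

/* Analogous to shrinker_thread(): reclaim once per second in the background. */
static void *reclaim_thread(void *arg)
{
	(void) arg;
	while (!atomic_load(&stop_reclaiming)) {
		sleep(1);
		reclaim_cb(false);
	}
	return NULL;
}

/*
 * Analogous to the reworked kmalloc(): reclaim is invoked only after a failed
 * allocation, then the allocation is retried, up to ten attempts.
 */
static void *try_alloc(size_t size, bool zero)
{
	void *p = NULL;

	for (unsigned i = 0; i < 10; i++) {
		p = malloc(size);
		if (p) {
			if (zero)
				memset(p, 0, size);
			break;
		}
		reclaim_cb(true);	/* synchronous reclaim only on failure */
	}
	return p;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, reclaim_thread, NULL);

	void *p = try_alloc(4096, true);
	printf("allocation %s\n", p ? "succeeded" : "failed");
	free(p);

	atomic_store(&stop_reclaiming, true);
	pthread_join(tid, NULL);	/* thread exits within about a second */
	return 0;
}

Build with cc -pthread; the sketch only shows where reclaim runs relative to
allocation failure, not the real shrinker API.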