git.sesse.net Git - bcachefs-tools-debian/commitdiff
Shrinker improvements
author: Kent Overstreet <kent.overstreet@gmail.com>
Tue, 22 Mar 2022 00:20:09 +0000 (20:20 -0400)
committer: Kent Overstreet <kent.overstreet@gmail.com>
Tue, 22 Mar 2022 00:20:09 +0000 (20:20 -0400)
After memory allocation failure, don't rely on /proc/meminfo to figure
out how much memory we should free - instead unconditionally free 1/8th
of each cache.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
include/linux/shrinker.h
include/linux/slab.h
include/linux/vmalloc.h
linux/shrinker.c

index 626b768cda969e248202965f8638b1a10ac363e8..eba6cfdd48d2fe69c3575acbfb4ce428b673676a 100644 (file)
@@ -25,6 +25,6 @@ struct shrinker {
 int register_shrinker(struct shrinker *);
 void unregister_shrinker(struct shrinker *);
 
-void run_shrinkers(void);
+void run_shrinkers(gfp_t gfp_mask, bool);
 
 #endif /* __TOOLS_LINUX_SHRINKER_H */
index bc99973fccd22059805f43d99fbda4f9e7daefaf..557c04113ae66fafdb11fca48bedf95c9f1b5530 100644 (file)
@@ -20,7 +20,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
        void *p;
 
        do {
-               run_shrinkers();
+               run_shrinkers(flags, i != 0);
 
                if (size) {
                        size_t alignment = min(rounddown_pow_of_two(size), (size_t)PAGE_SIZE);
@@ -83,7 +83,7 @@ static inline struct page *alloc_pages(gfp_t flags, unsigned int order)
        void *p;
 
        do {
-               run_shrinkers();
+               run_shrinkers(flags, i != 0);
 
                p = aligned_alloc(PAGE_SIZE, size);
                if (p && (flags & __GFP_ZERO))
index ccb319eb52a4a444db0f5981f23a7acfa92bc2f7..965e341d44d5406af20fe0cb8f796aaf6f453d46 100644 (file)
@@ -20,7 +20,7 @@ static inline void *__vmalloc(unsigned long size, gfp_t gfp_mask)
        size = round_up(size, PAGE_SIZE);
 
        do {
-               run_shrinkers();
+               run_shrinkers(gfp_mask, i != 0);
 
                p = aligned_alloc(PAGE_SIZE, size);
                if (p && gfp_mask & __GFP_ZERO)
index f6c979aa6ae1bf751139ad604c3e56ead7cc777f..876c1baeec662c630054945a1b3fb2abf1fd1419 100644 (file)
@@ -65,7 +65,24 @@ static struct meminfo read_meminfo(void)
        return ret;
 }
 
-void run_shrinkers(void)
+static void run_shrinkers_allocation_failed(gfp_t gfp_mask)
+{
+       struct shrinker *shrinker;
+
+       mutex_lock(&shrinker_lock);
+       list_for_each_entry(shrinker, &shrinker_list, list) {
+               struct shrink_control sc = { .gfp_mask  = gfp_mask, };
+
+               unsigned long have = shrinker->count_objects(shrinker, &sc);
+
+               sc.nr_to_scan = have / 8;
+
+               shrinker->scan_objects(shrinker, &sc);
+       }
+       mutex_unlock(&shrinker_lock);
+}
+
+void run_shrinkers(gfp_t gfp_mask, bool allocation_failed)
 {
        struct shrinker *shrinker;
        struct meminfo info;
@@ -75,6 +92,11 @@ void run_shrinkers(void)
        if (list_empty(&shrinker_list))
                return;
 
+       if (allocation_failed) {
+               run_shrinkers_allocation_failed(gfp_mask);
+               return;
+       }
+
        info = read_meminfo();
 
        if (info.total && info.available) {
@@ -92,7 +114,8 @@ void run_shrinkers(void)
        mutex_lock(&shrinker_lock);
        list_for_each_entry(shrinker, &shrinker_list, list) {
                struct shrink_control sc = {
-                       .nr_to_scan = want_shrink >> PAGE_SHIFT
+                       .gfp_mask       = gfp_mask,
+                       .nr_to_scan     = want_shrink >> PAGE_SHIFT
                };
 
                shrinker->scan_objects(shrinker, &sc);