From f290aff64f4c46e50e1c768fa94171be08c162df Mon Sep 17 00:00:00 2001
From: Rik van Riel
Date: Fri, 13 Apr 2012 08:52:01 +1000
Subject: [PATCH] mm-add-extra-free-kbytes-tunable-update

All the fixes suggested by Andrew Morton.  Not much of a changelog since
the patch should probably be folded into
mm-add-extra-free-kbytes-tunable.patch

Thank you for pointing these out, Andrew.

Signed-off-by: Rik van Riel
Signed-off-by: Andrew Morton
---
 include/linux/mmzone.h |  2 +-
 include/linux/swap.h   |  2 ++
 kernel/sysctl.c        |  6 ++----
 mm/page_alloc.c        | 13 +++++++------
 4 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 41aa49b74821..0cfed36b587a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -796,7 +796,7 @@ static inline int is_dma(struct zone *zone)
 
 /* These two functions are used to setup the per zone pages min values */
 struct ctl_table;
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
+int free_kbytes_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a9330c732a82..d3d7b6abefd5 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -215,6 +215,8 @@ struct swap_list_t {
 
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
 extern unsigned long dirty_balance_reserve;
+extern int min_free_kbytes;
+extern int extra_free_kbytes;
 extern unsigned int nr_free_buffer_pages(void);
 extern unsigned int nr_free_pagecache_pages(void);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 38e0e70f008d..84f81c77373d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -101,8 +101,6 @@ extern int suid_dumpable;
 extern char core_pattern[];
 extern unsigned int core_pipe_limit;
 extern int pid_max;
-extern int min_free_kbytes;
-extern int extra_free_kbytes;
 extern int pid_max_min, pid_max_max;
 extern int sysctl_drop_caches;
 extern int percpu_pagelist_fraction;
@@ -1196,7 +1194,7 @@ static struct ctl_table vm_table[] = {
 		.data		= &min_free_kbytes,
 		.maxlen		= sizeof(min_free_kbytes),
 		.mode		= 0644,
-		.proc_handler	= min_free_kbytes_sysctl_handler,
+		.proc_handler	= free_kbytes_sysctl_handler,
 		.extra1		= &zero,
 	},
 	{
@@ -1204,7 +1202,7 @@ static struct ctl_table vm_table[] = {
 		.data		= &extra_free_kbytes,
 		.maxlen		= sizeof(extra_free_kbytes),
 		.mode		= 0644,
-		.proc_handler	= min_free_kbytes_sysctl_handler,
+		.proc_handler	= free_kbytes_sysctl_handler,
 		.extra1		= &zero,
 	},
 	{
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index decfbf0f71c2..14fda9e57f1d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -199,11 +199,12 @@ static char * const zone_names[MAX_NR_ZONES] = {
 int min_free_kbytes = 1024;
 
 /*
- * Extra memory for the system to try freeing.  Used to temporarily
- * free memory, to make space for new workloads.  Anyone can allocate
- * down to the min watermarks controlled by min_free_kbytes above.
+ * Extra memory for the system to try freeing between the min and
+ * low watermarks.  Useful for workloads that require low latency
+ * memory allocations in bursts larger than the normal gap between
+ * low and min.
  */
-int extra_free_kbytes = 0;
+int extra_free_kbytes;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -5143,11 +5144,11 @@ int __meminit init_per_zone_wmark_min(void)
 module_init(init_per_zone_wmark_min)
 
 /*
- * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+ * free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  *	that we can call two helper functions whenever min_free_kbytes
  *	or extra_free_kbytes changes.
  */
-int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
+int free_kbytes_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, buffer, length, ppos);
-- 
2.39.5
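
Note (not part of the patch): the last hunk above ends at the
proc_dointvec() call, so here is a minimal sketch of how the renamed
handler is commonly completed.  The setup_per_zone_wmarks() call and the
return path are assumptions inferred from the comment block in
mm/page_alloc.c, not lines taken from this patch:

	/* Sketch only: recompute the zone watermarks after a write. */
	int free_kbytes_sysctl_handler(ctl_table *table, int write,
		void __user *buffer, size_t *length, loff_t *ppos)
	{
		proc_dointvec(table, write, buffer, length, ppos);
		if (write)
			setup_per_zone_wmarks();	/* assumed helper */
		return 0;
	}

With the patch applied, the tunable would presumably be adjusted at run
time through /proc/sys/vm/extra_free_kbytes, alongside the existing
/proc/sys/vm/min_free_kbytes.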