From b19d6fd204bf5fe87a9b43d243a8becc376444db Mon Sep 17 00:00:00 2001
From: HATAYAMA Daisuke
Date: Wed, 20 Mar 2013 15:08:34 +1100
Subject: [PATCH] vmcore: allocate buffer for ELF headers on page-size
 alignment

Allocate the buffer for ELF headers on a page-size aligned boundary to
satisfy the mmap() requirement.  For this, __get_free_pages() is used
instead of kmalloc().

Also, a later patch will decrease the actually used buffer size for ELF
headers, so it's necessary to keep the original buffer size and the
actually used buffer size separately.  elfcorebuf_sz_orig keeps the
original one and elfcorebuf_sz the actually used one.

Signed-off-by: HATAYAMA Daisuke
Reviewed-by: Zhang Yanfei
Cc: Vivek Goyal
Cc: "Eric W. Biederman"
Signed-off-by: Andrew Morton
---
 fs/proc/vmcore.c | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 7d2dc4cd95e2..17e2501151a2 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -31,6 +31,7 @@ static LIST_HEAD(vmcore_list);
 /* Stores the pointer to the buffer containing kernel elf core headers. */
 static char *elfcorebuf;
 static size_t elfcorebuf_sz;
+static size_t elfcorebuf_sz_orig;
 
 /* Total size of vmcore file. */
 static u64 vmcore_size;
@@ -608,13 +609,16 @@ static int __init parse_crash_elf64_headers(void)
 
 	/* Read in all elf headers. */
 	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
-	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
+	elfcorebuf_sz_orig = elfcorebuf_sz;
+	elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					       get_order(elfcorebuf_sz_orig));
 	if (!elfcorebuf)
 		return -ENOMEM;
 	addr = elfcorehdr_addr;
 	rc = read_from_oldmem(elfcorebuf, sizeof(Elf64_Ehdr), &addr, 0);
 	if (rc < 0) {
-		kfree(elfcorebuf);
+		free_pages((unsigned long)elfcorebuf,
+			   get_order(elfcorebuf_sz_orig));
 		return rc;
 	}
 	addr = elfcorehdr_addr + ehdr.e_phoff;
@@ -629,13 +633,15 @@
 	/* Merge all PT_NOTE headers into one. */
 	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
 	if (rc) {
-		kfree(elfcorebuf);
+		free_pages((unsigned long)elfcorebuf,
+			   get_order(elfcorebuf_sz_orig));
 		return rc;
 	}
 	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
 						  &vmcore_list);
 	if (rc) {
-		kfree(elfcorebuf);
+		free_pages((unsigned long)elfcorebuf,
+			   get_order(elfcorebuf_sz_orig));
 		return rc;
 	}
 	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
@@ -671,7 +677,9 @@ static int __init parse_crash_elf32_headers(void)
 
 	/* Read in all elf headers. */
 	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
-	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
+	elfcorebuf_sz_orig = elfcorebuf_sz;
+	elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					       get_order(elfcorebuf_sz_orig));
 	if (!elfcorebuf)
 		return -ENOMEM;
 	addr = elfcorehdr_addr;
@@ -684,7 +692,8 @@ static int __init parse_crash_elf32_headers(void)
 	rc = read_from_oldmem(elfcorebuf + sizeof(Elf32_Ehdr),
 			      ehdr.e_phnum * sizeof(Elf32_Phdr), &addr, 0);
 	if (rc < 0) {
-		kfree(elfcorebuf);
+		free_pages((unsigned long)elfcorebuf,
+			   get_order(elfcorebuf_sz_orig));
 		return rc;
 	}
 	((Elf32_Ehdr *)elfcorebuf)->e_phoff = sizeof(Elf32_Ehdr);
@@ -692,13 +701,15 @@
 	/* Merge all PT_NOTE headers into one. */
 	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
 	if (rc) {
-		kfree(elfcorebuf);
+		free_pages((unsigned long)elfcorebuf,
+			   get_order(elfcorebuf_sz_orig));
 		return rc;
 	}
 	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
 						  &vmcore_list);
 	if (rc) {
-		kfree(elfcorebuf);
+		free_pages((unsigned long)elfcorebuf,
+			   get_order(elfcorebuf_sz_orig));
 		return rc;
 	}
 	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
@@ -780,7 +791,8 @@ void vmcore_cleanup(void)
 		list_del(&m->list);
 		kfree(m);
 	}
-	kfree(elfcorebuf);
+	free_pages((unsigned long)elfcorebuf,
+		   get_order(elfcorebuf_sz_orig));
 	elfcorebuf = NULL;
 }
 EXPORT_SYMBOL_GPL(vmcore_cleanup);
-- 
2.39.5
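
For reference, here is a minimal, hypothetical kernel-module sketch (not
part of the patch; the demo_* functions and the buf/buf_sz names are
illustrative only) of the allocation pattern the patch adopts:
__get_free_pages() with get_order() yields a zeroed, page-aligned buffer
sized in whole pages, suitable for later mapping into user space, and
free_pages() must be passed the order derived from the original size
(elfcorebuf_sz_orig in the patch) even if the in-use size is reduced
afterwards.

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>

static char *buf;		/* analogous to elfcorebuf */
static size_t buf_sz;		/* size actually in use; may shrink later */
static size_t buf_sz_orig;	/* size the allocation (and free) is based on */

static int __init demo_init(void)
{
	buf_sz = 3000;		/* arbitrary example size */
	buf_sz_orig = buf_sz;

	/* Zeroed, page-aligned allocation; order derived from the size. */
	buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				       get_order(buf_sz_orig));
	if (!buf)
		return -ENOMEM;

	/* ... later code may shrink buf_sz, but buf_sz_orig stays fixed ... */
	return 0;
}

static void __exit demo_exit(void)
{
	/* Free with the order of the original size, not the shrunken one. */
	free_pages((unsigned long)buf, get_order(buf_sz_orig));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");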