From: Tang Chen
Date: Wed, 20 Feb 2013 02:14:26 +0000 (+1100)
Subject: Rename movablecore_map to movablemem_map.
X-Git-Tag: next-20130220~1^2~507
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=8785679bc9c0f6864a7d3f6d06db493ef142c804;p=karo-tx-linux.git

Rename movablecore_map to movablemem_map.

Since "core" could be confused with cpu cores, while what is meant here
is memory, rename the boot option movablecore_map to movablemem_map.

Signed-off-by: Tang Chen
Cc: Wen Congyang
Cc: Lai Jiangshan
Cc: Lin Feng
Cc: "H. Peter Anvin"
Signed-off-by: Andrew Morton
---
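As an illustration of the renamed option (values invented for this note,
not taken from the patch): a boot line such as

        movablemem_map=4G@8G

asks, per the nn[KMG]@ss[KMG] format documented below, that the 4G of
physical memory starting at 8G, i.e. [8G, 12G), be placed in ZONE_MOVABLE.
The patch changes only the option's name, not these semantics.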
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 71c3daa4a42c..2c13750031cf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1644,7 +1644,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			that the amount of memory usable for all allocations
 			is not too small.
 
-	movablecore_map=nn[KMG]@ss[KMG]
+	movablemem_map=nn[KMG]@ss[KMG]
 			[KNL,X86,IA-64,PPC] This parameter is similar to
 			memmap except it specifies the memory map of
 			ZONE_MOVABLE.
@@ -1654,11 +1654,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			ss to the end of the 1st node will be ZONE_MOVABLE,
 			and all the rest nodes will only have ZONE_MOVABLE.
 			If memmap is specified at the same time, the
-			movablecore_map will be limited within the memmap
+			movablemem_map will be limited within the memmap
 			areas. If kernelcore or movablecore is also specified,
-			movablecore_map will have higher priority to be
+			movablemem_map will have higher priority to be
 			satisfied. So the administrator should be careful that
-			the amount of movablecore_map areas are not too large.
+			the amount of movablemem_map areas are not too large.
 			Otherwise kernel won't have enough memory to start.
 
 	MTD_Partition=	[MTD]
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d0483011b3a3..662384c1c5a1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1361,15 +1361,15 @@ extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 extern void sparse_memory_present_with_active_regions(int nid);
 
-#define MOVABLECORE_MAP_MAX MAX_NUMNODES
-struct movablecore_entry {
+#define MOVABLEMEM_MAP_MAX MAX_NUMNODES
+struct movablemem_entry {
 	unsigned long start_pfn;	/* start pfn of memory segment */
 	unsigned long end_pfn;		/* end pfn of memory segment (exclusive) */
 };
 
-struct movablecore_map {
+struct movablemem_map {
 	int nr_map;
-	struct movablecore_entry map[MOVABLECORE_MAP_MAX];
+	struct movablemem_entry map[MOVABLEMEM_MAP_MAX];
 };
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
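Aside on the logic renamed in the mm/page_alloc.c diff below:
insert_movablemem_map() keeps movablemem_map.map sorted by start_pfn and
merges entries that overlap. The following minimal userspace C sketch
reimplements that insert-and-merge scheme for illustration only; the array
size, the insert_range() name, and the test values are invented here, and
the unconditional memmove (a no-op when the byte count is zero) stands in
for the kernel's guarded one.

#include <stdio.h>
#include <string.h>

#define MOVABLEMEM_MAP_MAX 8		/* the kernel uses MAX_NUMNODES */

struct movablemem_entry {
	unsigned long start_pfn;	/* inclusive */
	unsigned long end_pfn;		/* exclusive */
};

static struct movablemem_entry map[MOVABLEMEM_MAP_MAX];
static int nr_map;

/* Insert [start_pfn, end_pfn), merging overlaps, keeping start_pfn order. */
static void insert_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int pos, overlap;

	/* pos: first entry the new range can touch, or the insertion point. */
	for (pos = 0; pos < nr_map; pos++)
		if (start_pfn <= map[pos].end_pfn)
			break;

	/* No overlap: shift the tail back by one slot and insert. */
	if (pos == nr_map || end_pfn < map[pos].start_pfn) {
		if (nr_map >= MOVABLEMEM_MAP_MAX)
			return;		/* sketch: silently drop when full */
		memmove(&map[pos + 1], &map[pos],
			sizeof(map[0]) * (nr_map - pos));
		map[pos].start_pfn = start_pfn;
		map[pos].end_pfn = end_pfn;
		nr_map++;
		return;
	}

	/* overlap: last entry that still touches the new range. */
	for (overlap = pos + 1; overlap < nr_map; overlap++)
		if (end_pfn < map[overlap].start_pfn)
			break;
	overlap--;

	/* Collapse entries pos..overlap into one merged entry at pos. */
	map[pos].start_pfn = start_pfn < map[pos].start_pfn ?
				start_pfn : map[pos].start_pfn;
	map[pos].end_pfn = end_pfn > map[overlap].end_pfn ?
				end_pfn : map[overlap].end_pfn;
	memmove(&map[pos + 1], &map[overlap + 1],
		sizeof(map[0]) * (nr_map - overlap - 1));
	nr_map -= overlap - pos;
}

int main(void)
{
	int i;

	insert_range(10, 20);
	insert_range(40, 50);
	insert_range(15, 45);	/* bridges both: expect a single [10, 50) */
	for (i = 0; i < nr_map; i++)
		printf("[%lu, %lu)\n", map[i].start_pfn, map[i].end_pfn);
	return 0;
}

Feeding the three illustrative ranges prints one merged entry, [10, 50),
which is the merge-and-sort behaviour the kernel-doc below describes.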
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 70e5e6280388..01991c40daf5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -203,7 +203,7 @@ static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /* Movable memory ranges, will also be used by memblock subsystem. */
-struct movablecore_map movablecore_map;
+struct movablemem_map movablemem_map;
 
 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
@@ -5082,14 +5082,14 @@ early_param("kernelcore", cmdline_parse_kernelcore);
 early_param("movablecore", cmdline_parse_movablecore);
 
 /**
- * insert_movablecore_map - Insert a memory range in to movablecore_map.map.
+ * insert_movablemem_map - Insert a memory range in to movablemem_map.map.
  * @start_pfn:	start pfn of the range
  * @end_pfn:	end pfn of the range
  *
  * This function will also merge the overlapped ranges, and sort the array
  * by start_pfn in monotonic increasing order.
  */
-static void __init insert_movablecore_map(unsigned long start_pfn,
+static void __init insert_movablemem_map(unsigned long start_pfn,
 					  unsigned long end_pfn)
 {
 	int pos, overlap;
@@ -5098,31 +5098,31 @@ static void __init insert_movablecore_map(unsigned long start_pfn,
 	 * pos will be at the 1st overlapped range, or the position
 	 * where the element should be inserted.
 	 */
-	for (pos = 0; pos < movablecore_map.nr_map; pos++)
-		if (start_pfn <= movablecore_map.map[pos].end_pfn)
+	for (pos = 0; pos < movablemem_map.nr_map; pos++)
+		if (start_pfn <= movablemem_map.map[pos].end_pfn)
 			break;
 
 	/* If there is no overlapped range, just insert the element. */
-	if (pos == movablecore_map.nr_map ||
-	    end_pfn < movablecore_map.map[pos].start_pfn) {
+	if (pos == movablemem_map.nr_map ||
+	    end_pfn < movablemem_map.map[pos].start_pfn) {
 		/*
 		 * If pos is not the end of array, we need to move all
 		 * the rest elements backward.
 		 */
-		if (pos < movablecore_map.nr_map)
-			memmove(&movablecore_map.map[pos+1],
-				&movablecore_map.map[pos],
-				sizeof(struct movablecore_entry) *
-				(movablecore_map.nr_map - pos));
-		movablecore_map.map[pos].start_pfn = start_pfn;
-		movablecore_map.map[pos].end_pfn = end_pfn;
-		movablecore_map.nr_map++;
+		if (pos < movablemem_map.nr_map)
+			memmove(&movablemem_map.map[pos+1],
+				&movablemem_map.map[pos],
+				sizeof(struct movablemem_entry) *
+				(movablemem_map.nr_map - pos));
+		movablemem_map.map[pos].start_pfn = start_pfn;
+		movablemem_map.map[pos].end_pfn = end_pfn;
+		movablemem_map.nr_map++;
 		return;
 	}
 
 	/* overlap will be at the last overlapped range */
-	for (overlap = pos + 1; overlap < movablecore_map.nr_map; overlap++)
-		if (end_pfn < movablecore_map.map[overlap].start_pfn)
+	for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
+		if (end_pfn < movablemem_map.map[overlap].start_pfn)
 			break;
 
 	/*
@@ -5130,29 +5130,29 @@ static void __init insert_movablecore_map(unsigned long start_pfn,
 	 * and move the rest elements forward.
	 */
 	overlap--;
-	movablecore_map.map[pos].start_pfn = min(start_pfn,
-					movablecore_map.map[pos].start_pfn);
-	movablecore_map.map[pos].end_pfn = max(end_pfn,
-					movablecore_map.map[overlap].end_pfn);
+	movablemem_map.map[pos].start_pfn = min(start_pfn,
+					movablemem_map.map[pos].start_pfn);
+	movablemem_map.map[pos].end_pfn = max(end_pfn,
+					movablemem_map.map[overlap].end_pfn);
 
-	if (pos != overlap && overlap + 1 != movablecore_map.nr_map)
-		memmove(&movablecore_map.map[pos+1],
-			&movablecore_map.map[overlap+1],
-			sizeof(struct movablecore_entry) *
-			(movablecore_map.nr_map - overlap - 1));
+	if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
+		memmove(&movablemem_map.map[pos+1],
+			&movablemem_map.map[overlap+1],
+			sizeof(struct movablemem_entry) *
+			(movablemem_map.nr_map - overlap - 1));
 
-	movablecore_map.nr_map -= overlap - pos;
+	movablemem_map.nr_map -= overlap - pos;
 }
 
 /**
- * movablecore_map_add_region - Add a memory range into movablecore_map.
+ * movablemem_map_add_region - Add a memory range into movablemem_map.
  * @start:	physical start address of range
  * @end:	physical end address of range
  *
  * This function transform the physical address into pfn, and then add the
- * range into movablecore_map by calling insert_movablecore_map().
+ * range into movablemem_map by calling insert_movablemem_map().
  */
-static void __init movablecore_map_add_region(u64 start, u64 size)
+static void __init movablemem_map_add_region(u64 start, u64 size)
 {
 	unsigned long start_pfn, end_pfn;
 
@@ -5160,8 +5160,8 @@ static void __init movablecore_map_add_region(u64 start, u64 size)
 	if (start + size <= start)
 		return;
 
-	if (movablecore_map.nr_map >= ARRAY_SIZE(movablecore_map.map)) {
-		pr_err("movable_memory_map: too many entries;"
+	if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
+		pr_err("movablemem_map: too many entries;"
 		       " ignoring [mem %#010llx-%#010llx]\n",
 		       (unsigned long long) start,
 		       (unsigned long long) (start + size - 1));
@@ -5170,19 +5170,19 @@
 
 	start_pfn = PFN_DOWN(start);
 	end_pfn = PFN_UP(start + size);
-	insert_movablecore_map(start_pfn, end_pfn);
+	insert_movablemem_map(start_pfn, end_pfn);
 }
 
 /*
- * cmdline_parse_movablecore_map - Parse boot option movablecore_map.
+ * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
  * @p:	The boot option of the following format:
- *	movablecore_map=nn[KMG]@ss[KMG]
+ *	movablemem_map=nn[KMG]@ss[KMG]
  *
 * This option sets the memory range [ss, ss+nn) to be used as movable memory.
 *
 * Return: 0 on success or -EINVAL on failure.
 */
-static int __init cmdline_parse_movablecore_map(char *p)
+static int __init cmdline_parse_movablemem_map(char *p)
 {
 	char *oldp;
 	u64 start_at, mem_size;
 
@@ -5201,13 +5201,13 @@ static int __init cmdline_parse_movablecore_map(char *p)
 	if (p == oldp || *p != '\0')
 		goto err;
 
-	movablecore_map_add_region(start_at, mem_size);
+	movablemem_map_add_region(start_at, mem_size);
 
 	return 0;
 err:
 	return -EINVAL;
 }
-early_param("movablecore_map", cmdline_parse_movablecore_map);
+early_param("movablemem_map", cmdline_parse_movablemem_map);
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
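A worked example of the parse path above, with invented numbers: booting
with movablemem_map=2G@4G yields mem_size = 2G and start_at = 4G, so
movablemem_map_add_region(0x100000000, 0x80000000) computes, assuming
4 KiB pages, start_pfn = PFN_DOWN(0x100000000) = 0x100000 and
end_pfn = PFN_UP(0x100000000 + 0x80000000) = 0x180000, and the pfn range
[0x100000, 0x180000) is handed to insert_movablemem_map().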