#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/* Movable memory ranges, will also be used by memblock subsystem. */
struct movablemem_map movablemem_map;

/* Per-zone pfn bounds computed at boot; __meminitdata: discarded after init. */
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];

early_param("movablecore", cmdline_parse_movablecore);
/**
- * insert_movablecore_map - Insert a memory range in to movablecore_map.map.
+ * insert_movablemem_map - Insert a memory range in to movablemem_map.map.
* @start_pfn: start pfn of the range
* @end_pfn: end pfn of the range
*
* This function will also merge the overlapped ranges, and sort the array
* by start_pfn in monotonic increasing order.
*/
-static void __init insert_movablecore_map(unsigned long start_pfn,
+static void __init insert_movablemem_map(unsigned long start_pfn,
unsigned long end_pfn)
{
int pos, overlap;
* pos will be at the 1st overlapped range, or the position
* where the element should be inserted.
*/
- for (pos = 0; pos < movablecore_map.nr_map; pos++)
- if (start_pfn <= movablecore_map.map[pos].end_pfn)
+ for (pos = 0; pos < movablemem_map.nr_map; pos++)
+ if (start_pfn <= movablemem_map.map[pos].end_pfn)
break;
/* If there is no overlapped range, just insert the element. */
- if (pos == movablecore_map.nr_map ||
- end_pfn < movablecore_map.map[pos].start_pfn) {
+ if (pos == movablemem_map.nr_map ||
+ end_pfn < movablemem_map.map[pos].start_pfn) {
/*
* If pos is not the end of array, we need to move all
* the rest elements backward.
*/
- if (pos < movablecore_map.nr_map)
- memmove(&movablecore_map.map[pos+1],
- &movablecore_map.map[pos],
- sizeof(struct movablecore_entry) *
- (movablecore_map.nr_map - pos));
- movablecore_map.map[pos].start_pfn = start_pfn;
- movablecore_map.map[pos].end_pfn = end_pfn;
- movablecore_map.nr_map++;
+ if (pos < movablemem_map.nr_map)
+ memmove(&movablemem_map.map[pos+1],
+ &movablemem_map.map[pos],
+ sizeof(struct movablemem_entry) *
+ (movablemem_map.nr_map - pos));
+ movablemem_map.map[pos].start_pfn = start_pfn;
+ movablemem_map.map[pos].end_pfn = end_pfn;
+ movablemem_map.nr_map++;
return;
}
/* overlap will be at the last overlapped range */
- for (overlap = pos + 1; overlap < movablecore_map.nr_map; overlap++)
- if (end_pfn < movablecore_map.map[overlap].start_pfn)
+ for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
+ if (end_pfn < movablemem_map.map[overlap].start_pfn)
break;
/*
* and move the rest elements forward.
*/
overlap--;
- movablecore_map.map[pos].start_pfn = min(start_pfn,
- movablecore_map.map[pos].start_pfn);
- movablecore_map.map[pos].end_pfn = max(end_pfn,
- movablecore_map.map[overlap].end_pfn);
+ movablemem_map.map[pos].start_pfn = min(start_pfn,
+ movablemem_map.map[pos].start_pfn);
+ movablemem_map.map[pos].end_pfn = max(end_pfn,
+ movablemem_map.map[overlap].end_pfn);
- if (pos != overlap && overlap + 1 != movablecore_map.nr_map)
- memmove(&movablecore_map.map[pos+1],
- &movablecore_map.map[overlap+1],
- sizeof(struct movablecore_entry) *
- (movablecore_map.nr_map - overlap - 1));
+ if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
+ memmove(&movablemem_map.map[pos+1],
+ &movablemem_map.map[overlap+1],
+ sizeof(struct movablemem_entry) *
+ (movablemem_map.nr_map - overlap - 1));
- movablecore_map.nr_map -= overlap - pos;
+ movablemem_map.nr_map -= overlap - pos;
}
/**
- * movablecore_map_add_region - Add a memory range into movablecore_map.
+ * movablemem_map_add_region - Add a memory range into movablemem_map.
* @start: physical start address of range
* @end: physical end address of range
*
* This function transform the physical address into pfn, and then add the
- * range into movablecore_map by calling insert_movablecore_map().
+ * range into movablemem_map by calling insert_movablemem_map().
*/
-static void __init movablecore_map_add_region(u64 start, u64 size)
+static void __init movablemem_map_add_region(u64 start, u64 size)
{
unsigned long start_pfn, end_pfn;
if (start + size <= start)
return;
- if (movablecore_map.nr_map >= ARRAY_SIZE(movablecore_map.map)) {
- pr_err("movable_memory_map: too many entries;"
+ if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
+ pr_err("movablemem_map: too many entries;"
" ignoring [mem %#010llx-%#010llx]\n",
(unsigned long long) start,
(unsigned long long) (start + size - 1));
start_pfn = PFN_DOWN(start);
end_pfn = PFN_UP(start + size);
- insert_movablecore_map(start_pfn, end_pfn);
+ insert_movablemem_map(start_pfn, end_pfn);
}
/*
- * cmdline_parse_movablecore_map - Parse boot option movablecore_map.
+ * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
* @p: The boot option of the following format:
- * movablecore_map=nn[KMG]@ss[KMG]
+ * movablemem_map=nn[KMG]@ss[KMG]
*
* This option sets the memory range [ss, ss+nn) to be used as movable memory.
*
* Return: 0 on success or -EINVAL on failure.
*/
-static int __init cmdline_parse_movablecore_map(char *p)
+static int __init cmdline_parse_movablemem_map(char *p)
{
char *oldp;
u64 start_at, mem_size;
if (p == oldp || *p != '\0')
goto err;
- movablecore_map_add_region(start_at, mem_size);
+ movablemem_map_add_region(start_at, mem_size);
return 0;
}
err:
return -EINVAL;
}
-early_param("movablecore_map", cmdline_parse_movablecore_map);
+early_param("movablemem_map", cmdline_parse_movablemem_map);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */