}
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-/*
- * Basic iterator support. Return the first range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns first region regardless of node
- */
-static int __meminit first_active_region_index_in_nid(int nid)
-{
- int i;
-
- for (i = 0; i < nr_nodemap_entries; i++)
- if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
- return i;
-
- return -1;
-}
-
-/*
- * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit next_active_region_index_in_nid(int index, int nid)
-{
- for (index = index + 1; index < nr_nodemap_entries; index++)
- if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
- return index;
-
- return -1;
-}
-
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
*/
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
- int i;
-
- for (i = 0; i < nr_nodemap_entries; i++) {
- unsigned long start_pfn = early_node_map[i].start_pfn;
- unsigned long end_pfn = early_node_map[i].end_pfn;
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
if (start_pfn <= pfn && pfn < end_pfn)
- return early_node_map[i].nid;
- }
+ return nid;
/* This is a memory hole */
return -1;
}
#endif
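For reference, for_each_mem_pfn_range() is the memblock-provided iterator that replaces the open-coded early_node_map[] walks removed below. Its definition in include/linux/memblock.h is roughly the following (a sketch; check the tree for the exact form):

#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

Passing MAX_NUMNODES as nid visits every range regardless of node, and __next_mem_pfn_range() tolerates NULL output pointers, which is why several conversions in this patch pass NULL for values they do not need.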
-/* Basic iterator support to walk early_node_map[] */
-#define for_each_active_range_index_in_nid(i, nid) \
- for (i = first_active_region_index_in_nid(nid); i != -1; \
- i = next_active_region_index_in_nid(i, nid))
-
/**
* free_bootmem_with_active_regions - Call free_bootmem_node for each active range
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
* @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
*
* If an architecture guarantees that all ranges registered with
* add_active_ranges() contain no holes and may be freed,
* this function may be used instead of calling free_bootmem() manually.
*/
-void __init free_bootmem_with_active_regions(int nid,
- unsigned long max_low_pfn)
+void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
- int i;
-
- for_each_active_range_index_in_nid(i, nid) {
- unsigned long size_pages = 0;
- unsigned long end_pfn = early_node_map[i].end_pfn;
-
- if (early_node_map[i].start_pfn >= max_low_pfn)
- continue;
+ unsigned long start_pfn, end_pfn;
+ int i, this_nid;
- if (end_pfn > max_low_pfn)
- end_pfn = max_low_pfn;
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
+ start_pfn = min(start_pfn, max_low_pfn);
+ end_pfn = min(end_pfn, max_low_pfn);
- size_pages = end_pfn - early_node_map[i].start_pfn;
- free_bootmem_node(NODE_DATA(early_node_map[i].nid),
- PFN_PHYS(early_node_map[i].start_pfn),
- size_pages << PAGE_SHIFT);
+ if (start_pfn < end_pfn)
+ free_bootmem_node(NODE_DATA(this_nid),
+ PFN_PHYS(start_pfn),
+ (end_pfn - start_pfn) << PAGE_SHIFT);
}
}
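The two min() clamps express the old bounds checks more compactly; a worked illustration with invented PFN values:

/*
 * With max_low_pfn = 0x1000:
 *   [0x0800, 0x2000) is clamped to [0x0800, 0x1000) and freed;
 *   [0x2000, 0x3000) is clamped to [0x1000, 0x1000), which fails
 *   the start_pfn < end_pfn test -- the same effect as the old
 *   "start_pfn >= max_low_pfn" continue.
 */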
int __init add_from_early_node_map(struct range *range, int az,
int nr_range, int nid)
{
+ unsigned long start_pfn, end_pfn;
int i;
- u64 start, end;
/* need to go over early_node_map to find out good ranges for this node */
- for_each_active_range_index_in_nid(i, nid) {
- start = early_node_map[i].start_pfn;
- end = early_node_map[i].end_pfn;
- nr_range = add_range(range, az, nr_range, start, end);
- }
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
+ nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
return nr_range;
}
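A usage sketch of the converted helper; the array size and node id here are invented for illustration:

	struct range ranges[64];	/* hypothetical capacity */
	int nr_range = 0;

	nr_range = add_from_early_node_map(ranges, ARRAY_SIZE(ranges),
					   nr_range, 0 /* nid */);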
*/
void __init sparse_memory_present_with_active_regions(int nid)
{
- int i;
+ unsigned long start_pfn, end_pfn;
+ int i, this_nid;
- for_each_active_range_index_in_nid(i, nid)
- memory_present(early_node_map[i].nid,
- early_node_map[i].start_pfn,
- early_node_map[i].end_pfn);
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
+ memory_present(this_nid, start_pfn, end_pfn);
}
/**
void __meminit get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
{
+ unsigned long this_start_pfn, this_end_pfn;
int i;
+
*start_pfn = -1UL;
*end_pfn = 0;
- for_each_active_range_index_in_nid(i, nid) {
- *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
- *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
+ for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+ *start_pfn = min(*start_pfn, this_start_pfn);
+ *end_pfn = max(*end_pfn, this_end_pfn);
}
if (*start_pfn == -1UL)
void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn)
{
+ unsigned long this_start_pfn, this_end_pfn;
int i, j;
int removed = 0;
nid, start_pfn, end_pfn);
/* Find the old active region end and shrink */
- for_each_active_range_index_in_nid(i, nid) {
- if (early_node_map[i].start_pfn >= start_pfn &&
- early_node_map[i].end_pfn <= end_pfn) {
+ for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+ if (this_start_pfn >= start_pfn && this_end_pfn <= end_pfn) {
/* clear it */
early_node_map[i].start_pfn = 0;
early_node_map[i].end_pfn = 0;
removed = 1;
continue;
}
- if (early_node_map[i].start_pfn < start_pfn &&
- early_node_map[i].end_pfn > start_pfn) {
- unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+ if (this_start_pfn < start_pfn && this_end_pfn > start_pfn) {
early_node_map[i].end_pfn = start_pfn;
- if (temp_end_pfn > end_pfn)
- add_active_range(nid, end_pfn, temp_end_pfn);
+ if (this_end_pfn > end_pfn)
+ add_active_range(nid, end_pfn, this_end_pfn);
continue;
}
- if (early_node_map[i].start_pfn >= start_pfn &&
- early_node_map[i].end_pfn > end_pfn &&
- early_node_map[i].start_pfn < end_pfn) {
+ if (this_start_pfn >= start_pfn && this_end_pfn > end_pfn &&
+ this_start_pfn < end_pfn) {
early_node_map[i].start_pfn = end_pfn;
continue;
}
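A concrete walk-through of the overlap handling above, with invented PFNs: removing [0x100, 0x200) from an entry spanning [0x000, 0x300) takes the middle branch:

/*
 * this_start_pfn = 0x000, this_end_pfn = 0x300
 * early_node_map[i].end_pfn = 0x100;	trim the entry in place
 * add_active_range(nid, 0x200, 0x300);	re-add the tail
 *
 * this_end_pfn is captured by the iterator before the entry is
 * modified, which is why the old temp_end_pfn local can go.
 */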
unsigned long __init node_map_pfn_alignment(void)
{
unsigned long accl_mask = 0, last_end = 0;
+ unsigned long start, end, mask;
int last_nid = -1;
- int i;
-
- for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
- int nid = early_node_map[i].nid;
- unsigned long start = early_node_map[i].start_pfn;
- unsigned long end = early_node_map[i].end_pfn;
- unsigned long mask;
+ int i, nid;
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
if (!start || last_nid < 0 || last_nid == nid) {
last_nid = nid;
last_end = end;
/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
- int i;
unsigned long min_pfn = ULONG_MAX;
+ unsigned long start_pfn;
+ int i;
- /* Assuming a sorted map, the first range found has the starting pfn */
- for_each_active_range_index_in_nid(i, nid)
- min_pfn = min(min_pfn, early_node_map[i].start_pfn);
+ for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
+ min_pfn = min(min_pfn, start_pfn);
if (min_pfn == ULONG_MAX) {
printk(KERN_WARNING
*/
static unsigned long __init early_calculate_totalpages(void)
{
- int i;
unsigned long totalpages = 0;
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ unsigned long pages = end_pfn - start_pfn;
- for (i = 0; i < nr_nodemap_entries; i++) {
- unsigned long pages = early_node_map[i].end_pfn -
- early_node_map[i].start_pfn;
totalpages += pages;
if (pages)
- node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
+ node_set_state(nid, N_HIGH_MEMORY);
}
return totalpages;
}
/* Spread kernelcore memory as evenly as possible throughout nodes */
kernelcore_node = required_kernelcore / usable_nodes;
for_each_node_state(nid, N_HIGH_MEMORY) {
+ unsigned long start_pfn, end_pfn;
+
/*
* Recalculate kernelcore_node if the division per node
* now exceeds what is necessary to satisfy the requested
kernelcore_remaining = kernelcore_node;
/* Go through each range of PFNs within this node */
- for_each_active_range_index_in_nid(i, nid) {
- unsigned long start_pfn, end_pfn;
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
unsigned long size_pages;
- start_pfn = max(early_node_map[i].start_pfn,
- zone_movable_pfn[nid]);
- end_pfn = early_node_map[i].end_pfn;
+ start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
*/
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
- unsigned long nid;
- int i;
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
/* Sort early_node_map as initialisation assumes it is sorted */
sort_node_map();
}
/* Print out the early_node_map[] */
- printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
- for (i = 0; i < nr_nodemap_entries; i++)
- printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
- early_node_map[i].start_pfn,
- early_node_map[i].end_pfn);
+ printk("Early memory PFN ranges\n");
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+ printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
/* Initialise every node */
mminit_verify_pageflags_layout();