From 206e42087a037fa3adca8908fd318a0cb64d4dee Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 16 Feb 2011 12:13:07 +0100
Subject: [PATCH] x86-64, NUMA: Use common numa_nodes[]

ACPI and amd are using separate nodes[] array.  Add numa_nodes[] and
use them in all NUMA init methods.  cutoff_node() cleanup is moved
from srat_64.c to numa_64.c and applied in initmem_init() regardless
of init methods.

Signed-off-by: Tejun Heo
Cc: Yinghai Lu
Cc: Brian Gerst
Cc: Cyrill Gorcunov
Cc: Shaohui Zheng
Cc: David Rientjes
Cc: Ingo Molnar
Cc: H. Peter Anvin
---
 arch/x86/include/asm/numa_64.h |  1 +
 arch/x86/mm/amdtopology_64.c   | 19 +++++++--------
 arch/x86/mm/numa_64.c          | 24 +++++++++++++++++++
 arch/x86/mm/srat_64.c          | 43 +++++++++-------------------
 4 files changed, 45 insertions(+), 42 deletions(-)

diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index de459365d52b..d3a45147d09b 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -29,6 +29,7 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
 
 extern nodemask_t cpu_nodes_parsed __initdata;
 extern nodemask_t mem_nodes_parsed __initdata;
+extern struct bootnode numa_nodes[MAX_NUMNODES] __initdata;
 
 extern int __cpuinit numa_cpu_node(int cpu);
 
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index b6029a6b129f..f049fa67ed73 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -26,7 +26,6 @@
 #include <asm/apic.h>
 #include <asm/amd_nb.h>
 
-static struct bootnode __initdata nodes[8];
 static unsigned char __initdata nodeids[8];
 
 static __init int find_northbridge(void)
@@ -166,8 +165,8 @@ int __init amd_numa_init(void)
 		pr_info("Node %d MemBase %016lx Limit %016lx\n",
 			nodeid, base, limit);
 
-		nodes[nodeid].start = base;
-		nodes[nodeid].end = limit;
+		numa_nodes[nodeid].start = base;
+		numa_nodes[nodeid].end = limit;
 
 		prevbase = base;
 
@@ -210,8 +209,8 @@ void __init amd_get_nodes(struct bootnode *physnodes)
 	int i;
 
 	for_each_node_mask(i, mem_nodes_parsed) {
-		physnodes[i].start = nodes[i].start;
-		physnodes[i].end = nodes[i].end;
+		physnodes[i].start = numa_nodes[i].start;
+		physnodes[i].end = numa_nodes[i].end;
 	}
 }
 
@@ -221,7 +220,7 @@ static int __init find_node_by_addr(unsigned long addr)
 	int i;
 
 	for (i = 0; i < 8; i++)
-		if (addr >= nodes[i].start && addr < nodes[i].end) {
+		if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) {
 			ret = i;
 			break;
 		}
@@ -274,7 +273,7 @@ int __init amd_scan_nodes(void)
 {
 	int i;
-	memnode_shift = compute_hash_shift(nodes, 8, NULL);
+	memnode_shift = compute_hash_shift(numa_nodes, 8, NULL);
 	if (memnode_shift < 0) {
 		pr_err("No NUMA node hash function found. Contact maintainer\n");
 		return -1;
 	}
@@ -284,11 +283,11 @@ int __init amd_scan_nodes(void)
 	/* use the coreid bits from early_identify_cpu */
 	for_each_node_mask(i, node_possible_map)
 		memblock_x86_register_active_regions(i,
-				nodes[i].start >> PAGE_SHIFT,
-				nodes[i].end >> PAGE_SHIFT);
+				numa_nodes[i].start >> PAGE_SHIFT,
+				numa_nodes[i].end >> PAGE_SHIFT);
 
 	init_memory_mapping_high();
 	for_each_node_mask(i, node_possible_map)
-		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+		setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);
 	numa_init_array();
 	return 0;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 4404e1d649ac..a6b899f7ddd2 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -33,6 +33,8 @@ struct memnode memnode;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
+struct bootnode numa_nodes[MAX_NUMNODES] __initdata;
+
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -182,6 +184,22 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
 	return NULL;
 }
 
+static __init void cutoff_node(int i, unsigned long start, unsigned long end)
+{
+	struct bootnode *nd = &numa_nodes[i];
+
+	if (nd->start < start) {
+		nd->start = start;
+		if (nd->end < nd->start)
+			nd->start = nd->end;
+	}
+	if (nd->end > end) {
+		nd->end = end;
+		if (nd->start > nd->end)
+			nd->start = nd->end;
+	}
+}
+
 /* Initialize bootmem allocator for a node */
 void __init setup_node_bootmem(int nodeid, unsigned long start,
 			       unsigned long end)
@@ -638,9 +656,15 @@ void __init initmem_init(void)
 		nodes_clear(mem_nodes_parsed);
 		nodes_clear(node_possible_map);
 		nodes_clear(node_online_map);
+		memset(numa_nodes, 0, sizeof(numa_nodes));
 
 		if (numa_init[i]() < 0)
 			continue;
+
+		/* clean up the node list */
+		for (j = 0; j < MAX_NUMNODES; j++)
+			cutoff_node(j, 0, max_pfn << PAGE_SHIFT);
+
 #ifdef CONFIG_NUMA_EMU
 		setup_physnodes(0, max_pfn << PAGE_SHIFT, i == 0, i == 1);
 		if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 33e72ec4fa4c..bfa4a6af5cfe 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -28,7 +28,6 @@ int acpi_numa __initdata;
 
 static struct acpi_table_slit *acpi_slit;
 
-static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES];
 
 static int num_node_memblks __initdata;
@@ -55,29 +54,13 @@ static __init int conflicting_memblks(unsigned long start, unsigned long end)
 	return -1;
 }
 
-static __init void cutoff_node(int i, unsigned long start, unsigned long end)
-{
-	struct bootnode *nd = &nodes[i];
-
-	if (nd->start < start) {
-		nd->start = start;
-		if (nd->end < nd->start)
-			nd->start = nd->end;
-	}
-	if (nd->end > end) {
-		nd->end = end;
-		if (nd->start > nd->end)
-			nd->start = nd->end;
-	}
-}
-
 static __init void bad_srat(void)
 {
 	int i;
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
 	for (i = 0; i < MAX_NUMNODES; i++) {
-		nodes[i].start = nodes[i].end = 0;
+		numa_nodes[i].start = numa_nodes[i].end = 0;
 		nodes_add[i].start = nodes_add[i].end = 0;
 	}
 	remove_all_active_ranges();
@@ -276,12 +259,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 	if (i == node) {
 		printk(KERN_WARNING
 		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
-			pxm, start, end, nodes[i].start, nodes[i].end);
+			pxm, start, end, numa_nodes[i].start, numa_nodes[i].end);
 	} else if (i >= 0) {
 		printk(KERN_ERR
 		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
 		       pxm, start, end, node_to_pxm(i),
-			nodes[i].start, nodes[i].end);
+			numa_nodes[i].start, numa_nodes[i].end);
 		bad_srat();
 		return;
 	}
@@ -290,7 +273,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 	       start, end);
 
 	if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
-		nd = &nodes[node];
+		nd = &numa_nodes[node];
 		if (!node_test_and_set(node, mem_nodes_parsed)) {
 			nd->start = start;
 			nd->end = end;
@@ -347,9 +330,8 @@ void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
 	int i;
 
 	for_each_node_mask(i, mem_nodes_parsed) {
-		cutoff_node(i, start, end);
-		physnodes[i].start = nodes[i].start;
-		physnodes[i].end = nodes[i].end;
+		physnodes[i].start = numa_nodes[i].start;
+		physnodes[i].end = numa_nodes[i].end;
 	}
 }
 #endif /* CONFIG_NUMA_EMU */
@@ -372,10 +354,6 @@ int __init acpi_scan_nodes(void)
 	if (acpi_numa <= 0)
 		return -1;
 
-	/* First clean up the node list */
-	for (i = 0; i < MAX_NUMNODES; i++)
-		cutoff_node(i, 0, max_pfn << PAGE_SHIFT);
-
 	/*
 	 * Join together blocks on the same node, holes between
 	 * which don't overlap with memory on other nodes.
@@ -440,7 +418,7 @@ int __init acpi_scan_nodes(void)
 	/* for out of order entries in SRAT */
 	sort_node_map();
-	if (!nodes_cover_memory(nodes)) {
+	if (!nodes_cover_memory(numa_nodes)) {
 		bad_srat();
 		return -1;
 	}
 
@@ -449,12 +427,13 @@ int __init acpi_scan_nodes(void)
 
 	/* Finally register nodes */
 	for_each_node_mask(i, node_possible_map)
-		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+		setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);
 	/* Try again in case setup_node_bootmem missed one due
 	   to missing bootmem */
 	for_each_node_mask(i, node_possible_map)
 		if (!node_online(i))
-			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+			setup_node_bootmem(i, numa_nodes[i].start,
+					   numa_nodes[i].end);
 
 	for (i = 0; i < nr_cpu_ids; i++) {
 		int node = early_cpu_to_node(i);
@@ -486,7 +465,7 @@ static int __init find_node_by_addr(unsigned long addr)
 	 * the sake of simplicity, we only use a real node's starting
 	 * address to determine which emulated node it appears on.
 	 */
-	if (addr >= nodes[i].start && addr < nodes[i].end) {
+	if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) {
 		ret = i;
 		break;
 	}
-- 
2.39.5
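
As an illustration of the behavior this patch centralizes (not part of the patch itself), the sketch below re-implements the clamping rule of the new common cutoff_node() helper as a standalone user-space C program that can be compiled and run in isolation. It assumes a pointer-based signature, a local struct bootnode definition, and made-up sample addresses; the kernel version instead takes a node index into numa_nodes[] and is called from initmem_init() with the range [0, max_pfn << PAGE_SHIFT).

#include <stdio.h>

/* Local stand-in for the kernel's struct bootnode: a [start, end) range. */
struct bootnode {
	unsigned long start;
	unsigned long end;
};

/*
 * Same clamping rule as the cutoff_node() added to numa_64.c above,
 * but taking a pointer instead of an index into numa_nodes[].
 */
static void cutoff_node(struct bootnode *nd, unsigned long start, unsigned long end)
{
	if (nd->start < start) {
		nd->start = start;
		if (nd->end < nd->start)
			nd->start = nd->end;
	}
	if (nd->end > end) {
		nd->end = end;
		if (nd->start > nd->end)
			nd->start = nd->end;
	}
}

int main(void)
{
	/* Hypothetical node that extends past the end of usable memory. */
	struct bootnode node = { .start = 0x1000, .end = 0x20000 };
	unsigned long max_addr = 0x10000;	/* stands in for max_pfn << PAGE_SHIFT */

	cutoff_node(&node, 0, max_addr);
	printf("clamped node: %#lx-%#lx\n", node.start, node.end);
	/* prints "clamped node: 0x1000-0x10000" */

	/* A node entirely above the limit collapses to an empty range. */
	struct bootnode stale = { .start = 0x30000, .end = 0x40000 };
	cutoff_node(&stale, 0, max_addr);
	printf("stale node:   %#lx-%#lx\n", stale.start, stale.end);
	/* prints "stale node:   0x10000-0x10000" */

	return 0;
}

Because initmem_init() now applies this clamp to every numa_nodes[] entry after each numa_init method runs, the per-method cleanup could be dropped: acpi_scan_nodes() no longer loops over cutoff_node(i, 0, max_pfn << PAGE_SHIFT) and acpi_get_nodes() no longer calls cutoff_node(i, start, end) per node.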