x86, memblock: Replace e820_/_early string with memblock_
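Every hunk below is the same mechanical substitution: the old e820/early_res boot-time reservation helpers are replaced by their memblock counterparts. The calling logic is unchanged; only the helper names and the allocation-failure sentinel differ. The mapping, as it appears in this file:

    find_e820_area()               -> memblock_find_in_range()
    -1UL (allocation failure)      -> MEMBLOCK_ERROR
    reserve_early()                -> memblock_x86_reserve_range()
    free_early()                   -> memblock_x86_free_range()
    e820_hole_size()               -> memblock_x86_hole_size()
    e820_register_active_regions() -> memblock_x86_register_active_regions()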
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 3d54f9f95d468f914e4c475e583be22f119fbd2a..984b1ff7db44721de2bfea103d09db7c0cd2c6c5 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -87,16 +87,16 @@ static int __init allocate_cachealigned_memnodemap(void)
 
        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
-       nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
+       nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
                                      nodemap_size, L1_CACHE_BYTES);
-       if (nodemap_addr == -1UL) {
+       if (nodemap_addr == MEMBLOCK_ERROR) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
-       reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
+       memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
 
        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
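For reference, the find-then-reserve idiom introduced in the hunk above looks like this in isolation. This is a minimal sketch, not part of the patch; it assumes the same 2.6.36-era x86 memblock wrappers and constants used above (memblock_find_in_range, memblock_x86_reserve_range, MEMBLOCK_ERROR, max_pfn, L1_CACHE_BYTES), and the function name and range label are made up:

	/* Illustrative only: find a cache-aligned block above 0x8000 and claim it. */
	static unsigned long __init example_alloc_boot_range(unsigned long size)
	{
		unsigned long addr;

		/* Search [0x8000, max_pfn << PAGE_SHIFT) for 'size' free bytes. */
		addr = memblock_find_in_range(0x8000, max_pfn << PAGE_SHIFT,
					      size, L1_CACHE_BYTES);
		if (addr == MEMBLOCK_ERROR)	/* was addr == -1UL with find_e820_area() */
			return 0;

		/* Mark the range reserved so later boot-time allocations skip it. */
		memblock_x86_reserve_range(addr, addr + size, "EXAMPLE");
		return addr;
	}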
@@ -227,7 +227,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
-       reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
+       memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
        printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
                nodedata_phys + pgdat_size - 1);
        nid = phys_to_nid(nodedata_phys);
@@ -246,7 +246,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
         * Find a place for the bootmem map
         * nodedata_phys could be on other nodes by alloc_bootmem,
         * so need to sure bootmap_start not to be small, otherwise
-        * early_node_mem will get that with find_e820_area instead
+        * early_node_mem will get that with memblock_find_in_range instead
         * of alloc_bootmem, that could clash with reserved range
         */
        bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
@@ -258,12 +258,12 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
        bootmap = early_node_mem(nodeid, bootmap_start, end,
                                 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
        if (bootmap == NULL)  {
-               free_early(nodedata_phys, nodedata_phys + pgdat_size);
+               memblock_x86_free_range(nodedata_phys, nodedata_phys + pgdat_size);
                node_data[nodeid] = NULL;
                return;
        }
        bootmap_start = __pa(bootmap);
-       reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
+       memblock_x86_reserve_range(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
                        "BOOTMAP");
 
        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
@@ -417,7 +417,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
                nr_nodes = MAX_NUMNODES;
        }
 
-       size = (max_addr - addr - e820_hole_size(addr, max_addr)) / nr_nodes;
+       size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the remainder.
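As a worked example of the sizing formula above (numbers are illustrative, not from the patch): with addr = 0, max_addr = 4 GiB, nr_nodes = 4, and memblock_x86_hole_size() reporting a 512 MiB hole below 4 GiB, each emulated node is sized at

    size = (4096 MiB - 0 - 512 MiB) / 4 = 896 MiB

before the remainder is folded into the "big" nodes mentioned in the comment.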
@@ -453,7 +453,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
                         * non-reserved memory is less than the per-node size.
                         */
                        while (end - physnodes[i].start -
-                               e820_hole_size(physnodes[i].start, end) < size) {
+                               memblock_x86_hole_size(physnodes[i].start, end) < size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > physnodes[i].end) {
                                        end = physnodes[i].end;
@@ -467,7 +467,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
-                           e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+                           memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;
 
                        /*
@@ -476,7 +476,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
                         * physical node.
                         */
                        if (physnodes[i].end - end -
-                           e820_hole_size(end, physnodes[i].end) < size)
+                           memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;
 
                        /*
@@ -504,7 +504,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
 {
        u64 end = start + size;
 
-       while (end - start - e820_hole_size(start, end) < size) {
+       while (end - start - memblock_x86_hole_size(start, end) < size) {
                end += FAKE_NODE_MIN_SIZE;
                if (end > max_addr) {
                        end = max_addr;
@@ -533,7 +533,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
         * creates a uniform distribution of node sizes across the entire
         * machine (but not necessarily over physical nodes).
         */
-       min_size = (max_addr - addr - e820_hole_size(addr, max_addr)) /
+       min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
                                                MAX_NUMNODES;
        min_size = max(min_size, FAKE_NODE_MIN_SIZE);
        if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
@@ -566,7 +566,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
-                           e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+                           memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;
 
                        /*
@@ -575,7 +575,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
                         * physical node.
                         */
                        if (physnodes[i].end - end -
-                           e820_hole_size(end, physnodes[i].end) < size)
+                           memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;
 
                        /*
@@ -639,7 +639,7 @@ static int __init numa_emulation(unsigned long start_pfn,
         */
        remove_all_active_ranges();
        for_each_node_mask(i, node_possible_map) {
-               e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+               memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                                nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
@@ -692,7 +692,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
        node_set(0, node_possible_map);
        for (i = 0; i < nr_cpu_ids; i++)
                numa_set_node(i, 0);
-       e820_register_active_regions(0, start_pfn, last_pfn);
+       memblock_x86_register_active_regions(0, start_pfn, last_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
 }