Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 28 Oct 2008 16:45:31 +0000 (09:45 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 28 Oct 2008 16:45:31 +0000 (09:45 -0700)
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, memory hotplug: remove wrong -1 in calling init_memory_mapping()
  x86: keep the /proc/meminfo page count correct
  x86/uv: memory allocation at initialization
  xen: fix Xen domU boot with batched mprotect

arch/x86/kernel/genx2apic_uv_x.c
arch/x86/mm/init_64.c
arch/x86/xen/mmu.c

diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 680a06557c5e6c255fa9ea2e5c4decc155d68a73..2c7dbdb98278316a6fd7228e9a7b2eaf76f58562 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,7 +15,6 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
@@ -398,16 +397,16 @@ void __init uv_system_init(void)
        printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-       uv_blade_info = alloc_bootmem_pages(bytes);
+       uv_blade_info = kmalloc(bytes, GFP_KERNEL);
 
        get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 
        bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-       uv_node_to_blade = alloc_bootmem_pages(bytes);
+       uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
        memset(uv_node_to_blade, 255, bytes);
 
        bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-       uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+       uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
        memset(uv_cpu_to_blade, 255, bytes);
 
        blade = 0;
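
The three hunks above all make the same change: the UV blade tables now come
from kmalloc() instead of alloc_bootmem_pages(). By the point in boot where
uv_system_init() runs, the slab allocator is available, so bootmem (note the
dropped <linux/bootmem.h> include) is no longer needed here. A minimal hedged
sketch of the resulting pattern, with a made-up blade_table standing in for
the UV arrays and a NULL check added that the hunk itself omits:

	/*
	 * Sketch only, not the kernel code: allocate a per-node lookup
	 * table and mark every entry invalid, mirroring the
	 * memset(ptr, 255, bytes) fill above.
	 */
	short *blade_table;
	size_t bytes = sizeof(blade_table[0]) * num_possible_nodes();

	blade_table = kmalloc(bytes, GFP_KERNEL);
	if (!blade_table)
		return -ENOMEM;
	memset(blade_table, 255, bytes);	/* 0xffff == -1 in each entry */
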
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b8e461d4941208fab60f977471dbe088ce48d23e..f79a02f64d108dde4de4a35055240f64fae2371b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
                 * pagetable pages as RO. So assume someone who pre-setup
                 * these mappings are more intelligent.
                 */
-               if (pte_val(*pte))
+               if (pte_val(*pte)) {
+                       pages++;
                        continue;
+               }
 
                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                         * not differ with respect to page frame and
                         * attributes.
                         */
-                       if (page_size_mask & (1 << PG_LEVEL_2M))
+                       if (page_size_mask & (1 << PG_LEVEL_2M)) {
+                               pages++;
                                continue;
+                       }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }
 
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         * not differ with respect to page frame and
                         * attributes.
                         */
-                       if (page_size_mask & (1 << PG_LEVEL_1G))
+                       if (page_size_mask & (1 << PG_LEVEL_1G)) {
+                               pages++;
                                continue;
+                       }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }
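
All three hunks above fix the same accounting leak: when a loop finds an
entry already mapped and takes the early continue, that page previously went
uncounted, so the totals these functions hand to update_page_count() (which
feeds the DirectMap lines in /proc/meminfo) came up short. A hedged,
standalone sketch of the pattern (count_and_map() is a made-up name):

	static unsigned long count_and_map(pte_t *pte, unsigned long pfn)
	{
		unsigned long pages = 0;
		int i;

		for (i = 0; i < PTRS_PER_PTE; i++, pte++, pfn++) {
			if (pte_val(*pte)) {	/* pre-existing mapping */
				pages++;	/* the fix: count it anyway */
				continue;
			}
			pages++;
			set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
		}
		update_page_count(PG_LEVEL_4K, pages);	/* DirectMap4k */
		return pages;
	}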
 
@@ -831,7 +837,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;
 
-       last_mapped_pfn = init_memory_mapping(start, start + size-1);
+       last_mapped_pfn = init_memory_mapping(start, start + size);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;
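
The arch_add_memory() hunk is the "remove wrong -1" fix from the shortlog:
init_memory_mapping() takes an exclusive end address, so a hot-added range
must be passed as [start, start + size), and the old start + size - 1 asked
for one byte less. A worked illustration, assuming that exclusive-end
convention:

	/* Hot-adding 128 MB at the 4 GB boundary (sketch, not kernel code) */
	u64 start = 0x100000000ULL;		/* 4 GB */
	u64 size  = 128ULL << 20;		/* 128 MB */

	/* maps pfns [0x100000, 0x108000), i.e. the full region */
	last_mapped_pfn = init_memory_mapping(start, start + size);
	/*
	 * the old init_memory_mapping(start, start + size - 1) stopped one
	 * byte short and could leave the region's final page unmapped
	 */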
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d4d52f5a1cf7bae392801e19367030ebe58ccf79..aba77b2b7d1853a188f42696e92255091de041b2 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;
-       pte_t *pte = lookup_address(address, &level);
-       unsigned offset = address & ~PAGE_MASK;
+       pte_t *pte;
+       unsigned offset;
 
-       BUG_ON(pte == NULL);
+       /*
+        * if the PFN is in the linear mapped vaddr range, we can just use
+        * the (quick) virt_to_machine() p2m lookup
+        */
+       if (virt_addr_valid(vaddr))
+               return virt_to_machine(vaddr);
 
+       /* otherwise we have to do a (slower) full page-table walk */
+
+       pte = lookup_address(address, &level);
+       BUG_ON(pte == NULL);
+       offset = address & ~PAGE_MASK;
        return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
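
The rewritten arbitrary_virt_to_machine() now distinguishes two address
classes, as its new comments say: linear-map addresses (virt_addr_valid())
take the quick virt_to_machine() p2m lookup, while everything else falls
back to the full lookup_address() page-table walk. A hedged sketch of the
two cases from a caller's point of view (buf and vbuf are made-up names):

	void *buf  = kmalloc(64, GFP_KERNEL);	/* linear map: fast path */
	void *vbuf = vmalloc(PAGE_SIZE);	/* non-linear: table walk */

	xmaddr_t m1 = arbitrary_virt_to_machine(buf);	/* virt_to_machine() */
	xmaddr_t m2 = arbitrary_virt_to_machine(vbuf);	/* lookup_address() */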
 
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
        xen_mc_batch();
 
-       u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+       u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
        u.val = pte_val_ma(pte);
        xen_extend_mmu_update(&u);
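
This last one-line change is the actual "fix Xen domU boot with batched
mprotect" from the shortlog: the ptep handed to
xen_ptep_modify_prot_commit() is not guaranteed to be a linear-map address
(with CONFIG_HIGHPTE, for instance, PTE pages are reached through
kmap_atomic()), and virt_to_machine() is only correct for linear-map
addresses. Routing the lookup through arbitrary_virt_to_machine() covers
both cases, paying for a page-table walk only when the fast path does not
apply. A hedged sketch (some_nonlinear_pte() is a made-up stand-in):

	/* ptep may not live in the linear map during a batched update */
	pte_t *ptep = some_nonlinear_pte();	/* hypothetical helper */
	struct mmu_update u;

	/*
	 * wrong for such a ptep, since virt_to_machine() assumes a
	 * linear-map address:
	 *	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	 */

	/* correct: falls back to a page-table walk when needed */
	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);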