git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - arch/powerpc/mm/pgtable_64.c
powerpc/mm: Replace _PAGE_USER with _PAGE_PRIVILEGED
[karo-tx-linux.git] / arch / powerpc / mm / pgtable_64.c
index 347106080bb1e64b87d28477db24e47f3ab3ef77..603db71ff21d3b81cc2be90c26b8955b3735c7f0 100644 (file)
@@ -277,11 +277,20 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
        void *caller = __builtin_return_address(0);
 
        /* writeable implies dirty for kernel addresses */
-       if (flags & _PAGE_RW)
+       if (flags & _PAGE_WRITE)
                flags |= _PAGE_DIRTY;
 
-       /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-       flags &= ~(_PAGE_USER | _PAGE_EXEC);
+       /* we don't want to let _PAGE_EXEC leak out */
+       flags &= ~_PAGE_EXEC;
+       /*
+        * Force kernel mapping.
+        */
+#if defined(CONFIG_PPC_BOOK3S_64)
+       flags |= _PAGE_PRIVILEGED;
+#else
+       flags &= ~_PAGE_USER;
+#endif
+
 
 #ifdef _PAGE_BAP_SR
        /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
@@ -515,29 +524,29 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                  unsigned long set)
 {
 
-       unsigned long old, tmp;
+       __be64 old_be, tmp;
+       unsigned long old;
 
 #ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp));
        assert_spin_locked(&mm->page_table_lock);
 #endif
 
-#ifdef PTE_ATOMIC_UPDATES
        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3\n\
-               andi.   %1,%0,%6\n\
+               and.  %1,%0,%6\n\
                bne-    1b \n\
                andc    %1,%0,%4 \n\
                or      %1,%1,%7\n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
-       : "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
-       : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
+       : "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
+       : "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
+         "r" (cpu_to_be64(_PAGE_BUSY)), "r" (cpu_to_be64(set))
        : "cc" );
-#else
-       old = pmd_val(*pmdp);
-       *pmdp = __pmd((old & ~clr) | set);
-#endif
+
+       old = be64_to_cpu(old_be);
+
        trace_hugepage_update(addr, old, clr, set);
        if (old & _PAGE_HASHPTE)
                hpte_do_hugepage_flush(mm, addr, pmdp, old);
@@ -664,7 +673,7 @@ void pmdp_huge_split_prepare(struct vm_area_struct *vma,
         * the translation is still valid, because we will withdraw
         * pgtable_t after this.
         */
-       pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+       pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
 
@@ -676,8 +685,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_DEBUG_VM
-       WARN_ON((pmd_val(*pmdp) & (_PAGE_PRESENT | _PAGE_USER)) ==
-               (_PAGE_PRESENT | _PAGE_USER));
+       WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(&mm->page_table_lock);
        WARN_ON(!pmd_trans_huge(pmd));
 #endif