powerpc/mm: Replace _PAGE_USER with _PAGE_PRIVILEGED
author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
          Fri, 29 Apr 2016 13:25:34 +0000 (23:25 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
          Sun, 1 May 2016 08:32:26 +0000 (18:32 +1000)
_PAGE_PRIVILEGED means the page can be accessed only by the kernel. This
is done to keep the PTE bits similar to the PowerISA 3.0 radix PTE
format. User pages are now marked by clearing the _PAGE_PRIVILEGED bit.
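For illustration, a minimal standalone sketch of the polarity flip (the
0x00008 bit value is taken from the hash.h hunk below, and pte_user()
mirrors the pgtable.h hunk; this is demo code, not the kernel
implementation):

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_PRIVILEGED 0x00008UL /* kernel access only; reuses the old _PAGE_USER bit */

/* After this patch a PTE is a user PTE when _PAGE_PRIVILEGED is clear. */
static bool pte_user(unsigned long ptev)
{
	return !(ptev & _PAGE_PRIVILEGED);
}

int main(void)
{
	printf("%d\n", pte_user(_PAGE_PRIVILEGED)); /* 0: kernel-only PTE */
	printf("%d\n", pte_user(0));                /* 1: user PTE */
	return 0;
}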

Previously we allowed the kernel to have a privileged page in the lower
address range (USER_REGION). With this patch such access is denied.

We also prevent kernel access to a non-privileged page in the higher
address range (i.e., REGION_ID != 0).

Neither of the above access scenarios should ever happen.
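As a rough sketch of how both scenarios are now caught, here is a
standalone model of the check_pte_access() helper and the access masks
built in the __hash_page() hunk below (the _PAGE_PRESENT/_PAGE_READ
values are illustrative, not the real bit assignments):

#include <assert.h>
#include <stdbool.h>

#define _PAGE_PRESENT    0x00002UL /* illustrative value for this demo */
#define _PAGE_READ       0x00004UL
#define _PAGE_PRIVILEGED 0x00008UL

/* Models the check_pte_access() helper added in pgtable.h below. */
static bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/* Every bit required by the access must be set in the PTE. */
	if (access & ~ptev)
		return false;
	/* A non-privileged access must not hit a privileged PTE. */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;
	return true;
}

int main(void)
{
	unsigned long user_pte = _PAGE_PRESENT | _PAGE_READ;
	unsigned long kern_pte = user_pte | _PAGE_PRIVILEGED;

	/* Kernel-mode access to kernel space carries _PAGE_PRIVILEGED ... */
	unsigned long kern_access = _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED;
	/* ... user-mode accesses, and kernel accesses to USER_REGION, do not. */
	unsigned long user_access = _PAGE_PRESENT | _PAGE_READ;

	assert(!check_pte_access(kern_access, user_pte)); /* kernel -> user page: denied */
	assert(!check_pte_access(user_access, kern_pte)); /* privileged page via user region: denied */
	assert(check_pte_access(user_access, user_pte));  /* normal user fault: OK */
	assert(check_pte_access(kern_access, kern_pte));  /* normal kernel fault: OK */
	return 0;
}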

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Jeremy Kerr <jk@ozlabs.org>
Cc: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
12 files changed:
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/mm/hash64_4k.c
arch/powerpc/mm/hash64_64k.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugepage-hash64.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/pgtable.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/platforms/cell/spufs/fault.c
drivers/misc/cxl/fault.c

diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index fb76f9cf49c9b776a445889271315a37c2f3a6d3..5e7e74d30eb9dc17c0993f1684e47124036c3cec 100644
@@ -20,7 +20,7 @@
 #define _PAGE_READ             0x00004 /* read access allowed */
 #define _PAGE_RW               (_PAGE_READ | _PAGE_WRITE)
 #define _PAGE_RWX              (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
-#define _PAGE_USER             0x00008 /* page may be accessed by userspace */
+#define _PAGE_PRIVILEGED       0x00008 /* kernel access only */
 #define _PAGE_GUARDED          0x00010 /* G: guarded (side-effect) page */
 /* M (memory coherence) is always set in the HPTE, so we don't need it here */
 #define _PAGE_COHERENT         0x0
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 #endif /* CONFIG_PPC_MM_SLICES */
 
-/* No separate kernel read-only */
-#define _PAGE_KERNEL_RW                (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+/*
+ * No separate kernel read-only, user access blocked by key
+ */
+#define _PAGE_KERNEL_RW                (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
 #define _PAGE_KERNEL_RO                 _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RWX       (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX       (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
+                                _PAGE_RW | _PAGE_EXEC)
 
 /* Strong Access Ordering */
 #define _PAGE_SAO              (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
  */
 #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU | _PAGE_4K_PFN | \
-                        _PAGE_USER | _PAGE_ACCESSED |  _PAGE_READ |\
+                        _PAGE_PRIVILEGED | _PAGE_ACCESSED |  _PAGE_READ |\
                         _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_EXEC | \
                         _PAGE_SOFT_DIRTY)
 /*
  *
  * Note due to the way vm flags are laid out, the bits are XWR
  */
-#define PAGE_NONE      __pgprot(_PAGE_BASE)
-#define PAGE_SHARED    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X  __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
-                                _PAGE_EXEC)
-#define PAGE_COPY      __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
-#define PAGE_COPY_X    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
-                                _PAGE_EXEC)
-#define PAGE_READONLY  __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
-#define PAGE_READONLY_X        __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ| \
-                                _PAGE_EXEC)
+#define PAGE_NONE      __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
+#define PAGE_SHARED    __pgprot(_PAGE_BASE | _PAGE_RW)
+#define PAGE_SHARED_X  __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY      __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_COPY_X    __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_READONLY  __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_READONLY_X        __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
 
 #define __P000 PAGE_NONE
 #define __P001 PAGE_READONLY
@@ -419,8 +419,8 @@ static inline pte_t pte_clear_soft_dirty(pte_t pte)
  */
 static inline int pte_protnone(pte_t pte)
 {
-       return (pte_val(pte) &
-               (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+       return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
+               (_PAGE_PRESENT | _PAGE_PRIVILEGED);
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 60e84260a07d723605879c5024bd608187342f33..b609729e0d762868e99c875e6ec4c248cacfc980 100644
@@ -187,7 +187,7 @@ extern struct page *pgd_page(pgd_t pgd);
 
 static inline bool pte_user(pte_t pte)
 {
-       return !!(pte_val(pte) & _PAGE_USER);
+       return !(pte_val(pte) & _PAGE_PRIVILEGED);
 }
 
 #ifdef CONFIG_MEM_SOFT_DIRTY
@@ -211,6 +211,22 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 }
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
+static inline bool check_pte_access(unsigned long access, unsigned long ptev)
+{
+       /*
+        * This checks the _PAGE_RWX and _PAGE_PRESENT bits.
+        */
+       if (access & ~ptev)
+               return false;
+       /*
+        * This checks for access to privileged space.
+        */
+       if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
+               return false;
+
+       return true;
+}
+
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
 
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 491b7d137cd87de395add67305f23edf3589d71a..529e49204f6bd01cf5e1321c39bfbc9a9a80d18d 100644
@@ -37,7 +37,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
                if (unlikely(old_pte & _PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
-               if (unlikely(access & ~old_pte))
+               if (unlikely(!check_pte_access(access, old_pte)))
                        return 1;
                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 2d3472173d793c2b51fc888c2737f5f135668ade..e7782862362b05ccf44e450ea0f6c103dca18b27 100644
@@ -69,7 +69,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
                if (unlikely(old_pte & _PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
-               if (unlikely(access & ~old_pte))
+               if (unlikely(!check_pte_access(access, old_pte)))
                        return 1;
                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
@@ -237,7 +237,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
                if (unlikely(old_pte & _PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
-               if (unlikely(access & ~old_pte))
+               if (unlikely(!check_pte_access(access, old_pte)))
                        return 1;
                /*
                 * Check if PTE has the cache-inhibit bit set
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 36e00371ba5aaf9a902877a7e361f1af495fae61..dc0f6a00ccbd2cf851086728640cfa3f3fd0486d 100644
@@ -174,7 +174,7 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
         * User area is mapped with PP=0x2 for read/write
         * or PP=0x3 for read-only (including writeable but clean pages).
         */
-       if (pteflags & _PAGE_USER) {
+       if (!(pteflags & _PAGE_PRIVILEGED)) {
                if (pteflags & _PAGE_RWX)
                        rflags |= 0x2;
                if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
@@ -1090,7 +1090,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path
         */
-       if (access & ~pte_val(*ptep)) {
+       if (!check_pte_access(access, pte_val(*ptep))) {
                DBG_LOW(" no access !\n");
                rc = 1;
                goto bail;
@@ -1228,12 +1228,16 @@ int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
        if (dsisr & DSISR_ISSTORE)
                access |= _PAGE_WRITE;
        /*
-        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-        * accessing a userspace segment (even from the kernel). We assume
-        * kernel addresses always have the high bit set.
+        * We set _PAGE_PRIVILEGED only when a kernel-mode
+        * access touches kernel space.
+        *
+        * _PAGE_PRIVILEGED is NOT set
+        * 1) when kernel mode accesses user space
+        * 2) when user space accesses kernel space.
         */
+       access |= _PAGE_PRIVILEGED;
        if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
-               access |= _PAGE_USER;
+               access &= ~_PAGE_PRIVILEGED;
 
        if (trap == 0x400)
                access |= _PAGE_EXEC;
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index b4b6668d1b24af9654d2fec0e5d01083e0fdcda7..6cb6bdd254bb0d7f662c1b2bf131359ec0da47c8 100644
@@ -40,7 +40,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                if (unlikely(old_pmd & _PAGE_BUSY))
                        return 0;
                /* If PMD permissions don't match, take page fault */
-               if (unlikely(access & ~old_pmd))
+               if (unlikely(!check_pte_access(access, old_pmd)))
                        return 1;
                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cdca743cdaf1365c7caac61eacb4ce54f0b77bf4..bf9078440256671872ae9c06998cee47ac169dd7 100644
@@ -50,8 +50,9 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                if (unlikely(old_pte & _PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
-               if (unlikely(access & ~old_pte))
+               if (unlikely(!check_pte_access(access, old_pte)))
                        return 1;
+
                /* Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access */
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0bf269b00de98969ad01ec063775736615126702..6d910960217e573111d52dd5b3f883ffb00b7ceb 100644
@@ -1003,7 +1003,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                end = pte_end;
 
        pte = READ_ONCE(*ptep);
-       mask = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
+       mask = _PAGE_PRESENT | _PAGE_READ;
        if (write)
                mask |= _PAGE_WRITE;
 
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ef7b922c655c15b2a104e38c225ee8caa65fa17b..125fb4b54445ed5b0355519853aeedb91aa3fb06 100644
@@ -43,9 +43,20 @@ static inline int is_exec_fault(void)
  */
 static inline int pte_looks_normal(pte_t pte)
 {
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+       if ((pte_val(pte) &
+            (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE)) ==
+           _PAGE_PRESENT) {
+               if (pte_user(pte))
+                       return 1;
+       }
+       return 0;
+#else
        return (pte_val(pte) &
-           (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
-           (_PAGE_PRESENT | _PAGE_USER);
+               (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
+               (_PAGE_PRESENT | _PAGE_USER);
+#endif
 }
 
 static struct page *maybe_pte_to_page(pte_t pte)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 16bc751f10dfcbc2b205499e0083b12b94db827c..603db71ff21d3b81cc2be90c26b8955b3735c7f0 100644
@@ -280,8 +280,17 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
        if (flags & _PAGE_WRITE)
                flags |= _PAGE_DIRTY;
 
-       /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-       flags &= ~(_PAGE_USER | _PAGE_EXEC);
+       /* we don't want to let _PAGE_EXEC leak out */
+       flags &= ~_PAGE_EXEC;
+       /*
+        * Force kernel mapping.
+        */
+#if defined(CONFIG_PPC_BOOK3S_64)
+       flags |= _PAGE_PRIVILEGED;
+#else
+       flags &= ~_PAGE_USER;
+#endif
+
 
 #ifdef _PAGE_BAP_SR
        /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
@@ -664,7 +673,7 @@ void pmdp_huge_split_prepare(struct vm_area_struct *vma,
         * the translation is still valid, because we will withdraw
         * pgtable_t after this.
         */
-       pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+       pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
 
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index c3a3bf1745b78bd39cd5b3c3d3181a174814486b..e29e4d5afa2ddd165f31cef0e0a38ffa7babe007 100644
@@ -141,7 +141,7 @@ int spufs_handle_class1(struct spu_context *ctx)
        /* we must not hold the lock when entering copro_handle_mm_fault */
        spu_release(ctx);
 
-       access = (_PAGE_PRESENT | _PAGE_READ | _PAGE_USER);
+       access = (_PAGE_PRESENT | _PAGE_READ);
        access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
        local_irq_save(flags);
        ret = hash_page(ea, access, 0x300, dsisr);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 0feeacedcef1ea8dccd4e67fcda88d2433f93b53..377e650a2a1dc3464fa0eb3f6fdf99493a15f32c 100644
@@ -152,8 +152,10 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
        access = _PAGE_PRESENT | _PAGE_READ;
        if (dsisr & CXL_PSL_DSISR_An_S)
                access |= _PAGE_WRITE;
+
+       access |= _PAGE_PRIVILEGED;
        if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
-               access |= _PAGE_USER;
+               access &= ~_PAGE_PRIVILEGED;
 
        if (dsisr & DSISR_NOHPTE)
                inv_flags |= HPTE_NOHPTE_UPDATE;