powerpc/e6500: Make TLB lock recursive
author    Scott Wood <scottwood@freescale.com>
          Fri, 7 Mar 2014 20:48:35 +0000 (14:48 -0600)
committer Scott Wood <scottwood@freescale.com>
          Thu, 20 Mar 2014 00:57:13 +0000 (19:57 -0500)
Once special level interrupts are supported, we may take nested TLB
misses -- so allow the same thread to acquire the lock recursively.

The lock will not be effective against the nested TLB miss handler
trying to write the same entry as the interrupted TLB miss handler, but
that's also a problem on non-threaded CPUs that lack TLB write
conditional.  This will be addressed in the patch that enables crit/mc
support by invalidating the TLB on return from level exceptions.

Signed-off-by: Scott Wood <scottwood@freescale.com>
arch/powerpc/include/asm/mmu-book3e.h
arch/powerpc/kernel/setup_64.c
arch/powerpc/mm/tlb_low_64e.S
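
The change boils down to a simple protocol: the per-core lock byte holds 0 when free and the owning hardware thread's tag when held, so a nested TLB miss (taken from a crit/mcheck level interrupt) can recognize its own lock and skip both the acquire and the release. Below is a minimal C sketch of that protocol, offered only as an illustration: the helper names (thread_tag, tlb_lock_sketch, tlb_unlock_sketch) and the "+1" tag encoding are assumptions of the sketch, while the real handlers implement this with lbarx/stbcx. on the lock byte of struct tlb_core_data, as in the tlb_low_64e.S hunk below.

/*
 * Illustrative sketch of the recursive per-core lock scheme, using GCC
 * atomic builtins.  Not kernel code; names and the +1 tag encoding are
 * assumptions for the example only.
 */
#include <stdbool.h>
#include <stdint.h>

struct tlb_core_data_sketch {
	uint8_t lock;			/* must stay first (see BUILD_BUG_ON) */
	uint8_t esel_next, esel_max, esel_first;
};

/* 0 means "free", so tag each hardware thread with its index + 1. */
static uint8_t thread_tag(unsigned int thread_index)
{
	return (uint8_t)(thread_index + 1);
}

/* Returns true if this acquire is the outermost one and must be released. */
static bool tlb_lock_sketch(struct tlb_core_data_sketch *tcd, uint8_t me)
{
	uint8_t free = 0;

	if (__atomic_load_n(&tcd->lock, __ATOMIC_RELAXED) == me)
		return false;		/* nested miss from the same thread */

	while (!__atomic_compare_exchange_n(&tcd->lock, &free, me, false,
					    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		free = 0;		/* another thread holds it: spin */

	return true;
}

static void tlb_unlock_sketch(struct tlb_core_data_sketch *tcd, bool outermost)
{
	if (outermost)			/* skip release for a recursive acquire */
		__atomic_store_n(&tcd->lock, 0, __ATOMIC_RELEASE);
}

In the assembly, cr1.eq records whether the lock was already held by the current thread, and the tlb_unlock_e6500 macro branches around the store when it was; that flag plays the role of the "outermost" return value in the sketch above.
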

index 89b785d1684673d0e00b7a75c132a345547f2e33..901dac6b6cb7f6bb6299c14b82158d5fc5cbcc39 100644 (file)
@@ -287,11 +287,14 @@ extern int mmu_linear_psize;
 extern int mmu_vmemmap_psize;
 
 struct tlb_core_data {
+       /*
+        * Per-core spinlock for e6500 TLB handlers (no tlbsrx.)
+        * Must be the first struct element.
+        */
+       u8 lock;
+
        /* For software way selection, as on Freescale TLB1 */
        u8 esel_next, esel_max, esel_first;
-
-       /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */
-       u8 lock;
 };
 
 #ifdef CONFIG_PPC64
index da9c42f53bb137107bcaa243e01a183e30d0642a..4933909cc5c001045b57f8fffef44c2a42478985 100644 (file)
@@ -102,6 +102,8 @@ static void setup_tlb_core_data(void)
 {
        int cpu;
 
+       BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);
+
        for_each_possible_cpu(cpu) {
                int first = cpu_first_thread_sibling(cpu);
 
index 6bf50507a4b5a40c42e5a2373f6e8d7a684f3fc0..1e50249e900b238c3f959877f1b176aeeea6292e 100644 (file)
@@ -284,7 +284,7 @@ itlb_miss_fault_bolted:
  * r14 = page table base
  * r13 = PACA
  * r11 = tlb_per_core ptr
- * r10 = crap (free to use)
+ * r10 = cpu number
  */
 tlb_miss_common_e6500:
        /*
@@ -293,15 +293,18 @@ tlb_miss_common_e6500:
         *
         * MAS6:IND should be already set based on MAS4
         */
-       addi    r10,r11,TCD_LOCK
-1:     lbarx   r15,0,r10
+1:     lbarx   r15,0,r11
+       lhz     r10,PACAPACAINDEX(r13)
        cmpdi   r15,0
+       cmpdi   cr1,r15,1       /* set cr1.eq = 0 for non-recursive */
        bne     2f
-       li      r15,1
-       stbcx.  r15,0,r10
+       stbcx.  r10,0,r11
        bne     1b
+3:
        .subsection 1
-2:     lbz     r15,0(r10)
+2:     cmpd    cr1,r15,r10     /* recursive lock due to mcheck/crit/etc? */
+       beq     cr1,3b          /* unlock will happen if cr1.eq = 0 */
+       lbz     r15,0(r11)
        cmpdi   r15,0
        bne     2b
        b       1b
@@ -379,9 +382,11 @@ tlb_miss_common_e6500:
 
 tlb_miss_done_e6500:
        .macro  tlb_unlock_e6500
+       beq     cr1,1f          /* no unlock if lock was recursively grabbed */
        li      r15,0
        isync
-       stb     r15,TCD_LOCK(r11)
+       stb     r15,0(r11)
+1:
        .endm
 
        tlb_unlock_e6500