[PATCH] Invert sense of SLB class bit
author	David Gibson <david@gibson.dropbear.id.au>
	Tue, 6 Sep 2005 04:59:47 +0000 (14:59 +1000)
committer	Paul Mackerras <paulus@samba.org>
	Tue, 6 Sep 2005 06:57:46 +0000 (16:57 +1000)
Currently, we set the class bit in kernel SLB entries, and clear it on
user SLB entries.  On POWER5, ERAT entries created in real mode have
the class bit clear.  So to avoid flushing kernel ERAT entries on each
context switch, this patch inverts our usage of the class bit, setting
it on user SLB entries and clearing it on kernel SLB entries.
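
For context (editor's note, not part of the original message): slbie
invalidates only entries whose class matches the class bit in its
operand, so the user-segment flush at context switch spares kernel
ERAT entries only if kernel and real-mode entries share a class.  A
minimal C sketch of the encoding change, with flag values taken from
include/asm-ppc64/mmu.h; the program itself is illustrative only:

	/* Sketch, not part of the patch: SLB VSID flags before/after. */
	#include <stdio.h>

	#define SLB_VSID_KP 0x0000000000000400ULL /* Kp (problem-state) key */
	#define SLB_VSID_KS 0x0000000000000800ULL /* Ks (supervisor) key */
	#define SLB_VSID_C  0x0000000000000080ULL /* class */

	/* Before: kernel entries carried the class bit. */
	#define OLD_SLB_VSID_KERNEL (SLB_VSID_KP | SLB_VSID_C)
	#define OLD_SLB_VSID_USER   (SLB_VSID_KP | SLB_VSID_KS)

	/* After: user entries carry it, so kernel SLB entries share
	 * class 0 with ERAT entries created in real mode. */
	#define NEW_SLB_VSID_KERNEL (SLB_VSID_KP)
	#define NEW_SLB_VSID_USER   (SLB_VSID_KP | SLB_VSID_KS | SLB_VSID_C)

	int main(void)
	{
		printf("kernel: %#llx -> %#llx\n",
		       OLD_SLB_VSID_KERNEL, NEW_SLB_VSID_KERNEL);
		printf("user:   %#llx -> %#llx\n",
		       OLD_SLB_VSID_USER, NEW_SLB_VSID_USER);
		return 0;
	}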

Booted on POWER5 and G5.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/ppc64/kernel/entry.S
arch/ppc64/mm/hugetlbpage.c
arch/ppc64/mm/slb.c
include/asm-ppc64/mmu.h

diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
index b61572eb2a7130d7a46924f8412024b9879286e7..bf99b4a92f20864313c7d79211a5d5a6db68a7ce 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/ppc64/kernel/entry.S
@@ -400,15 +400,14 @@ BEGIN_FTR_SECTION
        cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
        cror    eq,4*cr1+eq,eq
        beq     2f              /* if yes, don't slbie it */
-       oris    r0,r6,0x0800    /* set C (class) bit */
 
        /* Bolt in the new stack SLB entry */
        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
-       oris    r6,r6,(SLB_ESID_V)@h
-       ori     r6,r6,(SLB_NUM_BOLTED-1)@l
-       slbie   r0
-       slbie   r0              /* Workaround POWER5 < DD2.1 issue */
-       slbmte  r7,r6
+       oris    r0,r6,(SLB_ESID_V)@h
+       ori     r0,r0,(SLB_NUM_BOLTED-1)@l
+       slbie   r6
+       slbie   r6              /* Workaround POWER5 < DD2.1 issue */
+       slbmte  r7,r0
        isync
 
 2:
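
A reading of the new sequence (editor's paraphrase, not from the
patch): r6 keeps the raw old-stack ESID, whose class bit is now clear
for kernel entries, so it can go straight to slbie, while the valid
bit and bolted-slot index are assembled into the scratch register r0
for slbmte.  A hypothetical C rendering, in the inline-asm style the
kernel already uses; SLB_ESID_V and SLB_NUM_BOLTED are the kernel's
definitions from include/asm-ppc64/mmu.h:

	/* Sketch only: what the asm above does, with register roles noted. */
	static inline void bolt_new_stack_slb(unsigned long old_esid, /* r6 */
					      unsigned long new_vsid) /* r7 */
	{
		unsigned long rb = old_esid | SLB_ESID_V
				 | (SLB_NUM_BOLTED - 1);              /* r0 */

		asm volatile("slbie %0" : : "r" (old_esid)); /* class bits agree: 0 */
		asm volatile("slbie %0" : : "r" (old_esid)); /* POWER5 < DD2.1 */
		asm volatile("slbmte %0,%1" : : "r" (new_vsid), "r" (rb));
		asm volatile("isync" : : : "memory");
	}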
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index e7833c80eb6824dc93abcfb3ff73dba56d53ac9d..338771ec70d7622775190417f69085496ac2e209 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -144,7 +144,8 @@ static void flush_low_segments(void *parm)
        for (i = 0; i < NUM_LOW_AREAS; i++) {
                if (! (areas & (1U << i)))
                        continue;
-               asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+               asm volatile("slbie %0"
+                            : : "r" ((i << SID_SHIFT) | SLBIE_C));
        }
 
        asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@ static void flush_high_segments(void *parm)
                        continue;
                for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
                        asm volatile("slbie %0"
-                                    :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+                                    :: "r" (((i << HTLB_AREA_SHIFT)
+                                            + (j << SID_SHIFT)) | SLBIE_C));
        }
 
        asm volatile("isync" : : : "memory");
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index 244150a0bc18713ee49267b4f79521d2197722fd..0473953f6a37713cdf1cc8d18597511a299c102e 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -87,8 +87,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
-                       esid_data = (unsigned long)get_paca()->slb_cache[i]
-                               << SID_SHIFT;
+                       esid_data = ((unsigned long)get_paca()->slb_cache[i]
+                               << SID_SHIFT) | SLBIE_C;
                        asm volatile("slbie %0" : : "r" (esid_data));
                }
                asm volatile("isync" : : : "memory");
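
The hugetlbpage.c hunks above follow the same pattern as this one:
every user-segment invalidation must now OR SLBIE_C into the slbie
operand, because user entries are created with SLB_VSID_C set and
slbie only hits entries of the matching class.  A hypothetical wrapper
(slbie_user_segment is the editor's name, not the kernel's; SID_SHIFT
and SLBIE_C as defined in the kernel headers):

	/* Sketch only: invalidate one user segment by ESID index. */
	static inline void slbie_user_segment(unsigned long esid_index)
	{
		unsigned long rb = (esid_index << SID_SHIFT) | SLBIE_C;

		asm volatile("slbie %0" : : "r" (rb));
	}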
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index ad36bb28de2983c92750a8072195b34cc5db4c5f..7bc42eb087adb777df01b69350ada386055c97a7 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -54,8 +54,10 @@ extern char initial_stab[];
 #define SLB_VSID_C             ASM_CONST(0x0000000000000080) /* class */
 #define SLB_VSID_LS            ASM_CONST(0x0000000000000070) /* size of largepage */
  
-#define SLB_VSID_KERNEL                (SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER          (SLB_VSID_KP|SLB_VSID_KS)
+#define SLB_VSID_KERNEL                (SLB_VSID_KP)
+#define SLB_VSID_USER          (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C                        (0x08000000)
 
 /*
  * Hash table
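
One detail worth noting (editor's observation, not in the patch):
SID_SHIFT is 28 on ppc64, so SLBIE_C = 0x08000000 sits one bit below
the ESID field in the slbie operand, the position the architecture
assigns to the class bit.  A compile-time sanity check, assuming a
C11 compiler:

	#define SID_SHIFT 28                  /* 256MB segments */
	#define SLBIE_C   (0x08000000)
	_Static_assert(SLBIE_C == (1UL << (SID_SHIFT - 1)),
		       "class bit sits just below the ESID field");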