MIPS: Mask out limit field when calculating wired entry count
author      Paul Burton <paul.burton@imgtec.com>
            Sat, 12 Nov 2016 01:26:07 +0000 (01:26 +0000)
committer   Ralf Baechle <ralf@linux-mips.org>
            Thu, 24 Nov 2016 15:44:16 +0000 (16:44 +0100)
Since MIPSr6 the Wired register is split into 2 fields: the upper 16
bits of the register indicate a limit on the value that the wired
entry count in the bottom 16 bits can take. This means
that simply reading the wired register doesn't get us a valid TLB entry
index any longer, and we instead need to retrieve only the lower 16 bits
of the register. Introduce a new num_wired_entries() function which does
this on MIPSr6 or higher and simply returns the value of the wired
register on older architecture revisions, and make use of it when
reading the number of wired entries.
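
To make the field split concrete, here is a minimal sketch (not part of
the patch) of how a hypothetical MIPSr6 Wired value of 0x00400007 would
decompose using the MIPSR6_WIRED_* masks added by this patch:

  unsigned int wired, limit, count;

  wired = read_c0_wired();                       /* e.g. 0x00400007 (hypothetical) */
  limit = (wired & MIPSR6_WIRED_LIMIT) >> 16;    /* limit field: 0x40 */
  count = wired & MIPSR6_WIRED_WIRED;            /* wired entry count: 7 */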

Since commit e710d6668309 ("MIPS: tlb-r4k: If there are wired entries,
don't use TLBINVF") we have been using a non-zero number of wired
entries to determine whether we should avoid use of the tlbinvf
instruction (which would invalidate wired entries) and instead loop over
TLB entries in local_flush_tlb_all(). This loop begins at the number
of wired entries or, before this patch, at some large bogus TLB index
on MIPSr6 systems. Thus, since the aforementioned commit, some MIPSr6 systems
with FTLBs have been prone to leaving stale address translations in the
FTLB & crashing in various weird & wonderful ways when we later observe
the wrong memory.
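
For context, a heavily simplified sketch of the relevant logic in
local_flush_tlb_all() (hazard barriers and the per-set FTLB invalidation
are omitted); it shows why starting the loop at a bogus index leaves
most TLB/FTLB entries untouched:

  unsigned int entry = num_wired_entries();  /* was read_c0_wired() before this patch */

  if (cpu_has_tlbinv && !entry) {
          /* No wired entries to preserve: blast the whole TLB. */
          tlbinvf();
  } else {
          /* Skip the wired entries, overwrite everything above them. */
          while (entry < current_cpu_data.tlbsize) {
                  write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                  write_c0_index(entry);
                  tlb_write_indexed();
                  entry++;
          }
  }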

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Matt Redfearn <matt.redfearn@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/14557/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/tlb.h
arch/mips/mm/init.c
arch/mips/mm/tlb-r4k.c

diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 7dd2dd47909a3295b08ae5de56de7390c19ff6f1..df78b2ca70ebd8c6b57348f9e62f304394f4f3d0 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
 #error Bad page size configuration for hugetlbfs!
 #endif
 
+/*
+ * Wired register bits
+ */
+#define MIPSR6_WIRED_LIMIT     (_ULCAST_(0xffff) << 16)
+#define MIPSR6_WIRED_WIRED     (_ULCAST_(0xffff) << 0)
+
 /*
  * Values used for computation of new tlb entries
  */
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index 4a2349302b552c5a7c4772c235553b4bdbef4695..dd179fd8acdac4a6e9f47257f79263f4a6432c46 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -1,6 +1,9 @@
 #ifndef __ASM_TLB_H
 #define __ASM_TLB_H
 
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+
 /*
  * MIPS doesn't need any special per-pte or per-vma handling, except
  * we need to flush cache for area to be unmapped.
                ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) |               \
                 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
 
+static inline unsigned int num_wired_entries(void)
+{
+       unsigned int wired = read_c0_wired();
+
+       if (cpu_has_mips_r6)
+               wired &= MIPSR6_WIRED_WIRED;
+
+       return wired;
+}
+
 #include <asm-generic/tlb.h>
 
 #endif /* __ASM_TLB_H */
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3a6edecc3f385e4bd897750aa66488623f60411e..e86ebcf5c071f8c9d9737b9d1d1e21cd95204212 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -118,7 +118,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
                writex_c0_entrylo1(entrylo);
        }
 #endif
-       tlbidx = read_c0_wired();
+       tlbidx = num_wired_entries();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
@@ -147,7 +147,7 @@ void kunmap_coherent(void)
 
        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
-       wired = read_c0_wired() - 1;
+       wired = num_wired_entries() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index bba9c1484b41e1bc8c124b3a2c50e53eb27e032c..0596505770dba382d4821df12f67cd431480873a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -65,7 +65,7 @@ void local_flush_tlb_all(void)
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
 
-       entry = read_c0_wired();
+       entry = num_wired_entries();
 
        /*
         * Blast 'em all away.
@@ -385,7 +385,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        old_ctx = read_c0_entryhi();
        htw_stop();
        old_pagemask = read_c0_pagemask();
-       wired = read_c0_wired();
+       wired = num_wired_entries();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
@@ -449,7 +449,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
        htw_stop();
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
-       wired = read_c0_wired();
+       wired = num_wired_entries();
        if (--temp_tlb_entry < wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");