sh64: Wire up the shared __flush_xxx_region() flushers.
author	Paul Mundt <lethal@linux-sh.org>
	Fri, 14 Aug 2009 17:00:54 +0000 (02:00 +0900)
committer	Paul Mundt <lethal@linux-sh.org>
	Fri, 14 Aug 2009 17:00:54 +0000 (02:00 +0900)
Now with all of the prep work out of the way, kill off the SH-5 variants
and use the SH-4 version directly. This also takes advantage of the
unrolling that was previously done for the new version.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
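
For context, here is a minimal sketch of the kind of shared, unrolled flusher this change switches sh64 over to. This is illustrative only: the function name and prototype follow the removed SH-5 code below, but the unroll factor, the SH-4 "ocbp @Rn" addressing form, and the loop structure are assumptions, not the exact contents of arch/sh/mm/flush-sh4.c.

	#include <linux/cache.h>	/* L1_CACHE_BYTES */

	/*
	 * Illustrative sketch, not the real flush-sh4.c implementation:
	 * purge (writeback + invalidate) every cache line covering
	 * [start, start + size), with the inner loop unrolled by an
	 * assumed factor of four to cut loop overhead.
	 */
	void __flush_purge_region(void *start, int size)
	{
		unsigned long v = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
		unsigned long end = ((unsigned long)start + size +
				     L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

		/* Unrolled body: four ocbp (purge) ops per iteration. */
		while (end - v >= 4 * L1_CACHE_BYTES) {
			__asm__ __volatile__("ocbp @%0" : : "r" (v));
			__asm__ __volatile__("ocbp @%0" : : "r" (v + L1_CACHE_BYTES));
			__asm__ __volatile__("ocbp @%0" : : "r" (v + 2 * L1_CACHE_BYTES));
			__asm__ __volatile__("ocbp @%0" : : "r" (v + 3 * L1_CACHE_BYTES));
			v += 4 * L1_CACHE_BYTES;
		}

		/* Tail: remaining lines, one at a time. */
		while (v < end) {
			__asm__ __volatile__("ocbp @%0" : : "r" (v));
			v += L1_CACHE_BYTES;
		}
	}

The writeback (ocbwb) and invalidate (ocbi) variants would differ only in the cache instruction issued, which is what lets a single flush-sh4.o serve all three __flush_xxx_region() entry points once the Makefile change below pulls it into the sh64 build.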
arch/sh/mm/Makefile_64
arch/sh/mm/cache-sh5.c

diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64
index 2863ffb7006d12408e5e070621eb8f7b3af0ec4f..66c39106d0a88c198bbe9c6b4822c0eb86605a9e 100644
--- a/arch/sh/mm/Makefile_64
+++ b/arch/sh/mm/Makefile_64
@@ -9,7 +9,7 @@ mmu-$(CONFIG_MMU)       := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
                           extable_64.o
 
 ifndef CONFIG_CACHE_OFF
-obj-y                  += cache-sh5.o
+obj-y                  += cache-sh5.o flush-sh4.o
 endif
 
 obj-y                  += $(mmu-y)
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 3e2d7321b636aaa74dad3624efaa27a475a79cc2..698113fce8147dc9d8f3df5d9f18edf30ceadb00 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -539,54 +539,6 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
                sh64_dcache_purge_user_pages(mm, start, end);
        }
 }
-
-/*
- * Purge the range of addresses from the D-cache.
- *
- * The addresses lie in the superpage mapping. There's no harm if we
- * overpurge at either end - just a small performance loss.
- */
-void __flush_purge_region(void *start, int size)
-{
-       unsigned long long ullend, addr, aligned_start;
-
-       aligned_start = (unsigned long long)(signed long long)(signed long) start;
-       addr = L1_CACHE_ALIGN(aligned_start);
-       ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-       while (addr <= ullend) {
-               __asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
-               addr += L1_CACHE_BYTES;
-       }
-}
-
-void __flush_wback_region(void *start, int size)
-{
-       unsigned long long ullend, addr, aligned_start;
-
-       aligned_start = (unsigned long long)(signed long long)(signed long) start;
-       addr = L1_CACHE_ALIGN(aligned_start);
-       ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-       while (addr < ullend) {
-               __asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
-               addr += L1_CACHE_BYTES;
-       }
-}
-
-void __flush_invalidate_region(void *start, int size)
-{
-       unsigned long long ullend, addr, aligned_start;
-
-       aligned_start = (unsigned long long)(signed long long)(signed long) start;
-       addr = L1_CACHE_ALIGN(aligned_start);
-       ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-       while (addr < ullend) {
-               __asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
-               addr += L1_CACHE_BYTES;
-       }
-}
 #endif /* !CONFIG_DCACHE_DISABLED */
 
 /*