/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_ICACHE_PAGES	32
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			      unsigned long exec_offset);
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format,
 * signal handler code and kprobes code.
 */
static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;
	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}
	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
	start &= ~(L1_CACHE_BYTES-1);
	end += L1_CACHE_BYTES-1;
	end &= ~(L1_CACHE_BYTES-1);
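	/*
	 * Illustrative example, assuming L1_CACHE_BYTES == 32: a range of
	 * 0x1004..0x1086 is widened to the line-aligned 0x1000..0x10a0, so
	 * every cache line overlapping the original range is covered.
	 */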
	local_irq_save(flags);
	jump_to_uncached();
	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;

		/* Write back the matching D-cache line. */
		__ocbwb(v);

		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);
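		/*
		 * entry_mask extracts the set-index bits of v, so OR-ing
		 * them into CACHE_IC_ADDRESS_ARRAY yields the memory-mapped
		 * array entry for the I-cache set this address maps to.
		 */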
		/* Clear i-cache line valid-bit */
		for (i = 0; i < cpu_data->icache.ways; i++) {
			__raw_writel(0, icacheaddr);
			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}
static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
	unsigned long flags, exec_offset = 0;
	/*
	 * All types of SH-4 require PC to be uncached to operate on the
	 * I-cache. Some types of SH-4 require PC to be uncached to operate
	 * on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = cached_to_uncached;
	local_irq_save(flags);
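	/*
	 * SH_CACHE_ASSOC sets the 'A' bit in the array address, making the
	 * write associative: only a line whose tag matches 'phys' is
	 * affected, so no unrelated cache line can be evicted here.
	 */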
	__flush_cache_one(start | SH_CACHE_ASSOC, phys, exec_offset);
	local_irq_restore(flags);
}
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = page_to_phys(page);
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;
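		/*
		 * Illustrative numbers: with 4 KiB pages and 16 KiB per way
		 * of D-cache there are four possible page colours, so the
		 * same physical page is flushed once per colour to catch
		 * every aliased mapping.
		 */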
		/* Loop over every D-cache alias of the page. */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i <= n; i++, addr += PAGE_SIZE)
			flush_cache_one(addr, phys);
	}

	wmb();
}
/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;
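	/*
	 * CCR must be modified while executing from an uncached region,
	 * hence the jump_to_uncached() below; CCR_CACHE_ICI flash-
	 * invalidates the entire I-cache with a single write.
	 */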
	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = __raw_readl(CCR);
	ccr |= CCR_CACHE_ICI;
	__raw_writel(ccr, CCR);
	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}
static void flush_dcache_all(void)
{
	unsigned long addr, end_addr, entry_offset;
	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(current_cpu_data.dcache.sets <<
		 current_cpu_data.dcache.entry_shift) *
			current_cpu_data.dcache.ways;

	entry_offset = 1 << current_cpu_data.dcache.entry_shift;
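	/*
	 * Writing zero to an OC address array entry clears its valid and
	 * dirty bits; the hardware writes the old line back to memory first
	 * if it was dirty, so this both flushes and invalidates. The eight
	 * writes per iteration are a manual unroll to keep loop overhead
	 * down.
	 */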
	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
	}
}
static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	flush_dcache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long address, pfn, phys;
	int map_coherent = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	void *vaddr;
	vma = data->vma;
	address = data->addr1 & PAGE_MASK;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;
	page = pfn_to_page(pfn);
	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;
	pgd = pgd_offset(vma->vm_mm, address);
	pud = pud_offset(pgd, address);
	pmd = pmd_offset(pud, address);
	pte = pte_offset_kernel(pmd, address);
	/* If the page isn't present, there is nothing to do here. */
	if (!(pte_val(*pte) & _PAGE_PRESENT))
		return;
	if (vma->vm_mm == current->active_mm)
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			!test_bit(PG_dcache_dirty, &page->flags) &&
			page_mapped(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, address);
		else
			vaddr = kmap_atomic(page, KM_USER0);

		address = (unsigned long)vaddr;
	}
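	/*
	 * Only flush if the mapping's colour differs from the physical
	 * address's colour; pages_do_alias() XORs the two addresses and
	 * tests the set-index bits that lie above the page offset.
	 */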
	if (pages_do_alias(address, phys))
		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
			(address & shm_align_mask), phys);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
	if (vaddr) {
		if (map_coherent)
			kunmap_coherent(vaddr);
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	flush_dcache_all();
	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}
/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
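/*
 * For illustration: with 4 KiB pages and 16 KiB per way, bits [13:12]
 * of 'addr' select one of four page colours within the array, and the
 * loop below walks every way at that colour.
 */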
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			      unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;
	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;
	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why? Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l	1f, %0\n\t"
		     "add	%1, %0\n\t"
		     "jmp	@%0\n\t"
		     "nop\n\t"
		     ".balign	4\n\t"
		     "1:	.long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
extern void __weak sh4__flush_region_init(void);
/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		__raw_readl(CCN_PVR),
		__raw_readl(CCN_CVR),
		__raw_readl(CCN_PRR));
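	/*
	 * Wire this CPU's flush primitives into the generic cacheflush
	 * dispatch; the local_* hooks are what the arch-wide wrappers
	 * invoke (via IPI on SMP) for each flavour of flush.
	 */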
	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}