/*
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
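
/*
 * All cache maintenance is dispatched through these function pointers.
 * Each one starts out as a noop and is pointed at the appropriate
 * CPU-specific implementation by cpu_cache_init() at the bottom of
 * this file.
 */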
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);
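
/*
 * Region semantics, in the usual SH sense: wback writes dirty lines
 * back but leaves them valid, purge writes them back and invalidates,
 * and invalidate drops lines without writing them back.
 */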
static inline void noop__flush_region(void *start, int size)
{
}
static inline void cacheop_on_each_cpu(void (*func)(void *info), void *info,
                                       int wait)
{
        preempt_disable();

        /*
         * It's possible that this gets called early on when IRQs are
         * still disabled due to ioremapping by the boot CPU, so don't
         * even attempt IPIs unless there are other CPUs online.
         */
        if (num_online_cpus() > 1)
                smp_call_function(func, info, wait);

        /* smp_call_function() skips the calling CPU, so run it here too. */
        func(info);

        preempt_enable();
}
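
/*
 * copy_to_user_page() is what things like ptrace use to write into
 * another process's pages through the kernel mapping. With an aliasing
 * D-cache and a clean, mapped page, the write goes through a
 * kmap_coherent() mapping of the same cache colour as the user's
 * vaddr, so the user alias sees the new data without a separate flush.
 */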
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                /* No coherent mapping; note that the D-cache view is dirty. */
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }
}
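
/*
 * copy_user_highpage() is the copy-on-write page copy: read the source
 * through a coherent mapping when it may have live user aliases, then
 * purge the destination's kernel alias if its cache colour differs
 * from the user address the page is about to appear at.
 */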
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            test_bit(PG_dcache_clean, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);

        /* Make sure the copy is visible to other CPUs before the page is used */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        clear_page(kaddr);
        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
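
/*
 * Called via update_mmu_cache() when a PTE is installed. If the kernel
 * dirtied the page while it had no user mapping (PG_dcache_clean was
 * unset), purge the kernel alias so the new user mapping starts out
 * coherent.
 */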
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        if (pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
                int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);

                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}
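
/*
 * __flush_anon_page() reconciles the kernel view of an anonymous page
 * with a user alias at vmaddr (flush_anon_page() is invoked, e.g., when
 * such pages are handed out by get_user_pages()), and only does real
 * work when the two addresses actually collide in the cache.
 */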
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    test_bit(PG_dcache_clean, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}
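
/*
 * The flush_*() entry points below are SMP wrappers: each hands the
 * matching local_flush_*() op to cacheop_on_each_cpu(), which runs it
 * here and broadcasts it to any other online CPUs.
 */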
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);
void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;
        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}
void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;
        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}
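
/*
 * smp_call_function() only carries a single void * argument, so ops
 * that need a vma plus two addresses marshal them through a
 * struct flusher_data on the caller's stack.
 */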
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;
        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;
        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);
void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);
void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;
        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}
void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
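
/*
 * Worked example with hypothetical numbers: 512 sets and a 32-byte
 * line (entry_shift == 5) make a 16KiB way; with 4KiB pages that
 * leaves alias_mask = (511 << 5) & ~0xfff = 0x3000, so bits 12-13 of
 * a virtual address pick one of n_aliases = (0x3000 >> 12) + 1 = 4
 * cache colours.
 */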
static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}
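
/*
 * Boot-time wiring: compute the alias parameters for each cache,
 * install the noop region flushers, then let the matching CPU family's
 * cache_init() hook replace the noops with real implementations.
 */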
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;
#ifdef CCR
        cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif
        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region = noop__flush_region;
        __flush_purge_region = noop__flush_region;
        __flush_invalidate_region = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);
                sh2_cache_init();
        }
        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);
                sh2a_cache_init();
        }
        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);
                sh3_cache_init();
                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);
                        sh7705_cache_init();
                }
        }
        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);
                sh4_cache_init();
                if ((boot_cpu_data.type == CPU_SH7786) ||
                    (boot_cpu_data.type == CPU_SHX3)) {
                        extern void __weak shx3_cache_init(void);
                        shx3_cache_init();
                }
        }
        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);
                sh5_cache_init();
        }

skip:
        emit_cache_params();
}