/*
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
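
/*
 * The cache flush operations are exposed as function pointers so that the
 * implementation matching the probed CPU family (e.g. SH-2 or SH-4) can be
 * installed at boot time by cpu_cache_init(). Until then they point at the
 * no-op stubs below.
 */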
void (*flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_dup_mm)(struct mm_struct *mm);
void (*flush_cache_page)(struct vm_area_struct *vma,
			 unsigned long addr, unsigned long pfn);
void (*flush_cache_range)(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end);
void (*flush_dcache_page)(struct page *page);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma,
			  struct page *page);
void (*flush_cache_sigtramp)(unsigned long address);
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);

static inline void noop_flush_cache_all(void)
{
}

static inline void noop_flush_cache_mm(struct mm_struct *mm)
{
}

static inline void noop_flush_cache_page(struct vm_area_struct *vma,
					 unsigned long addr, unsigned long pfn)
{
}

static inline void noop_flush_cache_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end)
{
}

static inline void noop_flush_dcache_page(struct page *page)
{
}

static inline void noop_flush_icache_range(unsigned long start,
					   unsigned long end)
{
}

static inline void noop_flush_icache_page(struct vm_area_struct *vma,
					  struct page *page)
{
}

static inline void noop_flush_cache_sigtramp(unsigned long address)
{
}

static inline void noop__flush_region(void *start, int size)
{
}
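
/*
 * On CPUs with D-cache aliases (dcache.n_aliases != 0), writing to a page
 * that is also mapped in userspace must go through a kernel mapping of the
 * same cache colour as the user address, otherwise the user-visible copy
 * can be stale. kmap_coherent() provides such a mapping when the page is
 * mapped and not already marked PG_dcache_dirty.
 */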
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}
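
/*
 * copy_user_highpage() and clear_user_highpage() are typically used on
 * copy-on-write and anonymous page faults. The kernel mapping used for the
 * copy may alias the eventual user mapping, so lines dirtied through the
 * kernel address are written back with __flush_wback_region() before the
 * page is handed to userspace.
 */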
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is up to date on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}
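
/*
 * __flush_anon_page() is reached via flush_anon_page() before the kernel
 * reads an anonymous page through its linear mapping (for example in the
 * get_user_pages() paths) while userspace may still hold dirty cache lines
 * for it under a different colour; the user-coloured lines are written back
 * so that both views agree.
 */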
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}
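
/*
 * A cache has aliases when one way spans more than a page, so the same
 * physical page can be indexed under several "colours". alias_mask keeps
 * the index bits above PAGE_SHIFT and n_aliases is the number of colours.
 * For instance, a 16KB direct-mapped cache with 32-byte lines
 * (entry_shift = 5, sets = 512) and 4KB pages yields
 * alias_mask = ((512 - 1) << 5) & ~0xfff = 0x3000 and n_aliases = 4.
 */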
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
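
/*
 * cpu_cache_init() runs early at boot: it derives alias information for
 * each probed cache, points every flush operation at a no-op stub, and then
 * lets the family-specific initializer (sh2_cache_init(), sh4_cache_init(),
 * ...) override the pointers it actually implements.
 */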
void __init cpu_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	flush_cache_all		= noop_flush_cache_all;
	flush_cache_mm		= noop_flush_cache_mm;
	flush_cache_dup_mm	= noop_flush_cache_mm;
	flush_cache_page	= noop_flush_cache_page;
	flush_cache_range	= noop_flush_cache_range;
	flush_dcache_page	= noop_flush_dcache_page;
	flush_icache_range	= noop_flush_icache_range;
	flush_icache_page	= noop_flush_icache_page;
	flush_cache_sigtramp	= noop_flush_cache_sigtramp;

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);