#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/* cache code */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)
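
/*
 * Editorial note: these values are or'ed into the 020/030 CACR below.
 * On the 68030, CACR bit 3 is CI (clear instruction cache) and bit 11
 * is CD (clear data cache), so 0x00000808 clears both caches while
 * 0x00000008 clears only the instruction cache.
 */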

#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SETMASK	0
#endif
#ifndef CACHE_MODE
#define	CACHE_MODE	0
#define	CACR_ICINVA	0
#define	CACR_DCINVA	0
#define	CACR_BCINVA	0
#endif
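
/*
 * Editorial note: on ColdFire builds the real values come from the
 * platform headers (reached through <asm/mcfsim.h>); elsewhere they fall
 * back to 0.  That is harmless because every user below sits behind a
 * CPU_IS_COLDFIRE test, which is constant false on classic m68k, so the
 * ColdFire paths are compiled away.
 */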

/*
 * The ColdFire architecture has no way to clear individual cache lines, so
 * we are stuck invalidating all the cache entries when we want a clear
 * operation.
 */
static inline void clear_cf_icache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"		/* sync the pipeline after the movec */
		: : "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
}

static inline void clear_cf_dcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		: : "r" (CACHE_MODE | CACR_DCINVA));
}

static inline void clear_cf_bcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		: : "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
}

/*
 * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
 * The start and end addresses are cache line numbers, not memory addresses.
 */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	/*
	 * Each asm block pushes four consecutive line addresses (the three
	 * addq instructions advance %0 by 3), and the loop step of
	 * (0x10 - 3) = 13 makes up the rest, so every iteration covers
	 * 0x10 line addresses in total.  The same pattern is used for the
	 * data and branch caches below.
	 */
	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}

/*
 * Invalidate the cache for the specified memory range.
 * The range starts at the given physical address and
 * covers the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);

/*
 * Push any dirty cache lines in the specified memory range.
 * The range starts at the given physical address and
 * covers the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * address range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
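
/*
 * Illustrative sketch (editorial): how a caller might use the declarations
 * above around a DMA transfer.  The helper name and the to_device flag are
 * made up for this example; the semantics follow the comments above.
 */
static inline void example_dma_sync(unsigned long paddr, int len,
				    int to_device)
{
	if (to_device)
		cache_push(paddr, len);		/* write dirty lines to RAM */
	else
		cache_clear(paddr, len);	/* drop stale lines before reading */
}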

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k"		\
				     : : );			\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/* flush_cache_range/flush_cache_page must be macros to avoid
   a dependency on linux/mm.h, which includes this file... */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;

		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
		if (start > end) {
			/* the set index wrapped: flush in two pieces */
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__("nop\n\t"
				     ".chip 68040\n\t"
				     "cpushp %%bc,(%0)\n\t"
				     ".chip 68k"
				     : : "a" (__pa(vaddr)));
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I_AND_D));
	}
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
#define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))

extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
				    unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
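
/*
 * Editorial note: only copy_to_user_page() flushes the icache afterwards.
 * Writing into another process's page (e.g. ptrace inserting a breakpoint)
 * can modify code, so stale icache lines must be discarded; merely reading
 * the page in copy_from_user_page() cannot.
 */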

#endif /* _M68K_CACHEFLUSH_H */