1 #ifndef _METAG_CACHEFLUSH_H
2 #define _METAG_CACHEFLUSH_H
5 #include <linux/sched.h>
8 #include <asm/l2cache.h>
9 #include <asm/metag_isa.h>
10 #include <asm/metag_mem.h>
/* Detect the cache configuration — presumably called once at boot; TODO confirm caller. */
void metag_cache_probe(void);

/*
 * Flush the entire data/code cache.  NOTE(review): the start argument is
 * presumably only used to select the address space being flushed (cf. the
 * top-bit rule described below for the ranged variants) — confirm against
 * the implementation.
 */
void metag_data_cache_flush_all(const void *start);
void metag_code_cache_flush_all(const void *start);
/*
 * Routines to flush physical cache lines that may be used to cache data or code
 * normally accessed via the linear address range supplied. The region flushed
 * must either lie in local or global address space determined by the top bit of
 * the pStart address. If Bytes is >= 4K then the whole of the related cache
 * state will be flushed rather than a limited range.
 */
/* Ranged flush variants: see the address-space/size semantics described above. */
void metag_data_cache_flush(const void *start, int bytes);
void metag_code_cache_flush(const void *start, int bytes);
27 #ifdef CONFIG_METAG_META12
29 /* Write through, virtually tagged, split I/D cache. */
/* Flush both the I and D caches in their entirety (virtually tagged Meta1 caches). */
static inline void __flush_cache_all(void)
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
	metag_data_cache_flush_all((void *) PAGE_OFFSET);

#define flush_cache_all() __flush_cache_all()
/* flush the entire user address space referenced in this mm structure */
static inline void flush_cache_mm(struct mm_struct *mm)
	/*
	 * With virtually tagged caches only the currently-running mm can have
	 * live cache entries, so other mms need no work.
	 * NOTE(review): the statement executed when this test succeeds is not
	 * visible in this chunk — presumably a full cache flush; confirm.
	 */
	if (mm == current->mm)

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/* flush a range of addresses from this mm */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
	/* Over-flushes: delegates to the whole-mm flush rather than a ranged one. */
	flush_cache_mm(vma->vm_mm);
/* Flush a single page of vma; over-flushes by delegating to the whole-mm flush. */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
	flush_cache_mm(vma->vm_mm);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/*
 * A page's data may be cached under any alias in a virtually tagged cache,
 * so the whole D-cache is flushed rather than chasing aliases.
 * NOTE(review): rationale inferred from the VIVT comment above — confirm.
 */
static inline void flush_dcache_page(struct page *page)
	metag_data_cache_flush_all((void *) PAGE_OFFSET);

/* No arch-specific locking is needed around page-cache updates here. */
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
/* Make the I-cache coherent for one page, e.g. after code has been written into it. */
static inline void flush_icache_page(struct vm_area_struct *vma,
	/* NOTE(review): the second parameter line (struct page *page) is missing from this chunk */
	metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
/* A vmalloc mapping was created: flush the whole D-cache (no ranged flush used). */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
/* A vmalloc mapping is being torn down: flush the whole D-cache, as for vmap. */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
/* Write through, physically tagged, split I/D cache. */

/*
 * Physically tagged caches have no aliasing problems, so most of the
 * flush hooks can be no-ops on this cache configuration.
 */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
	/* FIXME: We can do better than this. All we are trying to do is
	 * make the i-cache coherent, we should use the PG_arch_1 bit like
	 */
	/*
	 * NOTE(review): lines are missing from this chunk here — the tail of
	 * the FIXME comment above and the control flow choosing between the
	 * two I-cache flush mechanisms below (presumably a hardware-support
	 * test).  Confirm against the full file.
	 */
	metag_out32(1, SYSC_ICACHE_FLUSH);
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
/* Push n pages at kernel virtual address and clear the icache */
static inline void flush_icache_range(unsigned long address,
				      unsigned long endaddr)
	/*
	 * NOTE(review): the branch structure is not fully visible in this
	 * chunk — presumably the SYSC register flush and the ranged code
	 * cache flush below are alternatives, not executed sequentially.
	 */
	metag_out32(1, SYSC_ICACHE_FLUSH);
	metag_code_cache_flush((void *) address, endaddr - address);
/* Make a freshly written signal trampoline at addr visible to instruction fetch. */
static inline void flush_cache_sigtramp(unsigned long addr, int size)
	/*
	 * Flush the icache in case there was previously some code
	 * fetched from this address, perhaps a previous sigtramp.
	 *
	 * We don't need to flush the dcache, it's write through and
	 * we just wrote the sigtramp code through it.
	 */
	/*
	 * NOTE(review): as in flush_icache_range(), the branch selecting
	 * between the two flush mechanisms below is missing from this chunk.
	 */
	metag_out32(1, SYSC_ICACHE_FLUSH);
	metag_code_cache_flush((void *) addr, size);
144 #ifdef CONFIG_METAG_L2C
/*
 * Perform a single specific CACHEWD operation on an address, masking lower bits
 */
static inline void cachewd_line(void *addr, unsigned int data)
{
	/* CACHEWD acts on whole 64-byte cache lines, so align the address down. */
	unsigned long line_addr = (unsigned long)addr & ~0x3fUL;

	__builtin_meta2_cachewd((void *)line_addr, data);
}
/* Perform a certain CACHEW op on each cache line in a range */
static inline void cachew_region_op(void *start, unsigned long size,
	/* NOTE(review): the trailing parameter line (the op argument) is missing from this chunk */
	/* Byte offset of start within its 64-byte cache line. */
	unsigned long offset = (unsigned long)start & 0x3f;
	/*
	 * NOTE(review): the loop that advances line-by-line over the region is
	 * missing from this chunk; only the per-line CACHEWD call is visible.
	 */
	__builtin_meta2_cachewd(start, op);
173 /* prevent write fence and flushbacks being reordered in L2 */
174 static inline void l2c_fence_flush(void *addr)
177 * Synchronise by reading back and re-flushing.
178 * It is assumed this access will miss, as the caller should have just
179 * flushed the cache line.
181 (void)(volatile u8 *)addr;
182 cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
/* prevent write fence and writebacks being reordered in L2 */
static inline void l2c_fence(void *addr)
	/*
	 * A write back has occurred, but not necessarily an invalidate, so the
	 * readback in l2c_fence_flush() would hit in the cache and have no
	 * effect. Therefore fully flush the line first.
	 */
	cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
	l2c_fence_flush(addr);
/* Used to keep memory consistent when doing DMA. */
static inline void flush_dcache_region(void *start, unsigned long size)
	/* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
	if (meta_l2c_is_enabled()) {
		cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
		/* A write-back L2 additionally needs a fence so the flush completes. */
		if (meta_l2c_is_writeback())
			l2c_fence_flush(start + size - 1);
	/*
	 * NOTE(review): the close of this branch and the else introducing the
	 * L1-only fallback below are missing from this chunk.
	 */
		metag_data_cache_flush(start, size);
/* Push n pages at kernel virtual address and clear the icache */
/* Write back dirty lines to memory (or do nothing if no writeback caches) */
static inline void writeback_dcache_region(void *start, unsigned long size)
	/* Only a write-back L2 can hold dirty data; L1 is write through (see above). */
	if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
		cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
		/* Ensure the writebacks are not reordered past the return. */
		l2c_fence(start + size - 1);
/* Invalidate (may also write back if necessary) */
static inline void invalidate_dcache_region(void *start, unsigned long size)
	if (meta_l2c_is_enabled())
		cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
	/* NOTE(review): an else line presumably precedes the L1 fallback below — missing from this chunk */
		metag_data_cache_flush(start, size);
/* No L2 cache: the ranged L1 flush covers the whole job. */
#define flush_dcache_region(s, l) metag_data_cache_flush((s), (l))
/* The L1 D-cache is write through (per the comments above), so nothing is ever dirty. */
#define writeback_dcache_region(s, l) do {} while (0)
/* For a write-through cache, invalidate and flush are equivalent. */
#define invalidate_dcache_region(s, l) flush_dcache_region((s), (l))
/*
 * Copy data into a user page that may be executed: do the copy, then make
 * the I-cache coherent over the destination range.
 * NOTE(review): the final parameter line (the len argument) is missing
 * from this chunk.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
/*
 * Copy data out of a user page; a plain copy — no cache maintenance is
 * performed for the read direction.
 * NOTE(review): the final parameter line (the len argument) is missing
 * from this chunk.
 */
static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
	memcpy(dst, src, len);
250 #endif /* _METAG_CACHEFLUSH_H */