/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/smp.h>
17 #include <asm/cacheops.h>
19 #include <asm/pgtable.h>
20 #include <asm/mmu_context.h>
21 #include <asm/system.h>
22 #include <asm/isadep.h>
24 #include <asm/bootinfo.h>
27 /* For R3000 cores with R4000 style caches */
28 static unsigned long icache_size, dcache_size; /* Size in bytes */
30 #include <asm/r4kcache.h>
32 extern int r3k_have_wired_reg; /* in r3k-tlb.c */
34 /* This sequence is required to ensure icache is disabled immediately */
/*
 * Force the CPU out of instruction streaming after the ICE bit has been
 * cleared: a taken branch flushes the prefetch queue, so no stale
 * instructions are executed from the (now disabled) icache.
 */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set    push\n\t" \
	".set    noreorder\n\t" \
	"b       1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)
45 /* TX39H-style cache flush routines. */
46 static void tx39h_flush_icache_all(void)
48 unsigned long flags, config;
50 /* disable icache (set ICE#) */
51 local_irq_save(flags);
52 config = read_c0_conf();
53 write_c0_conf(config & ~TX39_CONF_ICE);
54 TX39_STOP_STREAMING();
56 write_c0_conf(config);
57 local_irq_restore(flags);
/*
 * DMA cache maintenance for the write-through TX39/H core: nothing to
 * write back, so just invalidate the affected dcache range.
 */
static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();			/* order prior writes before the invalidate */
	blast_inv_dcache_range(addr, addr + size);
}
71 static inline void tx39_blast_dcache_page(unsigned long addr)
73 if (current_cpu_type() != CPU_TX3912)
74 blast_dcache16_page(addr);
/*
 * Invalidate one page worth of dcache lines by index (no TLB lookup),
 * used when the page belongs to a non-current ASID.
 */
static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}
82 static inline void tx39_blast_dcache(void)
87 static inline void tx39_blast_icache_page(unsigned long addr)
89 unsigned long flags, config;
90 /* disable icache (set ICE#) */
91 local_irq_save(flags);
92 config = read_c0_conf();
93 write_c0_conf(config & ~TX39_CONF_ICE);
94 TX39_STOP_STREAMING();
95 blast_icache16_page(addr);
96 write_c0_conf(config);
97 local_irq_restore(flags);
100 static inline void tx39_blast_icache_page_indexed(unsigned long addr)
102 unsigned long flags, config;
103 /* disable icache (set ICE#) */
104 local_irq_save(flags);
105 config = read_c0_conf();
106 write_c0_conf(config & ~TX39_CONF_ICE);
107 TX39_STOP_STREAMING();
108 blast_icache16_page_indexed(addr);
109 write_c0_conf(config);
110 local_irq_restore(flags);
113 static inline void tx39_blast_icache(void)
115 unsigned long flags, config;
116 /* disable icache (set ICE#) */
117 local_irq_save(flags);
118 config = read_c0_conf();
119 write_c0_conf(config & ~TX39_CONF_ICE);
120 TX39_STOP_STREAMING();
122 write_c0_conf(config);
123 local_irq_restore(flags);
126 static void tx39__flush_cache_vmap(void)
131 static void tx39__flush_cache_vunmap(void)
136 static inline void tx39_flush_cache_all(void)
138 if (!cpu_has_dc_aliases)
144 static inline void tx39___flush_cache_all(void)
150 static void tx39_flush_cache_mm(struct mm_struct *mm)
152 if (!cpu_has_dc_aliases)
155 if (cpu_context(smp_processor_id(), mm) != 0)
159 static void tx39_flush_cache_range(struct vm_area_struct *vma,
160 unsigned long start, unsigned long end)
162 if (!cpu_has_dc_aliases)
164 if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
170 static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
172 int exec = vma->vm_flags & VM_EXEC;
173 struct mm_struct *mm = vma->vm_mm;
180 * If ownes no valid ASID yet, cannot possibly have gotten
181 * this page into the cache.
183 if (cpu_context(smp_processor_id(), mm) == 0)
187 pgdp = pgd_offset(mm, page);
188 pudp = pud_offset(pgdp, page);
189 pmdp = pmd_offset(pudp, page);
190 ptep = pte_offset(pmdp, page);
193 * If the page isn't marked valid, the page cannot possibly be
196 if (!(pte_val(*ptep) & _PAGE_PRESENT))
200 * Doing flushes for another ASID than the current one is
201 * too difficult since stupid R4k caches do a TLB translation
202 * for every cache flush operation. So we do indexed flushes
203 * in that case, which doesn't overly flush the cache too much.
205 if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
206 if (cpu_has_dc_aliases || exec)
207 tx39_blast_dcache_page(page);
209 tx39_blast_icache_page(page);
215 * Do indexed flush, too much work to get the (possible) TLB refills
218 if (cpu_has_dc_aliases || exec)
219 tx39_blast_dcache_page_indexed(page);
221 tx39_blast_icache_page_indexed(page);
/* Per-CPU hook: flush one dcache page given a kernel virtual address. */
static void local_tx39_flush_data_cache_page(void * addr)
{
	tx39_blast_dcache_page((unsigned long)addr);
}
/* Global hook: flush one dcache page (UP system, so no cross-CPU work). */
static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}
234 static void tx39_flush_icache_range(unsigned long start, unsigned long end)
236 if (end - start > dcache_size)
239 protected_blast_dcache_range(start, end);
241 if (end - start > icache_size)
244 unsigned long flags, config;
245 /* disable icache (set ICE#) */
246 local_irq_save(flags);
247 config = read_c0_conf();
248 write_c0_conf(config & ~TX39_CONF_ICE);
249 TX39_STOP_STREAMING();
250 protected_blast_icache_range(start, end);
251 write_c0_conf(config);
252 local_irq_restore(flags);
256 static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
260 if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
263 tx39_blast_dcache_page(addr);
265 } while(addr != end);
266 } else if (size > dcache_size) {
269 blast_dcache_range(addr, addr + size);
273 static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
277 if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
280 tx39_blast_dcache_page(addr);
282 } while(addr != end);
283 } else if (size > dcache_size) {
286 blast_inv_dcache_range(addr, addr + size);
290 static void tx39_flush_cache_sigtramp(unsigned long addr)
292 unsigned long ic_lsize = current_cpu_data.icache.linesz;
293 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
294 unsigned long config;
297 protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
299 /* disable icache (set ICE#) */
300 local_irq_save(flags);
301 config = read_c0_conf();
302 write_c0_conf(config & ~TX39_CONF_ICE);
303 TX39_STOP_STREAMING();
304 protected_flush_icache_line(addr & ~(ic_lsize - 1));
305 write_c0_conf(config);
306 local_irq_restore(flags);
309 static __init void tx39_probe_cache(void)
311 unsigned long config;
313 config = read_c0_conf();
315 icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
316 TX39_CONF_ICS_SHIFT));
317 dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
318 TX39_CONF_DCS_SHIFT));
320 current_cpu_data.icache.linesz = 16;
321 switch (current_cpu_type()) {
323 current_cpu_data.icache.ways = 1;
324 current_cpu_data.dcache.ways = 1;
325 current_cpu_data.dcache.linesz = 4;
329 current_cpu_data.icache.ways = 2;
330 current_cpu_data.dcache.ways = 2;
331 current_cpu_data.dcache.linesz = 16;
336 current_cpu_data.icache.ways = 1;
337 current_cpu_data.dcache.ways = 1;
338 current_cpu_data.dcache.linesz = 16;
343 void __cpuinit tx39_cache_init(void)
345 extern void build_clear_page(void);
346 extern void build_copy_page(void);
347 unsigned long config;
349 config = read_c0_conf();
350 config &= ~TX39_CONF_WBON;
351 write_c0_conf(config);
355 switch (current_cpu_type()) {
357 /* TX39/H core (writethru direct-map cache) */
358 __flush_cache_vmap = tx39__flush_cache_vmap;
359 __flush_cache_vunmap = tx39__flush_cache_vunmap;
360 flush_cache_all = tx39h_flush_icache_all;
361 __flush_cache_all = tx39h_flush_icache_all;
362 flush_cache_mm = (void *) tx39h_flush_icache_all;
363 flush_cache_range = (void *) tx39h_flush_icache_all;
364 flush_cache_page = (void *) tx39h_flush_icache_all;
365 flush_icache_range = (void *) tx39h_flush_icache_all;
366 local_flush_icache_range = (void *) tx39h_flush_icache_all;
368 flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
369 local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
370 flush_data_cache_page = (void *) tx39h_flush_icache_all;
372 _dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
374 shm_align_mask = PAGE_SIZE - 1;
381 /* TX39/H2,H3 core (writeback 2way-set-associative cache) */
382 r3k_have_wired_reg = 1;
383 write_c0_wired(0); /* set 8 on reset... */
384 /* board-dependent init code may set WBON */
386 __flush_cache_vmap = tx39__flush_cache_vmap;
387 __flush_cache_vunmap = tx39__flush_cache_vunmap;
389 flush_cache_all = tx39_flush_cache_all;
390 __flush_cache_all = tx39___flush_cache_all;
391 flush_cache_mm = tx39_flush_cache_mm;
392 flush_cache_range = tx39_flush_cache_range;
393 flush_cache_page = tx39_flush_cache_page;
394 flush_icache_range = tx39_flush_icache_range;
395 local_flush_icache_range = tx39_flush_icache_range;
397 flush_cache_sigtramp = tx39_flush_cache_sigtramp;
398 local_flush_data_cache_page = local_tx39_flush_data_cache_page;
399 flush_data_cache_page = tx39_flush_data_cache_page;
401 _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
402 _dma_cache_wback = tx39_dma_cache_wback_inv;
403 _dma_cache_inv = tx39_dma_cache_inv;
405 shm_align_mask = max_t(unsigned long,
406 (dcache_size / current_cpu_data.dcache.ways) - 1,
412 current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
413 current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
415 current_cpu_data.icache.sets =
416 current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
417 current_cpu_data.dcache.sets =
418 current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;
420 if (current_cpu_data.dcache.waysize > PAGE_SIZE)
421 current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;
423 current_cpu_data.icache.waybit = 0;
424 current_cpu_data.dcache.waybit = 0;
426 printk("Primary instruction cache %ldkB, linesize %d bytes\n",
427 icache_size >> 10, current_cpu_data.icache.linesz);
428 printk("Primary data cache %ldkB, linesize %d bytes\n",
429 dcache_size >> 10, current_cpu_data.dcache.linesz);
433 tx39h_flush_icache_all();