2 * Cache control for MicroBlaze cache memories
4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
13 #include <asm/cacheflush.h>
14 #include <linux/cache.h>
15 #include <asm/cpuinfo.h>
/*
 * Enable the instruction cache by setting the ICE bit in the MSR with the
 * atomic msrset instruction (requires PVR2_USE_MSR_INSTR support).
 * NOTE(review): this chunk is a sampled extraction; intermediate lines
 * (braces, asm continuation) are missing from view.
 */
18 static inline void __enable_icache_msr(void)
20 __asm__ __volatile__ (" msrset r0, %0; \
22 : : "i" (MSR_ICE) : "memory");
/*
 * Disable the instruction cache by clearing MSR_ICE with the atomic
 * msrclr instruction.
 */
25 static inline void __disable_icache_msr(void)
27 __asm__ __volatile__ (" msrclr r0, %0; \
29 : : "i" (MSR_ICE) : "memory");
/*
 * Enable the data cache via msrset.  The immediate operand (not visible in
 * this sampled view) is presumably MSR_DCE - mirrors __enable_icache_msr.
 */
32 static inline void __enable_dcache_msr(void)
34 __asm__ __volatile__ (" msrset r0, %0; \
/*
 * Disable the data cache via msrclr - mirrors __disable_icache_msr.
 */
41 static inline void __disable_dcache_msr(void)
43 __asm__ __volatile__ (" msrclr r0, %0; \
/*
 * Enable the instruction cache on CPUs WITHOUT the msrset/msrclr
 * instructions: read MSR into a scratch register (r12), modify, write back.
 * Not atomic - callers disable interrupts around cache transitions.
 */
50 static inline void __enable_icache_nomsr(void)
52 __asm__ __volatile__ (" mfs r12, rmsr; \
/*
 * Disable the instruction cache via read-modify-write of rmsr
 * (no msrclr instruction available on this CPU configuration).
 */
62 static inline void __disable_icache_nomsr(void)
64 __asm__ __volatile__ (" mfs r12, rmsr; \
/*
 * Enable the data cache via read-modify-write of rmsr - nomsr variant of
 * __enable_dcache_msr.
 */
74 static inline void __enable_dcache_nomsr(void)
76 __asm__ __volatile__ (" mfs r12, rmsr; \
/*
 * Disable the data cache via read-modify-write of rmsr - nomsr variant of
 * __disable_dcache_msr.
 */
86 static inline void __disable_dcache_nomsr(void)
88 __asm__ __volatile__ (" mfs r12, rmsr; \
/*
 * Clamp [start, end) for a cache-range operation: never touch more than one
 * whole cache's worth of lines (end = min(start + cache_size, end)), then
 * round 'end' up to the next cache-line boundary so partial lines at the top
 * of the range are covered.  Modifies 'end' in place in the caller.
 */
99 /* Helper macro for computing the limits of cache range loops */
100 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
102 int align = ~(cache_line_length - 1); \
103 end = min(start + cache_size, end); \
105 end = ((end & align) + cache_line_length); \
/*
 * Whole-cache asm loop: counts 'len' down from cache_size to 0 in steps of
 * -line_length, issuing "#op len, r0" (address in the FIRST operand) for
 * every line.  Used for wic/wdc which take the address as operand 1.
 */
109 * Helper macro to loop over the specified cache_size/line_length and
110 * execute 'op' on that cacheline
112 #define CACHE_ALL_LOOP(cache_size, line_length, op) \
114 unsigned int len = cache_size; \
115 int step = -line_length; \
118 __asm__ __volatile__ (" 1: " #op " %0, r0; \
121 " : : "r" (len), "r" (step) \
/*
 * Variant of CACHE_ALL_LOOP with the operands swapped: issues
 * "#op r0, len" (address in the SECOND operand).  Used for two-operand
 * forms such as wdc.clear - see __invalidate_dcache_all_wb.
 */
126 #define CACHE_ALL_LOOP2(cache_size, line_length, op) \
128 unsigned int len = cache_size; \
129 int step = -line_length; \
132 __asm__ __volatile__ (" 1: " #op " r0, %0; \
135 " : : "r" (len), "r" (step) \
/*
 * Range loop for two-operand cache ops (wdc.flush / wdc.clear): walks
 * count = end - start downward by line_length, issuing "#op start, count".
 * BUG_ON guards against an empty/inverted range after CACHE_LOOP_LIMITS.
 * "memory" clobber orders the op against surrounding accesses.
 */
139 /* for wdc.flush/clear */
140 #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
142 int step = -line_length; \
143 int count = end - start; \
144 BUG_ON(count <= 0); \
146 __asm__ __volatile__ (" 1: " #op " %0, %1; \
149 " : : "r" (start), "r" (count), \
150 "r" (step) : "memory"); \
/*
 * Range loop for one-operand cache ops: only the first operand of OP
 * carries the address (second is r0) - used for wic and plain wdc.
 * Iterates a temp address from end down to start by line_length.
 */
153 /* Only the first parameter of OP is used - for wic, wdc */
154 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
157 BUG_ON(end - start <= 0); \
159 __asm__ __volatile__ (" 1: " #op " %1, r0; \
163 " : : "r" (temp), "r" (start), "r" (end),\
164 "r" (line_length) : "memory"); \
/*
 * Flush an icache address range on CPUs with msrset/msrclr: disable IRQs,
 * turn the icache off via MSR, invalidate each line with wic, then restore.
 * The asm-loop and C-loop bodies are alternatives - presumably selected by
 * an #ifdef (ASM_LOOP) not visible in this sampled view; only one runs.
 */
169 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
175 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
176 (unsigned int)start, (unsigned int) end);
178 CACHE_LOOP_LIMITS(start, end,
179 cpuinfo.icache_line_length, cpuinfo.icache_size);
181 local_irq_save(flags);
182 __disable_icache_msr();
185 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
187 for (i = start; i < end; i += cpuinfo.icache_line_length)
188 __asm__ __volatile__ ("wic %0, r0;" \
191 __enable_icache_msr();
192 local_irq_restore(flags);
/*
 * Same as __flush_icache_range_msr_irq but uses the rmsr read-modify-write
 * helpers for CPUs lacking msrset/msrclr.
 */
195 static void __flush_icache_range_nomsr_irq(unsigned long start,
202 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
203 (unsigned int)start, (unsigned int) end);
205 CACHE_LOOP_LIMITS(start, end,
206 cpuinfo.icache_line_length, cpuinfo.icache_size);
208 local_irq_save(flags);
209 __disable_icache_nomsr();
212 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
214 for (i = start; i < end; i += cpuinfo.icache_line_length)
215 __asm__ __volatile__ ("wic %0, r0;" \
219 __enable_icache_nomsr();
220 local_irq_restore(flags);
/*
 * Flush an icache range on newer cores that allow wic with the cache left
 * enabled and interrupts on - no disable/enable bracket needed.
 */
223 static void __flush_icache_range_noirq(unsigned long start,
229 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
230 (unsigned int)start, (unsigned int) end);
232 CACHE_LOOP_LIMITS(start, end,
233 cpuinfo.icache_line_length, cpuinfo.icache_size);
235 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
237 for (i = start; i < end; i += cpuinfo.icache_line_length)
238 __asm__ __volatile__ ("wic %0, r0;" \
/*
 * Flush the ENTIRE icache (IRQs off, icache off via MSR): wic every line
 * from 0 to icache_size.
 */
243 static void __flush_icache_all_msr_irq(void)
249 pr_debug("%s\n", __func__);
251 local_irq_save(flags);
252 __disable_icache_msr();
254 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
256 for (i = 0; i < cpuinfo.icache_size;
257 i += cpuinfo.icache_line_length)
258 __asm__ __volatile__ ("wic %0, r0;" \
261 __enable_icache_msr();
262 local_irq_restore(flags);
/*
 * Whole-icache flush, nomsr variant (rmsr read-modify-write instead of
 * msrset/msrclr).
 */
265 static void __flush_icache_all_nomsr_irq(void)
271 pr_debug("%s\n", __func__);
273 local_irq_save(flags);
274 __disable_icache_nomsr();
276 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
278 for (i = 0; i < cpuinfo.icache_size;
279 i += cpuinfo.icache_line_length)
280 __asm__ __volatile__ ("wic %0, r0;" \
283 __enable_icache_nomsr();
284 local_irq_restore(flags);
/*
 * Whole-icache flush for newer cores: no IRQ masking, cache stays enabled.
 */
287 static void __flush_icache_all_noirq(void)
292 pr_debug("%s\n", __func__);
294 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
296 for (i = 0; i < cpuinfo.icache_size;
297 i += cpuinfo.icache_line_length)
298 __asm__ __volatile__ ("wic %0, r0;" \
/*
 * Invalidate the whole dcache (write-through model - nothing to write back):
 * IRQs off, dcache off via MSR, wdc every line, then restore.
 */
303 static void __invalidate_dcache_all_msr_irq(void)
309 pr_debug("%s\n", __func__);
311 local_irq_save(flags);
312 __disable_dcache_msr();
314 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
316 for (i = 0; i < cpuinfo.dcache_size;
317 i += cpuinfo.dcache_line_length)
318 __asm__ __volatile__ ("wdc %0, r0;" \
321 __enable_dcache_msr();
322 local_irq_restore(flags);
/*
 * Whole-dcache invalidate, nomsr variant of __invalidate_dcache_all_msr_irq.
 */
325 static void __invalidate_dcache_all_nomsr_irq(void)
331 pr_debug("%s\n", __func__);
333 local_irq_save(flags);
334 __disable_dcache_nomsr();
336 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
338 for (i = 0; i < cpuinfo.dcache_size;
339 i += cpuinfo.dcache_line_length)
340 __asm__ __volatile__ ("wdc %0, r0;" \
343 __enable_dcache_nomsr();
344 local_irq_restore(flags);
/*
 * Whole-dcache invalidate for newer write-through cores: no IRQ masking,
 * cache stays enabled while wdc runs.
 */
347 static void __invalidate_dcache_all_noirq_wt(void)
352 pr_debug("%s\n", __func__);
354 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
356 for (i = 0; i < cpuinfo.dcache_size;
357 i += cpuinfo.dcache_line_length)
358 __asm__ __volatile__ ("wdc %0, r0;" \
/*
 * Whole-dcache invalidate for WRITE-BACK caches: uses wdc.clear (discard
 * without write-back) via the swapped-operand CACHE_ALL_LOOP2.  The FIXME
 * below is the original author's: plain wdc caused bus errors here.
 */
363 /* FIXME this is weird - should be only wdc but not work
364 * MS: I am getting bus errors and other weird things */
365 static void __invalidate_dcache_all_wb(void)
370 pr_debug("%s\n", __func__);
372 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
375 for (i = 0; i < cpuinfo.dcache_size;
376 i += cpuinfo.dcache_line_length)
377 __asm__ __volatile__ ("wdc.clear %0, r0;" \
382 static void __invalidate_dcache_range_wb(unsigned long start,
388 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
389 (unsigned int)start, (unsigned int) end);
391 CACHE_LOOP_LIMITS(start, end,
392 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
394 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
396 for (i = start; i < end; i += cpuinfo.icache_line_length)
397 __asm__ __volatile__ ("wdc.clear %0, r0;" \
402 static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
408 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
409 (unsigned int)start, (unsigned int) end);
410 CACHE_LOOP_LIMITS(start, end,
411 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
414 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
416 for (i = start; i < end; i += cpuinfo.icache_line_length)
417 __asm__ __volatile__ ("wdc %0, r0;" \
422 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
429 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
430 (unsigned int)start, (unsigned int) end);
431 CACHE_LOOP_LIMITS(start, end,
432 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
434 local_irq_save(flags);
435 __disable_dcache_msr();
438 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
440 for (i = start; i < end; i += cpuinfo.icache_line_length)
441 __asm__ __volatile__ ("wdc %0, r0;" \
445 __enable_dcache_msr();
446 local_irq_restore(flags);
449 static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
456 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
457 (unsigned int)start, (unsigned int) end);
459 CACHE_LOOP_LIMITS(start, end,
460 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
462 local_irq_save(flags);
463 __disable_dcache_nomsr();
466 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
468 for (i = start; i < end; i += cpuinfo.icache_line_length)
469 __asm__ __volatile__ ("wdc %0, r0;" \
473 __enable_dcache_nomsr();
474 local_irq_restore(flags);
/*
 * Flush the whole WRITE-BACK dcache: wdc.flush writes each dirty line back
 * to memory (and invalidates it) for every line from 0 to dcache_size.
 */
477 static void __flush_dcache_all_wb(void)
482 pr_debug("%s\n", __func__);
484 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
487 for (i = 0; i < cpuinfo.dcache_size;
488 i += cpuinfo.dcache_line_length)
489 __asm__ __volatile__ ("wdc.flush %0, r0;" \
494 static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
499 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
500 (unsigned int)start, (unsigned int) end);
502 CACHE_LOOP_LIMITS(start, end,
503 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
505 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
507 for (i = start; i < end; i += cpuinfo.icache_line_length)
508 __asm__ __volatile__ ("wdc.flush %0, r0;" \
/*
 * Operations table for write-back caches on cores WITH msrset/msrclr.
 * icache ops use the noirq variants; dcache flush/invalidate use the wb
 * (wdc.flush / wdc.clear) implementations.
 */
513 /* struct for wb caches and for wt caches */
516 /* new wb cache model */
517 const struct scache wb_msr = {
518 .ie = __enable_icache_msr,
519 .id = __disable_icache_msr,
520 .ifl = __flush_icache_all_noirq,
521 .iflr = __flush_icache_range_noirq,
522 .iin = __flush_icache_all_noirq,
523 .iinr = __flush_icache_range_noirq,
524 .de = __enable_dcache_msr,
525 .dd = __disable_dcache_msr,
526 .dfl = __flush_dcache_all_wb,
527 .dflr = __flush_dcache_range_wb,
528 .din = __invalidate_dcache_all_wb,
529 .dinr = __invalidate_dcache_range_wb,
/*
 * Write-back table for cores WITHOUT msrset/msrclr: identical to wb_msr
 * except the four enable/disable hooks use the nomsr (rmsr RMW) helpers.
 */
532 /* There is only difference in ie, id, de, dd functions */
533 const struct scache wb_nomsr = {
534 .ie = __enable_icache_nomsr,
535 .id = __disable_icache_nomsr,
536 .ifl = __flush_icache_all_noirq,
537 .iflr = __flush_icache_range_noirq,
538 .iin = __flush_icache_all_noirq,
539 .iinr = __flush_icache_range_noirq,
540 .de = __enable_dcache_nomsr,
541 .dd = __disable_dcache_nomsr,
542 .dfl = __flush_dcache_all_wb,
543 .dflr = __flush_dcache_range_wb,
544 .din = __invalidate_dcache_all_wb,
545 .dinr = __invalidate_dcache_range_wb,
/*
 * Old write-through model (pre-7.20.a cores, msr instructions available):
 * every flush/invalidate brackets the loop with IRQ-off + cache-off.
 */
548 /* Old wt cache model with disabling irq and turn off cache */
549 const struct scache wt_msr = {
550 .ie = __enable_icache_msr,
551 .id = __disable_icache_msr,
552 .ifl = __flush_icache_all_msr_irq,
553 .iflr = __flush_icache_range_msr_irq,
554 .iin = __flush_icache_all_msr_irq,
555 .iinr = __flush_icache_range_msr_irq,
556 .de = __enable_dcache_msr,
557 .dd = __disable_dcache_msr,
558 .dfl = __invalidate_dcache_all_msr_irq,
559 .dflr = __invalidate_dcache_range_msr_irq_wt,
560 .din = __invalidate_dcache_all_msr_irq,
561 .dinr = __invalidate_dcache_range_msr_irq_wt,
/*
 * Old write-through model without msrset/msrclr: nomsr enable/disable
 * helpers, IRQ-off + cache-off bracketed flush/invalidate loops.
 */
564 const struct scache wt_nomsr = {
565 .ie = __enable_icache_nomsr,
566 .id = __disable_icache_nomsr,
567 .ifl = __flush_icache_all_nomsr_irq,
568 .iflr = __flush_icache_range_nomsr_irq,
569 .iin = __flush_icache_all_nomsr_irq,
570 .iinr = __flush_icache_range_nomsr_irq,
571 .de = __enable_dcache_nomsr,
572 .dd = __disable_dcache_nomsr,
573 .dfl = __invalidate_dcache_all_nomsr_irq,
574 .dflr = __invalidate_dcache_range_nomsr_irq,
575 .din = __invalidate_dcache_all_nomsr_irq,
576 .dinr = __invalidate_dcache_range_nomsr_irq,
/*
 * New write-through model (>= 7.20.a cores, msr instructions available):
 * wic/wdc are safe with the cache enabled, so the noirq/nomsr-wt loop
 * variants are used - no IRQ masking, no cache disable.
 */
579 /* New wt cache model for newer Microblaze versions */
580 const struct scache wt_msr_noirq = {
581 .ie = __enable_icache_msr,
582 .id = __disable_icache_msr,
583 .ifl = __flush_icache_all_noirq,
584 .iflr = __flush_icache_range_noirq,
585 .iin = __flush_icache_all_noirq,
586 .iinr = __flush_icache_range_noirq,
587 .de = __enable_dcache_msr,
588 .dd = __disable_dcache_msr,
589 .dfl = __invalidate_dcache_all_noirq_wt,
590 .dflr = __invalidate_dcache_range_nomsr_wt,
591 .din = __invalidate_dcache_all_noirq_wt,
592 .dinr = __invalidate_dcache_range_nomsr_wt,
/*
 * New write-through model without msrset/msrclr: same noirq loop variants
 * as wt_msr_noirq, nomsr enable/disable hooks.
 */
595 const struct scache wt_nomsr_noirq = {
596 .ie = __enable_icache_nomsr,
597 .id = __disable_icache_nomsr,
598 .ifl = __flush_icache_all_noirq,
599 .iflr = __flush_icache_range_noirq,
600 .iin = __flush_icache_all_noirq,
601 .iinr = __flush_icache_range_noirq,
602 .de = __enable_dcache_nomsr,
603 .dd = __disable_dcache_nomsr,
604 .dfl = __invalidate_dcache_all_noirq_wt,
605 .dflr = __invalidate_dcache_range_nomsr_wt,
606 .din = __invalidate_dcache_all_noirq_wt,
607 .dinr = __invalidate_dcache_range_nomsr_wt,
/*
 * Version-code thresholds used by microblaze_cache_init to pick an ops
 * table: >= 7.20.a gets the noirq model, < 7.20.d has a known WB hw bug.
 * NOTE(review): INFO's trailing semicolon makes call sites expand to a
 * double semicolon - harmless at statement level but worth cleaning up.
 */
610 /* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
611 #define CPUVER_7_20_A 0x0c
612 #define CPUVER_7_20_D 0x0f
614 #define INFO(s) printk(KERN_INFO "cache: " s "\n");
616 void microblaze_cache_init(void)
618 if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
619 if (cpuinfo.dcache_wb) {
621 mbc = (struct scache *)&wb_msr;
622 if (cpuinfo.ver_code < CPUVER_7_20_D) {
623 /* MS: problem with signal handling - hw bug */
624 INFO("WB won't work properly");
627 if (cpuinfo.ver_code >= CPUVER_7_20_A) {
628 INFO("wt_msr_noirq");
629 mbc = (struct scache *)&wt_msr_noirq;
632 mbc = (struct scache *)&wt_msr;
636 if (cpuinfo.dcache_wb) {
638 mbc = (struct scache *)&wb_nomsr;
639 if (cpuinfo.ver_code < CPUVER_7_20_D) {
640 /* MS: problem with signal handling - hw bug */
641 INFO("WB won't work properly");
644 if (cpuinfo.ver_code >= CPUVER_7_20_A) {
645 INFO("wt_nomsr_noirq");
646 mbc = (struct scache *)&wt_nomsr_noirq;
649 mbc = (struct scache *)&wt_nomsr;