/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>
static int l2_line_sz;

volatile int slc_enable = 1;

void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_wback)(unsigned long start, unsigned long sz);
char *arc_cache_mumbojumbo(int c, char *buf, int len)
	struct cpuinfo_arc_cache *p;

#define IS_USED_RUN(v)		((v) ? "" : "(disabled) ")
#define PR_CACHE(p, cfg, str)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

		n += scnprintf(buf + n, len - n, "IOC\t\t: exists\n");
/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here; simply read/convert the BCRs.
 */
void read_decode_cache_bcr(void)
	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;

	struct bcr_generic sbcr;

#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
		unsigned int sz:4, lsz:2, way:2, pad:24;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;

	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
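	/*
	 * Rough intuition for the check above: aliasing is possible when the
	 * way size (sz_k / assoc) exceeds the page size, e.g. a 32K, 2-way
	 * VIPT I-cache with 8K pages has 16K ways, so one cache-index bit
	 * comes from the vaddr and two virtual mappings of the same physical
	 * page can land in different cache lines.
	 */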
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;
	p_slc = &cpuinfo_arc700[cpu].slc;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);

		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
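		/*
		 * Per the decode above: SLC size is 128K << CFG.SZ and the
		 * line length is 128 bytes when CFG.LSZ == 0, else 64 bytes.
		 * l2_line_sz is stashed so slc_op() can round its region end
		 * up to a full L2 line later on.
		 */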
	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_FLUSH_N_INV		0x3
#define OP_INV_IC		0x4
/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * MMU v1/v2 (Fixed Page Size 8k)
 *
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this "clumsy"
 * interface for additional info was that no new reg was needed in CDU
 * programming.
 *
 * 17:13 represented the max num of bits passable; actual bits needed were
 * fewer, based on the num-of-aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
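 *
 * A rough worked example (8K pages assumed): a 64K, 2-way cache has 32K
 * ways, i.e. 4 pages per way, so vaddr bits [14:13] pick among the 4
 * possible aliases; __cache_line_loop_v2() below simply ORs
 * (vaddr >> PAGE_SHIFT) & 0x1F into the low bits of paddr to hand those
 * bits to the CDU.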
 *
 * MMU v3
 *
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from a hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers, IC_PTAG and DC_PTAG, were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
	unsigned int aux_cmd;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;
	vaddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
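	/*
	 * Rough example of the rounding above (assuming 64-byte lines): a
	 * 16-byte request at paddr 0x1078 straddles a line boundary; sz
	 * grows by the 0x38 line offset to 0x48, paddr rounds down to
	 * 0x1040 (vaddr likewise to its line boundary), and num_lines
	 * works out to 2.
	 */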
	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
	unsigned int aux_cmd, aux_tag;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;
	vaddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
	unsigned int aux_cmd;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
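/*
 * In short: MMU v1/v2 use the paddr-stuffing loop, MMU v3 the PTAG-based
 * loop, and MMU v4 the paddr-only loop, matching the three variants above.
 */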
#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
static inline void __after_dc_op(const int op)
	const unsigned int ctl = ARC_REG_DC_CTRL;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 */
static inline void __dc_entire_op(const int op)
	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);
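	/*
	 * Writing 1 to the selected aux reg acts on the whole D-cache:
	 * IVDC invalidates every line, FLSH writes back all dirty lines.
	 */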
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
	local_irq_save(flags);

	__cache_line_loop(paddr, vaddr, sz, op);

	local_irq_restore(flags);

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */
#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */

__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

	unsigned long paddr, vaddr;

static void __ic_line_inv_vaddr_helper(void *info)
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
	struct ic_inv_args ic_inv = {

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
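	/*
	 * Each core has a private I-cache that hardware does not keep
	 * coherent here, hence the invalidate is cross-called to every CPU
	 * rather than run only locally.
	 */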
#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */
noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop).
	 */
	static DEFINE_SPINLOCK(lock);

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */

		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
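	/*
	 * Example of the rounding above (assuming 64-byte SLC lines): for
	 * paddr 0x8000_0040 and sz 0x20, END is written as 0x8000_009f and
	 * START as 0x8000_0040, so the whole line holding the buffer is
	 * covered and END never equals START.
	 */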
	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
/***********************************************************
 * Handle cache congruency of kernel and userspace mappings of a page when
 * kernel writes-to/reads-from it
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);

	/* don't handle anon pages here */
	mapping = page_mapping(page);

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
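		/*
		 * @vaddr above is not a real user address: the page's file
		 * offset is used as a stand-in, which presumably carries the
		 * same cache-index bits as a congruent U-mapping, which is
		 * all the congruency check below needs.
		 */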
		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);

EXPORT_SYMBOL(flush_dcache_page);
/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);

static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_INV);

static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH);

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);

static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);

static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding the need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}
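/*
 * Exactly one of the three families above (ioc / slc / l1) is installed into
 * the __dma_cache_* function pointers by arc_cache_init() below, depending on
 * whether an IOC port and/or an SLC is present and enabled.
 */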
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
	__dma_cache_wback_inv(start, sz);
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
	__dma_cache_inv(start, sz);
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
	__dma_cache_wback(start, sz);
EXPORT_SYMBOL(dma_cache_wback);
/*
 * This is the API for making I/D caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...).
 * It is called on insmod, with the kernel virtual address of the module's
 * CODE. ARC cache maintenance ops require a PHY address, thus we
 * need to convert the vmalloc addr to a PHY addr.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence the need for a loop.
	 */
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on a loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
noinline void flush_cache_all(void)
	local_irq_save(flags);

	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with the K-mapping, sync the former to the physical
	 * page so that the K-mapping in the memcpy below sees the right data.
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non-copied user pages (e.g. read faults which wire in pagecache
	 * pages).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
		clear_bit(PG_dc_clean, &from->flags);

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
	clear_bit(PG_dc_clean, &page->flags);
/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
	/* TBD: optimize this */
void arc_cache_init(void)
	unsigned int __maybe_unused cpu = smp_processor_id();

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {

		/* IM set : flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			      read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for flush to complete */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

		write_aux_reg(ARC_REG_SLC_CTRL,
			      read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
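	/*
	 * In short: when the SLC is present but administratively disabled,
	 * the sequence above flush-n-invalidates the whole SLC (IM set so
	 * dirty lines are written back first), waits for completion, and
	 * only then sets the DISABLE bit.
	 */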
	if (is_isa_arcv2() && ioc_exists) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512MB: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;