/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

#define CACHE_LINE_SIZE		32
static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

static bool of_init = false;
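/*
 * of_init is set once the controller has been probed via device tree;
 * in that case l2x0_of_init() has already installed the outer_cache
 * callbacks, and l2x0_init() must not overwrite them with the defaults.
 */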
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
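/*
 * A write to the sync register drains the controller's buffers so that
 * all previously posted maintenance operations complete. On PL310 the
 * sync operation is atomic and the trailing cache_wait() compiles away;
 * on L210/L220 we must poll until the controller reports idle.
 */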
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif
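/*
 * Writing 0x03 to the PL310 debug control register sets the DCL and DWB
 * bits (disable cache linefill, disable write-back/force write-through),
 * which the errata workarounds use to bracket maintenance operations;
 * writing 0x00 restores normal operation.
 */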
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
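/*
 * The three range walkers above process at most 4096 bytes per pass and
 * briefly drop l2x0_lock between passes, bounding the time interrupts
 * stay disabled when cleaning or invalidating large ranges.
 */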
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
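/*
 * Each lockdown register holds a per-way bitmap; a set bit excludes
 * that way from allocation for the corresponding master. Clearing the
 * D and I lockdown registers for every master guarantees the freshly
 * enabled cache can allocate into all of its ways.
 */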
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;
	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if l2x0 controller is already enabled.
	 * If you are booting from non-secure mode
	 * accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.inv_all = l2x0_inv_all;
		outer_cache.disable = l2x0_disable;
	}

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}
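/*
 * Example (hypothetical values): a non-DT board file with the controller
 * statically mapped at virtual address L2CC_BASE could enable it with
 *
 *	l2x0_init(IOMEM(L2CC_BASE), 0x30400000, 0x8200c3fe);
 *
 * where L2CC_BASE and the aux value/mask pair are purely illustrative
 * and must come from the platform's documentation.
 */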
#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
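/*
 * Worked example: with 4 KiB pages, a start address of 0x10000fe0 can
 * never yield an end beyond 0x10001000, the next page boundary, no
 * matter how large the requested range or MAX_RANGE_SIZE is; each
 * hardware range operation therefore stays within one page.
 */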
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}
static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round start and end addresses to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}
static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
							AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
							AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}
/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate addresses starting at 0xBFFF0000 and
 * ending at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL
static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
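/*
 * Worked example: 0x50000000 lies in the SYS EMI window (section 2), so
 * bcm_l2_phys_addr(0x50000000) yields 0x90000000; an address such as
 * 0xD0000000 falls in section 3 and is offset by 0x80000000 instead.
 */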
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;

	*aux_mask &= ~mask;
}
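/*
 * The hardware latency fields above and in the PL310 latency control
 * registers below are programmed as (cycles - 1): a device tree
 * property such as "arm,tag-latency = <2>;" (illustrative) requests a
 * two-cycle tag RAM latency and writes 1 into the field.
 */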
static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}
static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}
static void __init tauros3_save(void)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
}
static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}
static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}
static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}
static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}
static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;

	/* Enable broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}
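/*
 * AURORA_CTRL_FW lives in a Marvell implementation-defined CP15
 * register (p15, 1, c15, c2, 0); with the bit set, CP15 cache
 * maintenance operations issued by the CPU are broadcast to the Aurora
 * L2, which is how the "system cache" (no outer interface)
 * configuration is driven.
 */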
static void __init aurora_of_setup(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
static const struct l2x0_of_data pl310_data = {
	.setup = pl310_of_setup,
	.save  = pl310_save,
	.outer_cache = {
		.resume      = pl310_resume,
		.inv_range   = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};
static const struct l2x0_of_data l2x0_data = {
	.setup = l2x0_of_setup,
	.save  = NULL,
	.outer_cache = {
		.resume      = l2x0_resume,
		.inv_range   = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};
static const struct l2x0_of_data aurora_with_outer_data = {
	.setup = aurora_of_setup,
	.save  = aurora_save,
	.outer_cache = {
		.resume      = aurora_resume,
		.inv_range   = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};
static const struct l2x0_of_data aurora_no_outer_data = {
	.setup = aurora_of_setup,
	.save  = aurora_save,
	.outer_cache = {
		.resume      = aurora_resume,
	},
};
static const struct l2x0_of_data tauros3_data = {
	.setup = NULL,
	.save  = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume      = tauros3_resume,
	},
};
static const struct l2x0_of_data bcm_l2x0_data = {
	.setup = pl310_of_setup,
	.save  = pl310_save,
	.outer_cache = {
		.resume      = pl310_resume,
		.inv_range   = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};
static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
	  .data = (void *)&bcm_l2x0_data},
	{ .compatible = "brcm,bcm11351-a2-pl310-cache",
	  .data = (void *)&bcm_l2x0_data},
	{ .compatible = "marvell,aurora-outer-cache",
	  .data = (void *)&aurora_with_outer_data},
	{ .compatible = "marvell,aurora-system-cache",
	  .data = (void *)&aurora_no_outer_data},
	{ .compatible = "marvell,tauros3-cache",
	  .data = (void *)&tauros3_data },
	{}
};
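/*
 * Example (illustrative only) of a device tree node matched above:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <1 1 1>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */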
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	const struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);

		/* For aurora cache in no outer mode select the
		 * correct mode using the coprocessor */
		if (data == &aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
	l2x0_init(l2x0_base, aux_val, aux_mask);

	return 0;
}
#endif
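/*
 * Typical call site (illustrative): a DT platform's machine init code
 * runs
 *
 *	l2x0_of_init(0, ~0UL);
 *
 * which keeps the probed AUX_CTRL value except for the bits the
 * per-controller setup hooks explicitly override.
 */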