/*
 * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"
struct l2c_init_data {
	const char *type;
	unsigned way_size_0:3;
	unsigned num_lock:4;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	void (*configure)(void __iomem *);
	void (*unlock)(void __iomem *, unsigned);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;
/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
static void l2c_configure(void __iomem *base)
{
	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
	unsigned long flags;

	if (outer_cache.configure)
		outer_cache.configure(&l2x0_saved_regs);
	else
		l2x0_data->configure(base);

	l2x0_data->unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void l2c_resume(void)
{
	void __iomem *base = l2x0_base;

	/* Do not touch the controller if already enabled. */
	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_data->num_lock);
}
/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a sync register other than L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}
static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};
/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
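
/*
 * Illustrative note (not part of the original source): l2c220_op_pa_range
 * issues at most 4096 bytes (128 lines of 32 bytes) per block, then briefly
 * drops and retakes l2x0_lock between blocks.  For example, a 16KB range is
 * issued as four such blocks, which bounds how long the lock is continuously
 * held while each line operation is waited for and written out in turn.
 */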
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);
}

static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};
/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
							L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
							L310_POWER_CTRL);
}

static void l2c310_configure(void __iomem *base)
{
	unsigned revision;

	l2c_configure(base);

	/* restore pl310 setup */
	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
		      L310_TAG_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.data_latency, base,
		      L310_DATA_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.filter_end, base,
		      L310_ADDR_FILTER_END);
	l2c_write_sec(l2x0_saved_regs.filter_start, base,
		      L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
			      L310_PREFETCH_CTRL);
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
			      L310_POWER_CTRL);
}
static int l2c310_starting_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
	return 0;
}

static int l2c310_dying_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
	return 0;
}
static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
	u32 aux = l2x0_saved_regs.aux_ctrl;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);

	/* Read back resulting AUX_CTRL value as it could have been altered. */
	aux = readl_relaxed(base + L2X0_AUX_CTRL);

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
				  "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
				  l2c310_dying_cpu);
}
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = l2x0_saved_regs.prefetch_ctrl;
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2x0_saved_regs.prefetch_ctrl = val;
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}
static void l2c310_disable(void)
{
	/*
	 * If full-line-of-zeros is enabled, we must first disable it in the
	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
	 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}

static void l2c310_resume(void)
{
	l2c_resume();

	/* Re-enable full-line-of-zeros for Cortex-A9 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}

static void l2c310_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
static int __init __l2c_init(const struct l2c_init_data *data,
			     u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Save the pointer globally so that callbacks which do not receive
	 * context from callers can access the structure.
	 */
	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
	if (!l2x0_data)
		return -ENOMEM;

	/*
	 * Sanity check the aux values.  aux_mask is the bits we preserve
	 * from reading the hardware register, and aux_val is the bits we
	 * set.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, aux);
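
	/*
	 * Illustrative note (not part of the original source): the merge
	 * above computes aux = (hw_value & aux_mask) | aux_val, so a bit
	 * set in both aux_mask ("preserve from hardware") and aux_val
	 * ("force to one") is ambiguous - hence the register corruption
	 * warning when aux_val & aux_mask is non-zero.
	 */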

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);
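
	/*
	 * Worked example (illustrative, not part of the original source):
	 * with way_size_0 = SZ_8K and a WAY_SIZE field of 3, each way is
	 * 8K << 3 = 64K; a 16-way L2C-310 then gives l2x0_size =
	 * 16 * 64K = 1MB.
	 */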

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	fns.configure = outer_cache.configure;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (nosync) {
		pr_info("L2C: disabling outer sync\n");
		fns.sync = NULL;
	}

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		l2x0_saved_regs.aux_ctrl = aux;

		data->enable(l2x0_base, data->num_lock);
	}

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);

	return 0;
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	__l2c_init(data, aux_val, aux_mask, cache_id, false);
}
#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * The Aurora cache controller doesn't have the cache ID register
 * available, so we have to pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;
/**
 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 * @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 * be augmented by the call (bits to be set to 0)
 * @associativity: variable to return the calculated associativity in
 * @max_way_size: the maximum size in bytes for the cache ways
 */
static int __init l2x0_cache_size_of_parse(const struct device_node *np,
					   u32 *aux_val, u32 *aux_mask,
					   u32 *associativity,
					   u32 max_way_size)
{
	u32 mask = 0, val = 0;
	u32 cache_size = 0, sets = 0;
	u32 way_size_bits = 1;
	u32 way_size = 0;
	u32 block_size = 0;
	u32 line_size = 0;

	of_property_read_u32(np, "cache-size", &cache_size);
	of_property_read_u32(np, "cache-sets", &sets);
	of_property_read_u32(np, "cache-block-size", &block_size);
	of_property_read_u32(np, "cache-line-size", &line_size);

	if (!cache_size || !sets)
		return -ENODEV;

	/* All these l2 caches have the same line = block size actually */
	if (!line_size) {
		if (block_size) {
			/* If linesize is not given, it is equal to blocksize */
			line_size = block_size;
		} else {
			/* Fall back to known size */
			pr_warn("L2C OF: no cache block/line size given: "
				"falling back to default size %d bytes\n",
				CACHE_LINE_SIZE);
			line_size = CACHE_LINE_SIZE;
		}
	}

	if (line_size != CACHE_LINE_SIZE)
		pr_warn("L2C OF: DT supplied line size %d bytes does "
			"not match hardware line size of %d bytes\n",
			line_size, CACHE_LINE_SIZE);

	/*
	 * Since:
	 * set size = cache size / sets
	 * ways = cache size / (sets * line size)
	 * way size = cache size / (cache size / (sets * line size))
	 * way size = sets * line size
	 * associativity = ways = cache size / way size
	 */
	way_size = sets * line_size;
	*associativity = cache_size / way_size;

	if (way_size > max_way_size) {
		pr_err("L2C OF: set size %dKB is too large\n", way_size);
		return -EINVAL;
	}

	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
		cache_size, cache_size >> 10);
	pr_info("L2C OF: override line size: %d bytes\n", line_size);
	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
		way_size, way_size >> 10);
	pr_info("L2C OF: override associativity: %d\n", *associativity);

	/*
	 * Calculates the bits 17:19 to set for way size:
	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
	 */
	way_size_bits = ilog2(way_size >> 10) - 3;
	if (way_size_bits < 1 || way_size_bits > 6) {
		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
		       way_size);
		return -EINVAL;
	}

	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;

	return 0;
}
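
/*
 * Worked example (illustrative, not part of the original source): a DT
 * node with cache-size = 0x100000 (1MB), cache-sets = 4096 and a 32-byte
 * line gives way_size = 4096 * 32 = 128KB, associativity = 1MB / 128KB
 * = 8, and way_size_bits = ilog2(128) - 3 = 4, consistent with the
 * "512KB -> 6, ... 16KB -> 1" mapping above.
 */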
static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;
	int ret;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		val |= L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;

	if (assoc > 8) {
		pr_err("l2x0 of: cache setting yield too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
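
/*
 * Illustrative note on the encoding above (not part of the original
 * source): the DT latency properties are given in cycles, while the
 * hardware fields store cycles minus one, so e.g. arm,tag-latency = <2>
 * is written into AUX_CTRL as a field value of 1.
 */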
static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};
static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;
	u32 prefetch;
	u32 power;
	u32 val;
	int ret;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		l2x0_saved_regs.tag_latency =
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		l2x0_saved_regs.data_latency =
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		l2x0_saved_regs.filter_end =
				ALIGN(filter[0] + filter[1], SZ_1M);
		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
					| L310_ADDR_FILTER_EN;
	}
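
	/*
	 * Worked example (illustrative, not part of the original source):
	 * arm,filter-ranges = <0x80000000 0x40000000> (base, size) yields
	 * filter_start = 0x80000000 | L310_ADDR_FILTER_EN and filter_end =
	 * ALIGN(0x80000000 + 0x40000000, SZ_1M) = 0xC0000000.
	 */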

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	if (!ret) {
		switch (assoc) {
		case 16:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		case 8:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		default:
			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
			       assoc);
			break;
		}
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	prefetch = l2x0_saved_regs.prefetch_ctrl;

	ret = of_property_read_u32(np, "arm,double-linefill", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
	if (ret == 0) {
		if (!val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
	if (ret == 0) {
		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-data", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-data property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-instr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
		else
			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
	}

	l2x0_saved_regs.prefetch_ctrl = prefetch;

	power = l2x0_saved_regs.pwr_ctrl |
		L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;

	ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
	if (!ret) {
		if (!val)
			power &= ~L310_DYNAMIC_CLK_GATING_EN;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
	}
	ret = of_property_read_u32(np, "arm,standby-mode", &val);
	if (!ret) {
		if (!val)
			power &= ~L310_STNDBY_MODE_EN;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
	}

	l2x0_saved_regs.pwr_ctrl = power;
}
static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * This is a variant of the of_l2c310_data with .sync set to
 * NULL.  Outer sync operations are not needed when the system is I/O
 * coherent, and potentially harmful in certain situations (PCIe/PL310
 * deadlock on Armada 375/38x due to hardware I/O coherency).  The
 * other operations are kept because they are infrequent (therefore do
 * not cause the deadlock in practice) and needed for secondary CPU
 * boot and other power management activities.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};
/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
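
/*
 * Worked example (illustrative, assuming 4K pages; not part of the
 * original source): for start = 0x1fe0 and end = 0x3000,
 * PAGE_ALIGN(0x1fe1) = 0x2000, so the first chunk is clamped to
 * [0x1fe0, 0x2000) and the caller continues from 0x2000.
 */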
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	void __iomem *base = l2x0_base;
	unsigned long range_end;
	unsigned long flags;

	/*
	 * round start and end addresses up to cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * perform operation on all full cache lines between 'start' and 'end'
	 */
	while (start < end) {
		range_end = aurora_range_end(start, end);

		raw_spin_lock_irqsave(&l2x0_lock, flags);
		writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
		writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);

		writel_relaxed(0, base + AURORA_SYNC_REG);
		start = range_end;
	}
}
static void aurora_inv_range(unsigned long start, unsigned long end)
{
	aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override)
		aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	if (l2_wt_override)
		aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
	else
		aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}

static void aurora_flush_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	writel_relaxed(0, base + AURORA_SYNC_REG);
}

static void aurora_cache_sync(void)
{
	writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}
static void aurora_disable(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	writel_relaxed(0, base + AURORA_SYNC_REG);
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}
static void __init aurora_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = aurora_flush_all,
		.disable = aurora_disable,
		.sync = aurora_cache_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.resume = l2c_resume,
	},
};
/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000 and
 * ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross-section case is very rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because of
 * that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL
static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
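
/*
 * Worked example (illustrative, not part of the original source):
 * 0x40001000 lies in the SYS EMI section, so it maps to 0x40001000 +
 * 0x40000000 = 0x80001000; 0xC0001000 lies in VC EMI section 3, so it
 * maps to 0xC0001000 + 0x80000000 = 0x40001000 (the addition wraps
 * modulo 2^32 on a 32-bit unsigned long).
 */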
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
/* Broadcom L2C-310s start from ARM's r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_configure(void __iomem *base)
{
	l2c_configure(base);
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	.unlock = l2c_unlock,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = l2c_resume,
	},
};
#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{ }
};
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;
	u32 cache_level = 2;
	bool nosync = false;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	if (of_property_read_u32(np, "cache-level", &cache_level))
		pr_err("L2C: device tree omits to specify cache-level\n");

	if (cache_level != 2)
		pr_err("L2C: device tree specifies invalid cache level\n");

	nosync = of_property_read_bool(np, "arm,outer-sync-disable");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
#endif	/* CONFIG_OF */