/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"
struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;
/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}
/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}
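/*
 * For example, a platform that must reach the debug register through a
 * secure monitor call can install its own outer_cache.set_debug hook
 * during early init; the l2c310_set_debug() implementation further down
 * is only suitable where a direct write is permitted.
 */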
static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}
static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a nono */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned id;

	id = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
	if (id == L2X0_CACHE_ID_PART_L310)
		num_lock = 8;
	else
		num_lock = 1;

	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2c_unlock(base, num_lock);

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}
static void l2x0_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2x0_enable(base, l2x0_saved_regs.aux_ctrl, 0);
}
static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};
/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a different sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}
static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}
static const struct l2c_init_data l2c210_data __initconst = {
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};
/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
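/*
 * For example, an r3p0 part reports L310_CACHE_ID_RTL_R3P0 in the RTL
 * field of the cache ID register, so l2c310_fixup() below selects the
 * 753970 sync workaround for it, but neither 588369 (which requires
 * revision < L310_CACHE_ID_RTL_R2P0) nor 727915 (fixed in r3p1).
 */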
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}
static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
							L2X0_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
							L2X0_POWER_CTRL);
}
static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				       base + L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			writel_relaxed(l2x0_saved_regs.pwr_ctrl,
				       base + L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}
static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};
static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	const char *type;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;
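	/*
	 * Worked example: assuming L2X0_WAY_SIZE_SHIFT is 3, an aux way
	 * size field of 3 gives way_size = 1 << (3 + 3) = 64, so an
	 * 8-way cache works out to 8 * 64 * SZ_1K = 512 kB.
	 */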
	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		type, cache_id, aux);
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
		data = &l2x0_init_fns;
		break;

	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}
#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;
static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
static const struct l2c_init_data of_l2c210_data __initconst = {
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};
static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};
static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
static const struct l2c_init_data of_l2c310_data __initconst = {
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};
/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
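/*
 * Worked example: with 4K pages, calc_range_end(0x1000fe0, 0x1002000)
 * returns PAGE_ALIGN(0x1000fe1) = 0x1001000 (assuming MAX_RANGE_SIZE
 * does not clamp the range first), so callers walk a large range one
 * page-bounded chunk at a time.
 */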
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}
static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round start and end addresses up to cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}
static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}
static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}
/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}
static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}
static void __init aurora_of_parse(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};
static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};
/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate an address range that starts at
 * 0xBFFF0000 and ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 -
 * 0xBFFFFFFF and 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that, the code does not need to handle section 1 at all.
 */
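/*
 * Worked example, using only the table above: invalidating
 * 0xBFFF0000 - 0xC0001000 is split at 0xC0000000; the first part maps
 * into the SYS EMI alias as 0xBFFF0000 + 0x40000000 = 0xFFFF0000, and
 * the second part is offset by 0x80000000 into the VC EMI alias
 * (modulo 2^32).
 */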
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L2X0_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}
static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};
#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif