/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;	/* Total cache size in bytes */
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif
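
/*
 * A dummy write to the Cache Sync register drains the controller's
 * buffers, so that all previously issued maintenance operations have
 * completed before cache_sync() returns.
 */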
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
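
/*
 * PL310 erratum 588369: on affected controller revisions the atomic
 * Clean & Invalidate by PA operation does not behave correctly.  The
 * workaround below issues a separate Clean by PA followed by an
 * Invalidate by PA, and uses the Debug Control Register (written via
 * the TI secure monitor) to disable write-back and cache linefills
 * around the affected sequences.
 */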
#ifdef CONFIG_PL310_ERRATA_588369
static void debug_writel(unsigned long val)
{
	extern void omap_smc1(u32 fn, u32 arg);

	/*
	 * Texas Instruments secure monitor API to modify the
	 * PL310 Debug Control Register.
	 */
	omap_smc1(0x100, val);
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean and invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating while the L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
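
/*
 * The range operations below work on physical addresses, one cache line
 * at a time.  Work is split into blocks of at most 4096 bytes; between
 * blocks the spinlock is dropped and immediately re-taken so that
 * interrupt latency stays bounded on large ranges.
 */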
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	writel(0, l2x0_base + L2X0_CTRL);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
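
/*
 * l2x0_init() probes the cache ID and auxiliary control registers,
 * computes the number of ways and the total cache size, enables the
 * controller if it is not already on, and hooks up the outer_cache
 * operations.
 *
 * Minimal usage sketch (illustrative only; the base address and the
 * aux_val/aux_mask values below are placeholders, not taken from this
 * file): a platform maps the controller and calls l2x0_init() from its
 * machine init code, e.g.
 *
 *	void __iomem *l2cc = ioremap(MYPLAT_L2CC_PHYS_BASE, SZ_4K);
 *	if (l2cc)
 *		l2x0_init(l2cc, 0x00000000, 0xffffffff);
 *
 * where aux_val = 0 and aux_mask = 0xffffffff leave the AUX_CTRL
 * register at its hardware/bootloader defaults.
 */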
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux, cache_id, way_size;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		ways = (aux & (1 << 16)) ? 16 : 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/* L2 cache size = way size * number of ways */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting from non-secure mode,
	 * accessing the registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* l2x0 controller is disabled: configure, invalidate, enable */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
		l2x0_inv_all();
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}