/*
 * Blackfin CPLB exception handling.
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */
20 #include <linux/module.h>
23 #include <asm/blackfin.h>
24 #include <asm/cacheflush.h>
25 #include <asm/cplbinit.h>
26 #include <asm/mmu_context.h>
/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */
38 unsigned long *current_rwx_mask[NR_CPUS];
40 int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
41 int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
42 int nr_cplb_flush[NR_CPUS];
44 static inline void disable_dcplb(void)
48 ctrl = bfin_read_DMEM_CONTROL();
50 bfin_write_DMEM_CONTROL(ctrl);
54 static inline void enable_dcplb(void)
58 ctrl = bfin_read_DMEM_CONTROL();
60 bfin_write_DMEM_CONTROL(ctrl);
64 static inline void disable_icplb(void)
68 ctrl = bfin_read_IMEM_CONTROL();
70 bfin_write_IMEM_CONTROL(ctrl);
74 static inline void enable_icplb(void)
78 ctrl = bfin_read_IMEM_CONTROL();
80 bfin_write_IMEM_CONTROL(ctrl);
/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.  The low 16 status bits hold a one-hot
 * fault field; normalizing it yields the bit position.
 */
static inline int faulting_cplb_index(int status)
{
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}
95 * Given the contents of the status register and the DCPLB_DATA contents,
96 * return true if a write access should be permitted.
98 static inline int write_permitted(int status, unsigned long data)
100 if (status & FAULT_USERSUPV)
101 return !!(data & CPLB_SUPV_WR);
103 return !!(data & CPLB_USER_WR);
106 /* Counters to implement round-robin replacement. */
107 static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
110 * Find an ICPLB entry to be evicted and return its index.
112 static int evict_one_icplb(unsigned int cpu)
115 for (i = first_switched_icplb; i < MAX_CPLBS; i++)
116 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
118 i = first_switched_icplb + icplb_rr_index[cpu];
119 if (i >= MAX_CPLBS) {
120 i -= MAX_CPLBS - first_switched_icplb;
121 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
123 icplb_rr_index[cpu]++;
127 static int evict_one_dcplb(unsigned int cpu)
130 for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
131 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
133 i = first_switched_dcplb + dcplb_rr_index[cpu];
134 if (i >= MAX_CPLBS) {
135 i -= MAX_CPLBS - first_switched_dcplb;
136 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
138 dcplb_rr_index[cpu]++;
142 static noinline int dcplb_miss(unsigned int cpu)
144 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
145 int status = bfin_read_DCPLB_STATUS();
148 unsigned long d_data;
150 nr_dcplb_miss[cpu]++;
152 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
153 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
154 if (bfin_addr_dcacheable(addr)) {
155 d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
156 # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
157 d_data |= CPLB_L1_AOW | CPLB_WT;
162 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
165 } else if (addr >= physical_mem_end) {
166 if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
167 && (status & FAULT_USERSUPV)) {
169 d_data &= ~PAGE_SIZE_4KB;
170 d_data |= PAGE_SIZE_4MB;
171 } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
172 && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
173 addr &= ~(1 * 1024 * 1024 - 1);
174 d_data &= ~PAGE_SIZE_4KB;
175 d_data |= PAGE_SIZE_1MB;
177 return CPLB_PROT_VIOL;
178 } else if (addr >= _ramend) {
179 d_data |= CPLB_USER_RD | CPLB_USER_WR;
181 mask = current_rwx_mask[cpu];
183 int page = addr >> PAGE_SHIFT;
185 int bit = 1 << (page & 31);
188 d_data |= CPLB_USER_RD;
190 mask += page_mask_nelts;
192 d_data |= CPLB_USER_WR;
195 idx = evict_one_dcplb(cpu);
198 dcplb_tbl[cpu][idx].addr = addr;
199 dcplb_tbl[cpu][idx].data = d_data;
202 bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
203 bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
209 static noinline int icplb_miss(unsigned int cpu)
211 unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
212 int status = bfin_read_ICPLB_STATUS();
214 unsigned long i_data;
216 nr_icplb_miss[cpu]++;
218 /* If inside the uncached DMA region, fault. */
219 if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
220 return CPLB_PROT_VIOL;
222 if (status & FAULT_USERSUPV)
223 nr_icplb_supv_miss[cpu]++;
226 * First, try to find a CPLB that matches this address. If we
227 * find one, then the fact that we're in the miss handler means
228 * that the instruction crosses a page boundary.
230 for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
231 if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
232 unsigned long this_addr = icplb_tbl[cpu][idx].addr;
233 if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
240 i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
242 #ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
244 * Normal RAM, and possibly the reserved memory area, are
247 if (addr < _ramend ||
248 (addr < physical_mem_end && reserved_mem_icache_on))
249 i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
252 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
255 } else if (addr >= physical_mem_end) {
256 if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
257 && (status & FAULT_USERSUPV)) {
258 addr &= ~(1 * 1024 * 1024 - 1);
259 i_data &= ~PAGE_SIZE_4KB;
260 i_data |= PAGE_SIZE_1MB;
262 return CPLB_PROT_VIOL;
263 } else if (addr >= _ramend) {
264 i_data |= CPLB_USER_RD;
267 * Two cases to distinguish - a supervisor access must
268 * necessarily be for a module page; we grant it
269 * unconditionally (could do better here in the future).
270 * Otherwise, check the x bitmap of the current process.
272 if (!(status & FAULT_USERSUPV)) {
273 unsigned long *mask = current_rwx_mask[cpu];
276 int page = addr >> PAGE_SHIFT;
278 int bit = 1 << (page & 31);
280 mask += 2 * page_mask_nelts;
282 i_data |= CPLB_USER_RD;
286 idx = evict_one_icplb(cpu);
288 icplb_tbl[cpu][idx].addr = addr;
289 icplb_tbl[cpu][idx].data = i_data;
292 bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
293 bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
299 static noinline int dcplb_protection_fault(unsigned int cpu)
301 int status = bfin_read_DCPLB_STATUS();
303 nr_dcplb_prot[cpu]++;
305 if (status & FAULT_RW) {
306 int idx = faulting_cplb_index(status);
307 unsigned long data = dcplb_tbl[cpu][idx].data;
308 if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
309 write_permitted(status, data)) {
311 dcplb_tbl[cpu][idx].data = data;
312 bfin_write32(DCPLB_DATA0 + idx * 4, data);
316 return CPLB_PROT_VIOL;
/*
 * Top-level CPLB exception dispatcher, called from the trap handler.
 * The low six bits of SEQSTAT select the fault cause.  Returns 0 when
 * the fault was handled, non-zero to escalate.
 */
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	int cause = seqstat & 0x3f;
	unsigned int cpu = smp_processor_id();

	switch (cause) {
	case 0x23:
		return dcplb_protection_fault(cpu);
	case 0x2C:
		return icplb_miss(cpu);
	case 0x26:
		return dcplb_miss(cpu);
	default:
		return 1;
	}
}
335 void flush_switched_cplbs(unsigned int cpu)
340 nr_cplb_flush[cpu]++;
342 local_irq_save_hw(flags);
344 for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
345 icplb_tbl[cpu][i].data = 0;
346 bfin_write32(ICPLB_DATA0 + i * 4, 0);
351 for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
352 dcplb_tbl[cpu][i].data = 0;
353 bfin_write32(DCPLB_DATA0 + i * 4, 0);
356 local_irq_restore_hw(flags);
360 void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
363 unsigned long addr = (unsigned long)masks;
364 unsigned long d_data;
368 current_rwx_mask[cpu] = masks;
372 local_irq_save_hw(flags);
373 current_rwx_mask[cpu] = masks;
375 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
379 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
380 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
381 d_data |= CPLB_L1_CHBL;
382 # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
383 d_data |= CPLB_L1_AOW | CPLB_WT;
389 for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
390 dcplb_tbl[cpu][i].addr = addr;
391 dcplb_tbl[cpu][i].data = d_data;
392 bfin_write32(DCPLB_DATA0 + i * 4, d_data);
393 bfin_write32(DCPLB_ADDR0 + i * 4, addr);
397 local_irq_restore_hw(flags);