2 * Handle unaligned accesses by emulation.
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2014 Imagination Technologies Ltd.
12 * This file contains exception handler for address error exception with the
13 * special capability to execute faulting instructions in software. The
14 * handler does not try to handle the case when the program counter points
15 * to an address not aligned to a word boundary.
17 * Putting data to unaligned addresses is a bad practice even on Intel where
18 * only the performance is affected. Much worse is that such code is non-
19 * portable. Due to several programs that die on MIPS due to alignment
20 * problems I decided to implement this handler anyway though I originally
21 * didn't intend to do this at all for user code.
23 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this sometime in the future when the alignment
25 * problems with user programs have been fixed. For programmers this is the
28 * Fixing address errors is a per process option. The option is inherited
29 * across fork(2) and execve(2) calls. If you really want to use the
30 * option in your user programs - I discourage the use of the software
31 * emulation strongly - use the following code in your userland stuff:
33 * #include <sys/sysmips.h>
36 * sysmips(MIPS_FIXADE, x);
39 * The argument x is 0 for disabling software emulation, enabled otherwise.
41 * Below a little program to play around with this feature.
44 * #include <sys/sysmips.h>
47 * unsigned char bar[8];
50 * main(int argc, char *argv[])
52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53 * unsigned int *p = (unsigned int *) (x.bar + 3);
57 * sysmips(MIPS_FIXADE, atoi(argv[1]));
59 * printf("*p = %08lx\n", *p);
63 * for(i = 0; i <= 7; i++)
64 * printf("%02x ", x.bar[i]);
68 * Coprocessor loads are not supported; I think this case is unimportant
71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72 * exception for the R6000.
73 * A store crossing a page boundary might be executed only partially.
74 * Undo the partial store in this case.
76 #include <linux/context_tracking.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
89 #include <asm/fpu_emulator.h>
91 #include <asm/uaccess.h>
93 #include <asm/fpu_emulator.h>
95 #define STR(x) __STR(x)
99 UNALIGNED_ACTION_QUIET,
100 UNALIGNED_ACTION_SIGNAL,
101 UNALIGNED_ACTION_SHOW,
103 #ifdef CONFIG_DEBUG_FS
104 static u32 unaligned_instructions;
105 static u32 unaligned_action;
107 #define unaligned_action UNALIGNED_ACTION_QUIET
109 extern void show_registers(struct pt_regs *regs);
112 #define _LoadHW(addr, value, res, type) \
113 __asm__ __volatile__ (".set\tnoat\n" \
114 "1:\t"type##_lb("%0", "0(%2)")"\n" \
115 "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
121 ".section\t.fixup,\"ax\"\n\t" \
122 "4:\tli\t%1, %3\n\t" \
125 ".section\t__ex_table,\"a\"\n\t" \
126 STR(PTR)"\t1b, 4b\n\t" \
127 STR(PTR)"\t2b, 4b\n\t" \
129 : "=&r" (value), "=r" (res) \
130 : "r" (addr), "i" (-EFAULT));
132 #ifndef CONFIG_CPU_MIPSR6
133 #define _LoadW(addr, value, res, type) \
134 __asm__ __volatile__ ( \
135 "1:\t"type##_lwl("%0", "(%2)")"\n" \
136 "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
140 ".section\t.fixup,\"ax\"\n\t" \
141 "4:\tli\t%1, %3\n\t" \
144 ".section\t__ex_table,\"a\"\n\t" \
145 STR(PTR)"\t1b, 4b\n\t" \
146 STR(PTR)"\t2b, 4b\n\t" \
148 : "=&r" (value), "=r" (res) \
149 : "r" (addr), "i" (-EFAULT));
151 /* MIPSR6 has no lwl instruction */
152 #define _LoadW(addr, value, res, type) \
153 __asm__ __volatile__ ( \
156 "1:"type##_lb("%0", "0(%2)")"\n\t" \
157 "2:"type##_lbu("$1", "1(%2)")"\n\t" \
160 "3:"type##_lbu("$1", "2(%2)")"\n\t" \
163 "4:"type##_lbu("$1", "3(%2)")"\n\t" \
170 ".section\t.fixup,\"ax\"\n\t" \
171 "11:\tli\t%1, %3\n\t" \
174 ".section\t__ex_table,\"a\"\n\t" \
175 STR(PTR)"\t1b, 11b\n\t" \
176 STR(PTR)"\t2b, 11b\n\t" \
177 STR(PTR)"\t3b, 11b\n\t" \
178 STR(PTR)"\t4b, 11b\n\t" \
180 : "=&r" (value), "=r" (res) \
181 : "r" (addr), "i" (-EFAULT));
182 #endif /* CONFIG_CPU_MIPSR6 */
184 #define _LoadHWU(addr, value, res, type) \
185 __asm__ __volatile__ ( \
187 "1:\t"type##_lbu("%0", "0(%2)")"\n" \
188 "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
195 ".section\t.fixup,\"ax\"\n\t" \
196 "4:\tli\t%1, %3\n\t" \
199 ".section\t__ex_table,\"a\"\n\t" \
200 STR(PTR)"\t1b, 4b\n\t" \
201 STR(PTR)"\t2b, 4b\n\t" \
203 : "=&r" (value), "=r" (res) \
204 : "r" (addr), "i" (-EFAULT));
206 #ifndef CONFIG_CPU_MIPSR6
207 #define _LoadWU(addr, value, res, type) \
208 __asm__ __volatile__ ( \
209 "1:\t"type##_lwl("%0", "(%2)")"\n" \
210 "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
211 "dsll\t%0, %0, 32\n\t" \
212 "dsrl\t%0, %0, 32\n\t" \
216 "\t.section\t.fixup,\"ax\"\n\t" \
217 "4:\tli\t%1, %3\n\t" \
220 ".section\t__ex_table,\"a\"\n\t" \
221 STR(PTR)"\t1b, 4b\n\t" \
222 STR(PTR)"\t2b, 4b\n\t" \
224 : "=&r" (value), "=r" (res) \
225 : "r" (addr), "i" (-EFAULT));
227 #define _LoadDW(addr, value, res) \
228 __asm__ __volatile__ ( \
229 "1:\tldl\t%0, (%2)\n" \
230 "2:\tldr\t%0, 7(%2)\n\t" \
234 "\t.section\t.fixup,\"ax\"\n\t" \
235 "4:\tli\t%1, %3\n\t" \
238 ".section\t__ex_table,\"a\"\n\t" \
239 STR(PTR)"\t1b, 4b\n\t" \
240 STR(PTR)"\t2b, 4b\n\t" \
242 : "=&r" (value), "=r" (res) \
243 : "r" (addr), "i" (-EFAULT));
/* MIPSR6 has no lwl and ldl instructions */
246 #define _LoadWU(addr, value, res, type) \
247 __asm__ __volatile__ ( \
250 "1:"type##_lbu("%0", "0(%2)")"\n\t" \
251 "2:"type##_lbu("$1", "1(%2)")"\n\t" \
254 "3:"type##_lbu("$1", "2(%2)")"\n\t" \
257 "4:"type##_lbu("$1", "3(%2)")"\n\t" \
264 ".section\t.fixup,\"ax\"\n\t" \
265 "11:\tli\t%1, %3\n\t" \
268 ".section\t__ex_table,\"a\"\n\t" \
269 STR(PTR)"\t1b, 11b\n\t" \
270 STR(PTR)"\t2b, 11b\n\t" \
271 STR(PTR)"\t3b, 11b\n\t" \
272 STR(PTR)"\t4b, 11b\n\t" \
274 : "=&r" (value), "=r" (res) \
275 : "r" (addr), "i" (-EFAULT));
277 #define _LoadDW(addr, value, res) \
278 __asm__ __volatile__ ( \
281 "1:lb\t%0, 0(%2)\n\t" \
282 "2:lbu\t $1, 1(%2)\n\t" \
283 "dsll\t%0, 0x8\n\t" \
285 "3:lbu\t$1, 2(%2)\n\t" \
286 "dsll\t%0, 0x8\n\t" \
288 "4:lbu\t$1, 3(%2)\n\t" \
289 "dsll\t%0, 0x8\n\t" \
291 "5:lbu\t$1, 4(%2)\n\t" \
292 "dsll\t%0, 0x8\n\t" \
294 "6:lbu\t$1, 5(%2)\n\t" \
295 "dsll\t%0, 0x8\n\t" \
297 "7:lbu\t$1, 6(%2)\n\t" \
298 "dsll\t%0, 0x8\n\t" \
300 "8:lbu\t$1, 7(%2)\n\t" \
301 "dsll\t%0, 0x8\n\t" \
307 ".section\t.fixup,\"ax\"\n\t" \
308 "11:\tli\t%1, %3\n\t" \
311 ".section\t__ex_table,\"a\"\n\t" \
312 STR(PTR)"\t1b, 11b\n\t" \
313 STR(PTR)"\t2b, 11b\n\t" \
314 STR(PTR)"\t3b, 11b\n\t" \
315 STR(PTR)"\t4b, 11b\n\t" \
316 STR(PTR)"\t5b, 11b\n\t" \
317 STR(PTR)"\t6b, 11b\n\t" \
318 STR(PTR)"\t7b, 11b\n\t" \
319 STR(PTR)"\t8b, 11b\n\t" \
321 : "=&r" (value), "=r" (res) \
322 : "r" (addr), "i" (-EFAULT));
323 #endif /* CONFIG_CPU_MIPSR6 */
326 #define _StoreHW(addr, value, res, type) \
327 __asm__ __volatile__ ( \
329 "1:\t"type##_sb("%1", "1(%2)")"\n" \
330 "srl\t$1, %1, 0x8\n" \
331 "2:\t"type##_sb("$1", "0(%2)")"\n" \
336 ".section\t.fixup,\"ax\"\n\t" \
337 "4:\tli\t%0, %3\n\t" \
340 ".section\t__ex_table,\"a\"\n\t" \
341 STR(PTR)"\t1b, 4b\n\t" \
342 STR(PTR)"\t2b, 4b\n\t" \
345 : "r" (value), "r" (addr), "i" (-EFAULT));
347 #ifndef CONFIG_CPU_MIPSR6
348 #define _StoreW(addr, value, res, type) \
349 __asm__ __volatile__ ( \
350 "1:\t"type##_swl("%1", "(%2)")"\n" \
351 "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
355 ".section\t.fixup,\"ax\"\n\t" \
356 "4:\tli\t%0, %3\n\t" \
359 ".section\t__ex_table,\"a\"\n\t" \
360 STR(PTR)"\t1b, 4b\n\t" \
361 STR(PTR)"\t2b, 4b\n\t" \
364 : "r" (value), "r" (addr), "i" (-EFAULT));
366 #define _StoreDW(addr, value, res) \
367 __asm__ __volatile__ ( \
368 "1:\tsdl\t%1,(%2)\n" \
369 "2:\tsdr\t%1, 7(%2)\n\t" \
373 ".section\t.fixup,\"ax\"\n\t" \
374 "4:\tli\t%0, %3\n\t" \
377 ".section\t__ex_table,\"a\"\n\t" \
378 STR(PTR)"\t1b, 4b\n\t" \
379 STR(PTR)"\t2b, 4b\n\t" \
382 : "r" (value), "r" (addr), "i" (-EFAULT));
384 /* MIPSR6 has no swl and sdl instructions */
385 #define _StoreW(addr, value, res, type) \
386 __asm__ __volatile__ ( \
389 "1:"type##_sb("%1", "3(%2)")"\n\t" \
390 "srl\t$1, %1, 0x8\n\t" \
391 "2:"type##_sb("$1", "2(%2)")"\n\t" \
392 "srl\t$1, $1, 0x8\n\t" \
393 "3:"type##_sb("$1", "1(%2)")"\n\t" \
394 "srl\t$1, $1, 0x8\n\t" \
395 "4:"type##_sb("$1", "0(%2)")"\n\t" \
400 ".section\t.fixup,\"ax\"\n\t" \
401 "11:\tli\t%0, %3\n\t" \
404 ".section\t__ex_table,\"a\"\n\t" \
405 STR(PTR)"\t1b, 11b\n\t" \
406 STR(PTR)"\t2b, 11b\n\t" \
407 STR(PTR)"\t3b, 11b\n\t" \
408 STR(PTR)"\t4b, 11b\n\t" \
411 : "r" (value), "r" (addr), "i" (-EFAULT) \
414 #define StoreDW(addr, value, res) \
415 __asm__ __volatile__ ( \
418 "1:sb\t%1, 7(%2)\n\t" \
419 "dsrl\t$1, %1, 0x8\n\t" \
420 "2:sb\t$1, 6(%2)\n\t" \
421 "dsrl\t$1, $1, 0x8\n\t" \
422 "3:sb\t$1, 5(%2)\n\t" \
423 "dsrl\t$1, $1, 0x8\n\t" \
424 "4:sb\t$1, 4(%2)\n\t" \
425 "dsrl\t$1, $1, 0x8\n\t" \
426 "5:sb\t$1, 3(%2)\n\t" \
427 "dsrl\t$1, $1, 0x8\n\t" \
428 "6:sb\t$1, 2(%2)\n\t" \
429 "dsrl\t$1, $1, 0x8\n\t" \
430 "7:sb\t$1, 1(%2)\n\t" \
431 "dsrl\t$1, $1, 0x8\n\t" \
432 "8:sb\t$1, 0(%2)\n\t" \
433 "dsrl\t$1, $1, 0x8\n\t" \
438 ".section\t.fixup,\"ax\"\n\t" \
439 "11:\tli\t%0, %3\n\t" \
442 ".section\t__ex_table,\"a\"\n\t" \
443 STR(PTR)"\t1b, 11b\n\t" \
444 STR(PTR)"\t2b, 11b\n\t" \
445 STR(PTR)"\t3b, 11b\n\t" \
446 STR(PTR)"\t4b, 11b\n\t" \
447 STR(PTR)"\t5b, 11b\n\t" \
448 STR(PTR)"\t6b, 11b\n\t" \
449 STR(PTR)"\t7b, 11b\n\t" \
450 STR(PTR)"\t8b, 11b\n\t" \
453 : "r" (value), "r" (addr), "i" (-EFAULT) \
455 #endif /* CONFIG_CPU_MIPSR6 */
457 #else /* __BIG_ENDIAN */
459 #define _LoadHW(addr, value, res, type) \
460 __asm__ __volatile__ (".set\tnoat\n" \
461 "1:\t"type##_lb("%0", "1(%2)")"\n" \
462 "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
468 ".section\t.fixup,\"ax\"\n\t" \
469 "4:\tli\t%1, %3\n\t" \
472 ".section\t__ex_table,\"a\"\n\t" \
473 STR(PTR)"\t1b, 4b\n\t" \
474 STR(PTR)"\t2b, 4b\n\t" \
476 : "=&r" (value), "=r" (res) \
477 : "r" (addr), "i" (-EFAULT));
479 #ifndef CONFIG_CPU_MIPSR6
480 #define _LoadW(addr, value, res, type) \
481 __asm__ __volatile__ ( \
482 "1:\t"type##_lwl("%0", "3(%2)")"\n" \
483 "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
487 ".section\t.fixup,\"ax\"\n\t" \
488 "4:\tli\t%1, %3\n\t" \
491 ".section\t__ex_table,\"a\"\n\t" \
492 STR(PTR)"\t1b, 4b\n\t" \
493 STR(PTR)"\t2b, 4b\n\t" \
495 : "=&r" (value), "=r" (res) \
496 : "r" (addr), "i" (-EFAULT));
498 /* MIPSR6 has no lwl instruction */
499 #define _LoadW(addr, value, res, type) \
500 __asm__ __volatile__ ( \
503 "1:"type##_lb("%0", "3(%2)")"\n\t" \
504 "2:"type##_lbu("$1", "2(%2)")"\n\t" \
507 "3:"type##_lbu("$1", "1(%2)")"\n\t" \
510 "4:"type##_lbu("$1", "0(%2)")"\n\t" \
517 ".section\t.fixup,\"ax\"\n\t" \
518 "11:\tli\t%1, %3\n\t" \
521 ".section\t__ex_table,\"a\"\n\t" \
522 STR(PTR)"\t1b, 11b\n\t" \
523 STR(PTR)"\t2b, 11b\n\t" \
524 STR(PTR)"\t3b, 11b\n\t" \
525 STR(PTR)"\t4b, 11b\n\t" \
527 : "=&r" (value), "=r" (res) \
528 : "r" (addr), "i" (-EFAULT));
529 #endif /* CONFIG_CPU_MIPSR6 */
532 #define _LoadHWU(addr, value, res, type) \
533 __asm__ __volatile__ ( \
535 "1:\t"type##_lbu("%0", "1(%2)")"\n" \
536 "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
543 ".section\t.fixup,\"ax\"\n\t" \
544 "4:\tli\t%1, %3\n\t" \
547 ".section\t__ex_table,\"a\"\n\t" \
548 STR(PTR)"\t1b, 4b\n\t" \
549 STR(PTR)"\t2b, 4b\n\t" \
551 : "=&r" (value), "=r" (res) \
552 : "r" (addr), "i" (-EFAULT));
554 #ifndef CONFIG_CPU_MIPSR6
555 #define _LoadWU(addr, value, res, type) \
556 __asm__ __volatile__ ( \
557 "1:\t"type##_lwl("%0", "3(%2)")"\n" \
558 "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
559 "dsll\t%0, %0, 32\n\t" \
560 "dsrl\t%0, %0, 32\n\t" \
564 "\t.section\t.fixup,\"ax\"\n\t" \
565 "4:\tli\t%1, %3\n\t" \
568 ".section\t__ex_table,\"a\"\n\t" \
569 STR(PTR)"\t1b, 4b\n\t" \
570 STR(PTR)"\t2b, 4b\n\t" \
572 : "=&r" (value), "=r" (res) \
573 : "r" (addr), "i" (-EFAULT));
575 #define _LoadDW(addr, value, res) \
576 __asm__ __volatile__ ( \
577 "1:\tldl\t%0, 7(%2)\n" \
578 "2:\tldr\t%0, (%2)\n\t" \
582 "\t.section\t.fixup,\"ax\"\n\t" \
583 "4:\tli\t%1, %3\n\t" \
586 ".section\t__ex_table,\"a\"\n\t" \
587 STR(PTR)"\t1b, 4b\n\t" \
588 STR(PTR)"\t2b, 4b\n\t" \
590 : "=&r" (value), "=r" (res) \
591 : "r" (addr), "i" (-EFAULT));
/* MIPSR6 has no lwl and ldl instructions */
594 #define _LoadWU(addr, value, res, type) \
595 __asm__ __volatile__ ( \
598 "1:"type##_lbu("%0", "3(%2)")"\n\t" \
599 "2:"type##_lbu("$1", "2(%2)")"\n\t" \
602 "3:"type##_lbu("$1", "1(%2)")"\n\t" \
605 "4:"type##_lbu("$1", "0(%2)")"\n\t" \
612 ".section\t.fixup,\"ax\"\n\t" \
613 "11:\tli\t%1, %3\n\t" \
616 ".section\t__ex_table,\"a\"\n\t" \
617 STR(PTR)"\t1b, 11b\n\t" \
618 STR(PTR)"\t2b, 11b\n\t" \
619 STR(PTR)"\t3b, 11b\n\t" \
620 STR(PTR)"\t4b, 11b\n\t" \
622 : "=&r" (value), "=r" (res) \
623 : "r" (addr), "i" (-EFAULT));
625 #define _LoadDW(addr, value, res) \
626 __asm__ __volatile__ ( \
629 "1:lb\t%0, 7(%2)\n\t" \
630 "2:lbu\t$1, 6(%2)\n\t" \
631 "dsll\t%0, 0x8\n\t" \
633 "3:lbu\t$1, 5(%2)\n\t" \
634 "dsll\t%0, 0x8\n\t" \
636 "4:lbu\t$1, 4(%2)\n\t" \
637 "dsll\t%0, 0x8\n\t" \
639 "5:lbu\t$1, 3(%2)\n\t" \
640 "dsll\t%0, 0x8\n\t" \
642 "6:lbu\t$1, 2(%2)\n\t" \
643 "dsll\t%0, 0x8\n\t" \
645 "7:lbu\t$1, 1(%2)\n\t" \
646 "dsll\t%0, 0x8\n\t" \
648 "8:lbu\t$1, 0(%2)\n\t" \
649 "dsll\t%0, 0x8\n\t" \
655 ".section\t.fixup,\"ax\"\n\t" \
656 "11:\tli\t%1, %3\n\t" \
659 ".section\t__ex_table,\"a\"\n\t" \
660 STR(PTR)"\t1b, 11b\n\t" \
661 STR(PTR)"\t2b, 11b\n\t" \
662 STR(PTR)"\t3b, 11b\n\t" \
663 STR(PTR)"\t4b, 11b\n\t" \
664 STR(PTR)"\t5b, 11b\n\t" \
665 STR(PTR)"\t6b, 11b\n\t" \
666 STR(PTR)"\t7b, 11b\n\t" \
667 STR(PTR)"\t8b, 11b\n\t" \
669 : "=&r" (value), "=r" (res) \
670 : "r" (addr), "i" (-EFAULT));
671 #endif /* CONFIG_CPU_MIPSR6 */
673 #define _StoreHW(addr, value, res, type) \
674 __asm__ __volatile__ ( \
676 "1:\t"type##_sb("%1", "0(%2)")"\n" \
677 "srl\t$1,%1, 0x8\n" \
678 "2:\t"type##_sb("$1", "1(%2)")"\n" \
683 ".section\t.fixup,\"ax\"\n\t" \
684 "4:\tli\t%0, %3\n\t" \
687 ".section\t__ex_table,\"a\"\n\t" \
688 STR(PTR)"\t1b, 4b\n\t" \
689 STR(PTR)"\t2b, 4b\n\t" \
692 : "r" (value), "r" (addr), "i" (-EFAULT));
693 #ifndef CONFIG_CPU_MIPSR6
694 #define _StoreW(addr, value, res, type) \
695 __asm__ __volatile__ ( \
696 "1:\t"type##_swl("%1", "3(%2)")"\n" \
697 "2:\t"type##_swr("%1", "(%2)")"\n\t"\
701 ".section\t.fixup,\"ax\"\n\t" \
702 "4:\tli\t%0, %3\n\t" \
705 ".section\t__ex_table,\"a\"\n\t" \
706 STR(PTR)"\t1b, 4b\n\t" \
707 STR(PTR)"\t2b, 4b\n\t" \
710 : "r" (value), "r" (addr), "i" (-EFAULT));
712 #define _StoreDW(addr, value, res) \
713 __asm__ __volatile__ ( \
714 "1:\tsdl\t%1, 7(%2)\n" \
715 "2:\tsdr\t%1, (%2)\n\t" \
719 ".section\t.fixup,\"ax\"\n\t" \
720 "4:\tli\t%0, %3\n\t" \
723 ".section\t__ex_table,\"a\"\n\t" \
724 STR(PTR)"\t1b, 4b\n\t" \
725 STR(PTR)"\t2b, 4b\n\t" \
728 : "r" (value), "r" (addr), "i" (-EFAULT));
730 /* MIPSR6 has no swl and sdl instructions */
731 #define _StoreW(addr, value, res, type) \
732 __asm__ __volatile__ ( \
735 "1:"type##_sb("%1", "0(%2)")"\n\t" \
736 "srl\t$1, %1, 0x8\n\t" \
737 "2:"type##_sb("$1", "1(%2)")"\n\t" \
738 "srl\t$1, $1, 0x8\n\t" \
739 "3:"type##_sb("$1", "2(%2)")"\n\t" \
740 "srl\t$1, $1, 0x8\n\t" \
741 "4:"type##_sb("$1", "3(%2)")"\n\t" \
746 ".section\t.fixup,\"ax\"\n\t" \
747 "11:\tli\t%0, %3\n\t" \
750 ".section\t__ex_table,\"a\"\n\t" \
751 STR(PTR)"\t1b, 11b\n\t" \
752 STR(PTR)"\t2b, 11b\n\t" \
753 STR(PTR)"\t3b, 11b\n\t" \
754 STR(PTR)"\t4b, 11b\n\t" \
757 : "r" (value), "r" (addr), "i" (-EFAULT) \
760 #define _StoreDW(addr, value, res) \
761 __asm__ __volatile__ ( \
764 "1:sb\t%1, 0(%2)\n\t" \
765 "dsrl\t$1, %1, 0x8\n\t" \
766 "2:sb\t$1, 1(%2)\n\t" \
767 "dsrl\t$1, $1, 0x8\n\t" \
768 "3:sb\t$1, 2(%2)\n\t" \
769 "dsrl\t$1, $1, 0x8\n\t" \
770 "4:sb\t$1, 3(%2)\n\t" \
771 "dsrl\t$1, $1, 0x8\n\t" \
772 "5:sb\t$1, 4(%2)\n\t" \
773 "dsrl\t$1, $1, 0x8\n\t" \
774 "6:sb\t$1, 5(%2)\n\t" \
775 "dsrl\t$1, $1, 0x8\n\t" \
776 "7:sb\t$1, 6(%2)\n\t" \
777 "dsrl\t$1, $1, 0x8\n\t" \
778 "8:sb\t$1, 7(%2)\n\t" \
779 "dsrl\t$1, $1, 0x8\n\t" \
784 ".section\t.fixup,\"ax\"\n\t" \
785 "11:\tli\t%0, %3\n\t" \
788 ".section\t__ex_table,\"a\"\n\t" \
789 STR(PTR)"\t1b, 11b\n\t" \
790 STR(PTR)"\t2b, 11b\n\t" \
791 STR(PTR)"\t3b, 11b\n\t" \
792 STR(PTR)"\t4b, 11b\n\t" \
793 STR(PTR)"\t5b, 11b\n\t" \
794 STR(PTR)"\t6b, 11b\n\t" \
795 STR(PTR)"\t7b, 11b\n\t" \
796 STR(PTR)"\t8b, 11b\n\t" \
799 : "r" (value), "r" (addr), "i" (-EFAULT) \
801 #endif /* CONFIG_CPU_MIPSR6 */
804 #define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
805 #define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
806 #define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
807 #define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
808 #define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
809 #define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
810 #define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
811 #define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
812 #define LoadDW(addr, value, res) _LoadDW(addr, value, res)
814 #define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
815 #define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
816 #define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
817 #define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
818 #define StoreDW(addr, value, res) _StoreDW(addr, value, res)
820 static void emulate_load_store_insn(struct pt_regs *regs,
821 void __user *addr, unsigned int __user *pc)
823 union mips_instruction insn;
826 unsigned long origpc;
827 unsigned long orig31;
828 void __user *fault_addr = NULL;
832 origpc = (unsigned long)pc;
833 orig31 = regs->regs[31];
835 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
838 * This load never faults.
840 __get_user(insn.word, pc);
842 switch (insn.i_format.opcode) {
844 * These are instructions that a compiler doesn't generate. We
845 * can assume therefore that the code is MIPS-aware and
846 * really buggy. Emulating these instructions would break the
855 * For these instructions the only way to create an address
856 * error is an attempted access to kernel/supervisor address
873 * The remaining opcodes are the ones that are really of
879 * we can land here only from kernel accessing user memory,
880 * so we need to "switch" the address limit to user space, so
881 * address check can work properly.
885 switch (insn.spec3_format.func) {
887 if (!access_ok(VERIFY_READ, addr, 2)) {
891 LoadHWE(addr, value, res);
896 compute_return_epc(regs);
897 regs->regs[insn.spec3_format.rt] = value;
900 if (!access_ok(VERIFY_READ, addr, 4)) {
904 LoadWE(addr, value, res);
909 compute_return_epc(regs);
910 regs->regs[insn.spec3_format.rt] = value;
913 if (!access_ok(VERIFY_READ, addr, 2)) {
917 LoadHWUE(addr, value, res);
922 compute_return_epc(regs);
923 regs->regs[insn.spec3_format.rt] = value;
926 if (!access_ok(VERIFY_WRITE, addr, 2)) {
930 compute_return_epc(regs);
931 value = regs->regs[insn.spec3_format.rt];
932 StoreHWE(addr, value, res);
939 if (!access_ok(VERIFY_WRITE, addr, 4)) {
943 compute_return_epc(regs);
944 value = regs->regs[insn.spec3_format.rt];
945 StoreWE(addr, value, res);
959 if (!access_ok(VERIFY_READ, addr, 2))
962 LoadHW(addr, value, res);
965 compute_return_epc(regs);
966 regs->regs[insn.i_format.rt] = value;
970 if (!access_ok(VERIFY_READ, addr, 4))
973 LoadW(addr, value, res);
976 compute_return_epc(regs);
977 regs->regs[insn.i_format.rt] = value;
981 if (!access_ok(VERIFY_READ, addr, 2))
984 LoadHWU(addr, value, res);
987 compute_return_epc(regs);
988 regs->regs[insn.i_format.rt] = value;
994 * A 32-bit kernel might be running on a 64-bit processor. But
995 * if we're on a 32-bit processor and an i-cache incoherency
996 * or race makes us see a 64-bit instruction here the sdl/sdr
997 * would blow up, so for now we don't handle unaligned 64-bit
998 * instructions on 32-bit kernels.
1000 if (!access_ok(VERIFY_READ, addr, 4))
1003 LoadWU(addr, value, res);
1006 compute_return_epc(regs);
1007 regs->regs[insn.i_format.rt] = value;
1009 #endif /* CONFIG_64BIT */
1011 /* Cannot handle 64-bit instructions in 32-bit kernel */
1017 * A 32-bit kernel might be running on a 64-bit processor. But
1018 * if we're on a 32-bit processor and an i-cache incoherency
1019 * or race makes us see a 64-bit instruction here the sdl/sdr
1020 * would blow up, so for now we don't handle unaligned 64-bit
1021 * instructions on 32-bit kernels.
1023 if (!access_ok(VERIFY_READ, addr, 8))
1026 LoadDW(addr, value, res);
1029 compute_return_epc(regs);
1030 regs->regs[insn.i_format.rt] = value;
1032 #endif /* CONFIG_64BIT */
1034 /* Cannot handle 64-bit instructions in 32-bit kernel */
1038 if (!access_ok(VERIFY_WRITE, addr, 2))
1041 compute_return_epc(regs);
1042 value = regs->regs[insn.i_format.rt];
1043 StoreHW(addr, value, res);
1049 if (!access_ok(VERIFY_WRITE, addr, 4))
1052 compute_return_epc(regs);
1053 value = regs->regs[insn.i_format.rt];
1054 StoreW(addr, value, res);
1062 * A 32-bit kernel might be running on a 64-bit processor. But
1063 * if we're on a 32-bit processor and an i-cache incoherency
1064 * or race makes us see a 64-bit instruction here the sdl/sdr
1065 * would blow up, so for now we don't handle unaligned 64-bit
1066 * instructions on 32-bit kernels.
1068 if (!access_ok(VERIFY_WRITE, addr, 8))
1071 compute_return_epc(regs);
1072 value = regs->regs[insn.i_format.rt];
1073 StoreDW(addr, value, res);
1077 #endif /* CONFIG_64BIT */
1079 /* Cannot handle 64-bit instructions in 32-bit kernel */
1086 die_if_kernel("Unaligned FP access in kernel code", regs);
1087 BUG_ON(!used_math());
1089 lose_fpu(1); /* Save FPU state for the emulator. */
1090 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
1092 own_fpu(1); /* Restore FPU state. */
1094 /* Signal if something went wrong. */
1095 process_fpemu_return(res, fault_addr);
1101 #ifndef CONFIG_CPU_MIPSR6
1103 * COP2 is available to implementor for application specific use.
1104 * It's up to applications to register a notifier chain and do
1105 * whatever they have to do, including possible sending of signals.
1107 * This instruction has been reallocated in Release 6
1110 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
1114 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
1118 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
1122 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
	 * Pheeee...  We encountered a yet unknown instruction or
	 * cache coherence problem.  Die sucker, die ...
1133 #ifdef CONFIG_DEBUG_FS
1134 unaligned_instructions++;
1140 /* roll back jump/branch */
1141 regs->cp0_epc = origpc;
1142 regs->regs[31] = orig31;
1143 /* Did we have an exception handler installed? */
1144 if (fixup_exception(regs))
1147 die_if_kernel("Unhandled kernel unaligned access", regs);
1148 force_sig(SIGSEGV, current);
1153 die_if_kernel("Unhandled kernel unaligned access", regs);
1154 force_sig(SIGBUS, current);
1160 ("Unhandled kernel unaligned access or invalid instruction", regs);
1161 force_sig(SIGILL, current);
1164 /* Recode table from 16-bit register notation to 32-bit GPR. */
1165 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
1167 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
1168 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
1170 static void emulate_load_store_microMIPS(struct pt_regs *regs,
1173 unsigned long value;
1176 unsigned int reg = 0, rvar;
1177 unsigned long orig31;
1181 unsigned long origpc, contpc;
1182 union mips_instruction insn;
1183 struct mm_decoded_insn mminsn;
1184 void __user *fault_addr = NULL;
1186 origpc = regs->cp0_epc;
1187 orig31 = regs->regs[31];
1189 mminsn.micro_mips_mode = 1;
1192 * This load never faults.
1194 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
1195 __get_user(halfword, pc16);
1197 contpc = regs->cp0_epc + 2;
1198 word = ((unsigned int)halfword << 16);
1201 if (!mm_insn_16bit(halfword)) {
1202 __get_user(halfword, pc16);
1204 contpc = regs->cp0_epc + 4;
1210 if (get_user(halfword, pc16))
1212 mminsn.next_pc_inc = 2;
1213 word = ((unsigned int)halfword << 16);
1215 if (!mm_insn_16bit(halfword)) {
1217 if (get_user(halfword, pc16))
1219 mminsn.next_pc_inc = 4;
1222 mminsn.next_insn = word;
1224 insn = (union mips_instruction)(mminsn.insn);
1225 if (mm_isBranchInstr(regs, mminsn, &contpc))
1226 insn = (union mips_instruction)(mminsn.next_insn);
1228 /* Parse instruction to find what to do */
1230 switch (insn.mm_i_format.opcode) {
1233 switch (insn.mm_x_format.func) {
1235 reg = insn.mm_x_format.rd;
1242 switch (insn.mm_m_format.func) {
1244 reg = insn.mm_m_format.rd;
1248 if (!access_ok(VERIFY_READ, addr, 8))
1251 LoadW(addr, value, res);
1254 regs->regs[reg] = value;
1256 LoadW(addr, value, res);
1259 regs->regs[reg + 1] = value;
1263 reg = insn.mm_m_format.rd;
1267 if (!access_ok(VERIFY_WRITE, addr, 8))
1270 value = regs->regs[reg];
1271 StoreW(addr, value, res);
1275 value = regs->regs[reg + 1];
1276 StoreW(addr, value, res);
1283 reg = insn.mm_m_format.rd;
1287 if (!access_ok(VERIFY_READ, addr, 16))
1290 LoadDW(addr, value, res);
1293 regs->regs[reg] = value;
1295 LoadDW(addr, value, res);
1298 regs->regs[reg + 1] = value;
1300 #endif /* CONFIG_64BIT */
1306 reg = insn.mm_m_format.rd;
1310 if (!access_ok(VERIFY_WRITE, addr, 16))
1313 value = regs->regs[reg];
1314 StoreDW(addr, value, res);
1318 value = regs->regs[reg + 1];
1319 StoreDW(addr, value, res);
1323 #endif /* CONFIG_64BIT */
1328 reg = insn.mm_m_format.rd;
1330 if ((rvar > 9) || !reg)
1334 (VERIFY_READ, addr, 4 * (rvar + 1)))
1337 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1342 for (i = 16; rvar; rvar--, i++) {
1343 LoadW(addr, value, res);
1347 regs->regs[i] = value;
1349 if ((reg & 0xf) == 9) {
1350 LoadW(addr, value, res);
1354 regs->regs[30] = value;
1357 LoadW(addr, value, res);
1360 regs->regs[31] = value;
1365 reg = insn.mm_m_format.rd;
1367 if ((rvar > 9) || !reg)
1371 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
1374 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1379 for (i = 16; rvar; rvar--, i++) {
1380 value = regs->regs[i];
1381 StoreW(addr, value, res);
1386 if ((reg & 0xf) == 9) {
1387 value = regs->regs[30];
1388 StoreW(addr, value, res);
1394 value = regs->regs[31];
1395 StoreW(addr, value, res);
1403 reg = insn.mm_m_format.rd;
1405 if ((rvar > 9) || !reg)
1409 (VERIFY_READ, addr, 8 * (rvar + 1)))
1412 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
1418 for (i = 16; rvar; rvar--, i++) {
1419 LoadDW(addr, value, res);
1423 regs->regs[i] = value;
1425 if ((reg & 0xf) == 9) {
1426 LoadDW(addr, value, res);
1430 regs->regs[30] = value;
1433 LoadDW(addr, value, res);
1436 regs->regs[31] = value;
1439 #endif /* CONFIG_64BIT */
1445 reg = insn.mm_m_format.rd;
1447 if ((rvar > 9) || !reg)
1451 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
1454 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
1460 for (i = 16; rvar; rvar--, i++) {
1461 value = regs->regs[i];
1462 StoreDW(addr, value, res);
1467 if ((reg & 0xf) == 9) {
1468 value = regs->regs[30];
1469 StoreDW(addr, value, res);
1475 value = regs->regs[31];
1476 StoreDW(addr, value, res);
1481 #endif /* CONFIG_64BIT */
1485 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1491 switch (insn.mm_m_format.func) {
1493 reg = insn.mm_m_format.rd;
1497 /* LL,SC,LLD,SCD are not serviced */
1501 switch (insn.mm_x_format.func) {
1516 /* roll back jump/branch */
1517 regs->cp0_epc = origpc;
1518 regs->regs[31] = orig31;
1520 die_if_kernel("Unaligned FP access in kernel code", regs);
1521 BUG_ON(!used_math());
1522 BUG_ON(!is_fpu_owner());
1524 lose_fpu(1); /* save the FPU state for the emulator */
1525 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
1527 own_fpu(1); /* restore FPU state */
1529 /* If something went wrong, signal */
1530 process_fpemu_return(res, fault_addr);
1537 reg = insn.mm_i_format.rt;
1541 reg = insn.mm_i_format.rt;
1545 reg = insn.mm_i_format.rt;
1549 reg = insn.mm_i_format.rt;
1553 reg = insn.mm_i_format.rt;
1557 reg = insn.mm_i_format.rt;
1561 reg = insn.mm_i_format.rt;
1565 switch (insn.mm16_m_format.func) {
1567 reg = insn.mm16_m_format.rlist;
1569 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1572 for (i = 16; rvar; rvar--, i++) {
1573 LoadW(addr, value, res);
1577 regs->regs[i] = value;
1579 LoadW(addr, value, res);
1582 regs->regs[31] = value;
1587 reg = insn.mm16_m_format.rlist;
1589 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1592 for (i = 16; rvar; rvar--, i++) {
1593 value = regs->regs[i];
1594 StoreW(addr, value, res);
1599 value = regs->regs[31];
1600 StoreW(addr, value, res);
1611 reg = reg16to32[insn.mm16_rb_format.rt];
1615 reg = reg16to32[insn.mm16_rb_format.rt];
1619 reg = reg16to32st[insn.mm16_rb_format.rt];
1623 reg = reg16to32st[insn.mm16_rb_format.rt];
1627 reg = insn.mm16_r5_format.rt;
1631 reg = insn.mm16_r5_format.rt;
1635 reg = reg16to32[insn.mm16_r3_format.rt];
1643 if (!access_ok(VERIFY_READ, addr, 2))
1646 LoadHW(addr, value, res);
1649 regs->regs[reg] = value;
1653 if (!access_ok(VERIFY_READ, addr, 2))
1656 LoadHWU(addr, value, res);
1659 regs->regs[reg] = value;
1663 if (!access_ok(VERIFY_READ, addr, 4))
1666 LoadW(addr, value, res);
1669 regs->regs[reg] = value;
1675 * A 32-bit kernel might be running on a 64-bit processor. But
1676 * if we're on a 32-bit processor and an i-cache incoherency
1677 * or race makes us see a 64-bit instruction here the sdl/sdr
1678 * would blow up, so for now we don't handle unaligned 64-bit
1679 * instructions on 32-bit kernels.
1681 if (!access_ok(VERIFY_READ, addr, 4))
1684 LoadWU(addr, value, res);
1687 regs->regs[reg] = value;
1689 #endif /* CONFIG_64BIT */
1691 /* Cannot handle 64-bit instructions in 32-bit kernel */
1697 * A 32-bit kernel might be running on a 64-bit processor. But
1698 * if we're on a 32-bit processor and an i-cache incoherency
1699 * or race makes us see a 64-bit instruction here the sdl/sdr
1700 * would blow up, so for now we don't handle unaligned 64-bit
1701 * instructions on 32-bit kernels.
1703 if (!access_ok(VERIFY_READ, addr, 8))
1706 LoadDW(addr, value, res);
1709 regs->regs[reg] = value;
1711 #endif /* CONFIG_64BIT */
1713 /* Cannot handle 64-bit instructions in 32-bit kernel */
1717 if (!access_ok(VERIFY_WRITE, addr, 2))
1720 value = regs->regs[reg];
1721 StoreHW(addr, value, res);
1727 if (!access_ok(VERIFY_WRITE, addr, 4))
1730 value = regs->regs[reg];
1731 StoreW(addr, value, res);
1739 * A 32-bit kernel might be running on a 64-bit processor. But
1740 * if we're on a 32-bit processor and an i-cache incoherency
1741 * or race makes us see a 64-bit instruction here the sdl/sdr
1742 * would blow up, so for now we don't handle unaligned 64-bit
1743 * instructions on 32-bit kernels.
1745 if (!access_ok(VERIFY_WRITE, addr, 8))
1748 value = regs->regs[reg];
1749 StoreDW(addr, value, res);
1753 #endif /* CONFIG_64BIT */
1755 /* Cannot handle 64-bit instructions in 32-bit kernel */
1759 regs->cp0_epc = contpc; /* advance or branch */
1761 #ifdef CONFIG_DEBUG_FS
1762 unaligned_instructions++;
1767 /* roll back jump/branch */
1768 regs->cp0_epc = origpc;
1769 regs->regs[31] = orig31;
1770 /* Did we have an exception handler installed? */
1771 if (fixup_exception(regs))
1774 die_if_kernel("Unhandled kernel unaligned access", regs);
1775 force_sig(SIGSEGV, current);
1780 die_if_kernel("Unhandled kernel unaligned access", regs);
1781 force_sig(SIGBUS, current);
1787 ("Unhandled kernel unaligned access or invalid instruction", regs);
1788 force_sig(SIGILL, current);
/*
 * emulate_load_store_MIPS16e() - software-emulate an unaligned MIPS16e
 * load/store that raised an address error exception.
 *
 * @regs: trap frame of the faulting context (EPC, GPRs, badvaddr already
 *        consumed by the caller).
 * @addr: the unaligned data address being accessed.
 *
 * Flow: fetch the faulting 16-bit instruction halfword from EPC, skip a
 * leading EXTEND prefix or a jump in whose delay slot we trapped, decode
 * which GPR the instruction targets, perform the access through the
 * unaligned helpers (LoadHW/LoadW/StoreDW/...), then advance EPC via
 * MIPS16e_compute_return_epc().  On failure EPC and $31 are rolled back
 * and a signal (SIGSEGV/SIGBUS/SIGILL) is delivered, or die_if_kernel()
 * fires for kernel-mode faults.
 *
 * NOTE(review): this listing is an elided excerpt -- braces, break
 * statements and goto labels between the numbered lines are not visible
 * here; comments describe only what the visible lines establish.
 */
1791 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1793 unsigned long value;
1796 unsigned long orig31;
1798 unsigned long origpc;
1799 union mips16e_instruction mips16inst, oldinst;
/* Save EPC and the link register so the error paths can roll back. */
1801 origpc = regs->cp0_epc;
1802 orig31 = regs->regs[31];
/* Strip the ISA-mode bit from EPC to get a real instruction pointer. */
1803 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1805 * This load never faults.
1807 __get_user(mips16inst.full, pc16);
/* Keep the first halfword: return-EPC computation needs the original. */
1808 oldinst = mips16inst;
1810 /* skip EXTEND instruction */
1811 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1813 __get_user(mips16inst.full, pc16);
1814 } else if (delay_slot(regs)) {
1815 /* skip jump instructions */
1816 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1817 if (mips16inst.ri.opcode == MIPS16e_jal_op)
/* Re-fetch the instruction actually sitting in the delay slot. */
1820 if (get_user(mips16inst.full, pc16))
/*
 * First decode pass: work out which architectural GPR (index 'reg')
 * the instruction reads or writes.  reg16to32[] maps the 3-bit MIPS16e
 * register fields onto full 32-register numbers.
 */
1824 switch (mips16inst.ri.opcode) {
1825 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1826 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1827 case MIPS16e_ldpc_func:
1828 case MIPS16e_ldsp_func:
1829 reg = reg16to32[mips16inst.ri64.ry];
1832 case MIPS16e_sdsp_func:
1833 reg = reg16to32[mips16inst.ri64.ry];
1836 case MIPS16e_sdrasp_func:
1837 reg = 29; /* GPRSP */
1843 case MIPS16e_swsp_op:
1844 case MIPS16e_lwpc_op:
1845 case MIPS16e_lwsp_op:
1846 reg = reg16to32[mips16inst.ri.rx];
/* I8-format: only the swrasp function is emulated; it targets $sp. */
1850 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1852 reg = 29; /* GPRSP */
/* Default register decode for the remaining (rri-format) opcodes. */
1856 reg = reg16to32[mips16inst.rri.ry];
/*
 * Second decode pass: dispatch on opcode and perform the access.
 * Each case follows the same shape: access_ok() range check, the
 * unaligned load/store helper, advance EPC, write back the result.
 */
1860 switch (mips16inst.ri.opcode) {
1863 case MIPS16e_lbu_op:
/* Signed halfword load. */
1868 if (!access_ok(VERIFY_READ, addr, 2))
1871 LoadHW(addr, value, res);
1874 MIPS16e_compute_return_epc(regs, &oldinst);
1875 regs->regs[reg] = value;
/* Unsigned halfword load. */
1878 case MIPS16e_lhu_op:
1879 if (!access_ok(VERIFY_READ, addr, 2))
1882 LoadHWU(addr, value, res);
1885 MIPS16e_compute_return_epc(regs, &oldinst);
1886 regs->regs[reg] = value;
/* Signed word load (PC- or SP-relative variants share the code). */
1890 case MIPS16e_lwpc_op:
1891 case MIPS16e_lwsp_op:
1892 if (!access_ok(VERIFY_READ, addr, 4))
1895 LoadW(addr, value, res);
1898 MIPS16e_compute_return_epc(regs, &oldinst);
1899 regs->regs[reg] = value;
/* lwu: zero-extending word load -- only meaningful on 64-bit kernels. */
1902 case MIPS16e_lwu_op:
1905 * A 32-bit kernel might be running on a 64-bit processor. But
1906 * if we're on a 32-bit processor and an i-cache incoherency
1907 * or race makes us see a 64-bit instruction here the sdl/sdr
1908 * would blow up, so for now we don't handle unaligned 64-bit
1909 * instructions on 32-bit kernels.
1911 if (!access_ok(VERIFY_READ, addr, 4))
1914 LoadWU(addr, value, res);
1917 MIPS16e_compute_return_epc(regs, &oldinst);
1918 regs->regs[reg] = value;
1920 #endif /* CONFIG_64BIT */
1922 /* Cannot handle 64-bit instructions in 32-bit kernel */
/* Doubleword load (ld): CONFIG_64BIT only, same rationale as above. */
1929 * A 32-bit kernel might be running on a 64-bit processor. But
1930 * if we're on a 32-bit processor and an i-cache incoherency
1931 * or race makes us see a 64-bit instruction here the sdl/sdr
1932 * would blow up, so for now we don't handle unaligned 64-bit
1933 * instructions on 32-bit kernels.
1935 if (!access_ok(VERIFY_READ, addr, 8))
1938 LoadDW(addr, value, res);
1941 MIPS16e_compute_return_epc(regs, &oldinst);
1942 regs->regs[reg] = value;
1944 #endif /* CONFIG_64BIT */
1946 /* Cannot handle 64-bit instructions in 32-bit kernel */
/*
 * Stores: EPC is advanced *before* reading the source register, since
 * for stores the register value must be sampled after any jump/branch
 * bookkeeping -- note the order differs from the load cases.
 */
1950 if (!access_ok(VERIFY_WRITE, addr, 2))
1953 MIPS16e_compute_return_epc(regs, &oldinst);
1954 value = regs->regs[reg];
1955 StoreHW(addr, value, res);
1961 case MIPS16e_swsp_op:
1962 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1963 if (!access_ok(VERIFY_WRITE, addr, 4))
1966 MIPS16e_compute_return_epc(regs, &oldinst);
1967 value = regs->regs[reg];
1968 StoreW(addr, value, res);
/* Doubleword store (sd): CONFIG_64BIT only. */
1977 * A 32-bit kernel might be running on a 64-bit processor. But
1978 * if we're on a 32-bit processor and an i-cache incoherency
1979 * or race makes us see a 64-bit instruction here the sdl/sdr
1980 * would blow up, so for now we don't handle unaligned 64-bit
1981 * instructions on 32-bit kernels.
1983 if (!access_ok(VERIFY_WRITE, addr, 8))
1986 MIPS16e_compute_return_epc(regs, &oldinst);
1987 value = regs->regs[reg];
1988 StoreDW(addr, value, res);
1992 #endif /* CONFIG_64BIT */
1994 /* Cannot handle 64-bit instructions in 32-bit kernel */
/* Unrecognized opcode: fall through to the SIGILL path below. */
1999 * Pheeee... We encountered an yet unknown instruction or
2000 * cache coherence problem. Die sucker, die ...
/* Success path: count the emulated access for debugfs statistics. */
2005 #ifdef CONFIG_DEBUG_FS
2006 unaligned_instructions++;
/*
 * Error paths.  All of them undo the EPC/$31 update first so the
 * signal (or kernel fixup) sees the faulting instruction's state.
 */
2012 /* roll back jump/branch */
2013 regs->cp0_epc = origpc;
2014 regs->regs[31] = orig31;
2015 /* Did we have an exception handler installed? */
2016 if (fixup_exception(regs))
/* Bad user address range (access_ok failed): SIGSEGV. */
2019 die_if_kernel("Unhandled kernel unaligned access", regs);
2020 force_sig(SIGSEGV, current);
/* The access itself faulted (res != 0): SIGBUS. */
2025 die_if_kernel("Unhandled kernel unaligned access", regs);
2026 force_sig(SIGBUS, current);
/* Undecodable instruction: SIGILL. */
2032 ("Unhandled kernel unaligned access or invalid instruction", regs);
2033 force_sig(SIGILL, current);
/*
 * do_ade() - top-level C handler for the MIPS address error exception.
 *
 * @regs: trap frame of the faulting context.
 *
 * Decides whether to emulate the unaligned access in software (per-ISA
 * dispatch: microMIPS, MIPS16e, or classic MIPS), forward it to the
 * process as a signal, or kill a misbehaving kernel context.
 *
 * NOTE(review): elided excerpt -- the goto targets / braces between the
 * numbered lines are not visible here.
 */
2036 asmlinkage void do_ade(struct pt_regs *regs)
2038 enum ctx_state prev_state;
2039 unsigned int __user *pc;
/* Enter exception context (RCU/context-tracking bookkeeping). */
2042 prev_state = exception_enter();
/* Account the fault to perf's software alignment-fault counter. */
2043 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
2044 1, regs, regs->cp0_badvaddr);
2046 * Did we catch a fault trying to load an instruction?
/* Instruction fetch itself was misaligned -- cannot emulate that. */
2048 if (regs->cp0_badvaddr == regs->cp0_epc)
/* User task without TIF_FIXADE opted out of software emulation. */
2051 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
/* Global policy override: deliver the signal instead of emulating. */
2053 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
2057 * Do branch emulation only if we didn't forward the exception.
2058 * This is all so but ugly ...
2062 * Are we running in microMIPS mode?
/* Compressed-ISA path: EPC has the ISA-mode bit set. */
2064 if (get_isa16_mode(regs->cp0_epc)) {
2066 * Did we catch a fault trying to load an instruction in
2069 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
2071 if (unaligned_action == UNALIGNED_ACTION_SHOW)
2072 show_registers(regs);
2074 if (cpu_has_mmips) {
/* Kernel-mode compressed faults take the non-user branch first. */
2076 if (!user_mode(regs))
2078 emulate_load_store_microMIPS(regs,
2079 (void __user *)regs->cp0_badvaddr);
2085 if (cpu_has_mips16) {
2087 if (!user_mode(regs))
2089 emulate_load_store_MIPS16e(regs,
2090 (void __user *)regs->cp0_badvaddr);
/* Classic 32-bit-encoded MIPS path. */
2099 if (unaligned_action == UNALIGNED_ACTION_SHOW)
2100 show_registers(regs);
2101 pc = (unsigned int __user *)exception_epc(regs);
2104 if (!user_mode(regs))
2106 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
/* Non-emulatable fault: fatal in kernel mode, SIGBUS for user mode. */
2112 die_if_kernel("Kernel unaligned instruction access", regs);
2113 force_sig(SIGBUS, current);
2116 * XXX On return from the signal handler we should advance the epc
/* Leave exception context symmetric with exception_enter() above. */
2118 exception_exit(prev_state);
2121 #ifdef CONFIG_DEBUG_FS
2122 extern struct dentry *mips_debugfs_dir;
/*
 * debugfs_unaligned() - boot-time initcall that exposes the unaligned-
 * access emulation knobs under the MIPS debugfs directory:
 *   unaligned_instructions  (read-only counter of emulated accesses)
 *   unaligned_action        (read/write policy: emulate/signal/show)
 * Returns early if the parent debugfs dir was never created.
 * NOTE(review): elided excerpt -- error-return lines are not visible.
 */
2123 static int __init debugfs_unaligned(void)
2127 if (!mips_debugfs_dir)
2129 d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
2130 mips_debugfs_dir, &unaligned_instructions);
2133 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
2134 mips_debugfs_dir, &unaligned_action);
/* Register for execution during kernel init. */
2139 __initcall(debugfs_unaligned);