2 * Handle unaligned accesses by emulation.
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2014 Imagination Technologies Ltd.
12 * This file contains exception handler for address error exception with the
13 * special capability to execute faulting instructions in software. The
14 * handler does not try to handle the case when the program counter points
15 * to an address not aligned to a word boundary.
17 * Putting data to unaligned addresses is a bad practice even on Intel where
18 * only the performance is affected. Much worse is that such code is non-
19 * portable. Due to several programs that die on MIPS due to alignment
20 * problems I decided to implement this handler anyway though I originally
21 * didn't intend to do this at all for user code.
23 * For now I enable fixing of address errors by default to make life easier.
24 * I however intend to disable this sometime in the future when the alignment
25 * problems with user programs have been fixed. For programmers this is the
28 * Fixing address errors is a per process option. The option is inherited
29 * across fork(2) and execve(2) calls. If you really want to use the
30 * option in your user programs - I discourage the use of the software
31 * emulation strongly - use the following code in your userland stuff:
33 * #include <sys/sysmips.h>
36 * sysmips(MIPS_FIXADE, x);
39 * The argument x is 0 for disabling software emulation, enabled otherwise.
41 * Below is a little program to play around with this feature.
44 * #include <sys/sysmips.h>
47 * unsigned char bar[8];
50 * main(int argc, char *argv[])
52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53 * unsigned int *p = (unsigned int *) (x.bar + 3);
57 * sysmips(MIPS_FIXADE, atoi(argv[1]));
59 * printf("*p = %08lx\n", *p);
63 * for(i = 0; i <= 7; i++)
64 * printf("%02x ", x.bar[i]);
68 * Coprocessor loads are not supported; I think this case is unimportant
71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72 * exception for the R6000.
73 * A store crossing a page boundary might be executed only partially.
74 * Undo the partial store in this case.
76 #include <linux/context_tracking.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
89 #include <asm/fpu_emulator.h>
91 #include <asm/uaccess.h>
93 #include <asm/fpu_emulator.h>
/* Stringify helper used to emit STR(PTR) (".word"/".dword") into asm. */
95 #define STR(x) __STR(x)
/*
 * Per-boot policy for a fixed-up unaligned access: fix silently, send a
 * signal, or fix and dump state.
 * NOTE(review): the "enum {" opener and closing "};" for these constants
 * are missing from this truncated listing — verify against the full file.
 */
99 UNALIGNED_ACTION_QUIET,
100 UNALIGNED_ACTION_SIGNAL,
101 UNALIGNED_ACTION_SHOW,
103 #ifdef CONFIG_DEBUG_FS
/* debugfs-visible counters: emulated-instruction count and chosen action. */
104 static u32 unaligned_instructions;
105 static u32 unaligned_action;
/* Without debugfs there is no tunable; always fix up quietly.
 * NOTE(review): the "#else"/"#endif" lines are missing in this listing. */
107 #define unaligned_action UNALIGNED_ACTION_QUIET
109 extern void show_registers(struct pt_regs *regs);
/*
 * LoadHW(): emulate a signed halfword load from a possibly-unaligned user
 * address one byte at a time; a fault on either byte reaches the .fixup
 * stub (label 4) which stores -EFAULT into 'res'.
 * NOTE(review): content lines skip (115->121, 122->125) — the byte-merge
 * (sll/or), ".set at", branch back and ".previous" directives present in
 * the complete file are missing from this listing.
 */
112 #define LoadHW(addr, value, res) \
113 __asm__ __volatile__ (".set\tnoat\n" \
114 "1:\t"user_lb("%0", "0(%2)")"\n" \
115 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
121 ".section\t.fixup,\"ax\"\n\t" \
122 "4:\tli\t%1, %3\n\t" \
125 ".section\t__ex_table,\"a\"\n\t" \
126 STR(PTR)"\t1b, 4b\n\t" \
127 STR(PTR)"\t2b, 4b\n\t" \
129 : "=&r" (value), "=r" (res) \
130 : "r" (addr), "i" (-EFAULT));
/*
 * LoadW(): emulate an unaligned 32-bit load (big-endian layout).
 * Pre-R6 uses the lwl/lwr pair; R6 removed those instructions, so the
 * second variant assembles the word from four byte loads.  Any faulting
 * access jumps to the .fixup stub which sets res = -EFAULT.
 * NOTE(review): this listing is truncated — the "#else" separating the
 * two variants and several merge/epilogue asm lines are missing; verify
 * against the complete file.
 */
132 #ifndef CONFIG_CPU_MIPSR6
133 #define LoadW(addr, value, res) \
134 __asm__ __volatile__ ( \
135 "1:\t"user_lwl("%0", "(%2)")"\n" \
136 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
140 ".section\t.fixup,\"ax\"\n\t" \
141 "4:\tli\t%1, %3\n\t" \
144 ".section\t__ex_table,\"a\"\n\t" \
145 STR(PTR)"\t1b, 4b\n\t" \
146 STR(PTR)"\t2b, 4b\n\t" \
148 : "=&r" (value), "=r" (res) \
149 : "r" (addr), "i" (-EFAULT));
151 /* MIPSR6 has no lwl instruction */
152 #define LoadW(addr, value, res) \
153 __asm__ __volatile__ ( \
156 "1:"user_lb("%0", "0(%2)")"\n\t" \
157 "2:"user_lbu("$1", "1(%2)")"\n\t" \
160 "3:"user_lbu("$1", "2(%2)")"\n\t" \
163 "4:"user_lbu("$1", "3(%2)")"\n\t" \
170 ".section\t.fixup,\"ax\"\n\t" \
171 "11:\tli\t%1, %3\n\t" \
174 ".section\t__ex_table,\"a\"\n\t" \
175 STR(PTR)"\t1b, 11b\n\t" \
176 STR(PTR)"\t2b, 11b\n\t" \
177 STR(PTR)"\t3b, 11b\n\t" \
178 STR(PTR)"\t4b, 11b\n\t" \
180 : "=&r" (value), "=r" (res) \
181 : "r" (addr), "i" (-EFAULT));
182 #endif /* CONFIG_CPU_MIPSR6 */
/*
 * LoadHWU(): emulate an unsigned halfword load (big-endian) via two byte
 * loads; faults reach the .fixup stub which sets res = -EFAULT.
 * NOTE(review): interior lines (byte merge, ".set noat/at") are missing
 * from this truncated listing — verify against the complete file.
 */
184 #define LoadHWU(addr, value, res) \
185 __asm__ __volatile__ ( \
187 "1:\t"user_lbu("%0", "0(%2)")"\n" \
188 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
195 ".section\t.fixup,\"ax\"\n\t" \
196 "4:\tli\t%1, %3\n\t" \
199 ".section\t__ex_table,\"a\"\n\t" \
200 STR(PTR)"\t1b, 4b\n\t" \
201 STR(PTR)"\t2b, 4b\n\t" \
203 : "=&r" (value), "=r" (res) \
204 : "r" (addr), "i" (-EFAULT));
/*
 * Pre-R6 64-bit kernel variants (big-endian): LoadWU() does lwl/lwr then
 * dsll/dsrl by 32 to zero-extend the 32-bit result; LoadDW() uses the
 * ldl/ldr pair for an unaligned doubleword.  Faults set res = -EFAULT
 * via the .fixup stubs.
 * NOTE(review): listing is truncated — several epilogue lines missing.
 */
206 #ifndef CONFIG_CPU_MIPSR6
207 #define LoadWU(addr, value, res) \
208 __asm__ __volatile__ ( \
209 "1:\t"user_lwl("%0", "(%2)")"\n" \
210 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
211 "dsll\t%0, %0, 32\n\t" \
212 "dsrl\t%0, %0, 32\n\t" \
216 "\t.section\t.fixup,\"ax\"\n\t" \
217 "4:\tli\t%1, %3\n\t" \
220 ".section\t__ex_table,\"a\"\n\t" \
221 STR(PTR)"\t1b, 4b\n\t" \
222 STR(PTR)"\t2b, 4b\n\t" \
224 : "=&r" (value), "=r" (res) \
225 : "r" (addr), "i" (-EFAULT));
227 #define LoadDW(addr, value, res) \
228 __asm__ __volatile__ ( \
229 "1:\tldl\t%0, (%2)\n" \
230 "2:\tldr\t%0, 7(%2)\n\t" \
234 "\t.section\t.fixup,\"ax\"\n\t" \
235 "4:\tli\t%1, %3\n\t" \
238 ".section\t__ex_table,\"a\"\n\t" \
239 STR(PTR)"\t1b, 4b\n\t" \
240 STR(PTR)"\t2b, 4b\n\t" \
242 : "=&r" (value), "=r" (res) \
243 : "r" (addr), "i" (-EFAULT));
245 /* MIPSR6 has no lwl and ldl instructions */
/*
 * MIPS R6 variants (big-endian): lwl/ldl were removed, so LoadWU()
 * assembles the word from four byte loads and LoadDW() builds the
 * doubleword from eight byte loads, shifting the accumulator left by 8
 * between bytes.  Faults reach the .fixup stub (label 11) which sets
 * res = -EFAULT.
 * NOTE(review): listing is truncated — the "or" merge lines between the
 * byte loads and the closing asm/clobber lines are missing here.
 */
246 #define LoadWU(addr, value, res) \
247 __asm__ __volatile__ ( \
250 "1:"user_lbu("%0", "0(%2)")"\n\t" \
251 "2:"user_lbu("$1", "1(%2)")"\n\t" \
254 "3:"user_lbu("$1", "2(%2)")"\n\t" \
257 "4:"user_lbu("$1", "3(%2)")"\n\t" \
264 ".section\t.fixup,\"ax\"\n\t" \
265 "11:\tli\t%1, %3\n\t" \
268 ".section\t__ex_table,\"a\"\n\t" \
269 STR(PTR)"\t1b, 11b\n\t" \
270 STR(PTR)"\t2b, 11b\n\t" \
271 STR(PTR)"\t3b, 11b\n\t" \
272 STR(PTR)"\t4b, 11b\n\t" \
274 : "=&r" (value), "=r" (res) \
275 : "r" (addr), "i" (-EFAULT));
277 #define LoadDW(addr, value, res) \
278 __asm__ __volatile__ ( \
281 "1:lb\t%0, 0(%2)\n\t" \
282 "2:lbu\t $1, 1(%2)\n\t" \
283 "dsll\t%0, 0x8\n\t" \
285 "3:lbu\t$1, 2(%2)\n\t" \
286 "dsll\t%0, 0x8\n\t" \
288 "4:lbu\t$1, 3(%2)\n\t" \
289 "dsll\t%0, 0x8\n\t" \
291 "5:lbu\t$1, 4(%2)\n\t" \
292 "dsll\t%0, 0x8\n\t" \
294 "6:lbu\t$1, 5(%2)\n\t" \
295 "dsll\t%0, 0x8\n\t" \
297 "7:lbu\t$1, 6(%2)\n\t" \
298 "dsll\t%0, 0x8\n\t" \
300 "8:lbu\t$1, 7(%2)\n\t" \
301 "dsll\t%0, 0x8\n\t" \
307 ".section\t.fixup,\"ax\"\n\t" \
308 "11:\tli\t%1, %3\n\t" \
311 ".section\t__ex_table,\"a\"\n\t" \
312 STR(PTR)"\t1b, 11b\n\t" \
313 STR(PTR)"\t2b, 11b\n\t" \
314 STR(PTR)"\t3b, 11b\n\t" \
315 STR(PTR)"\t4b, 11b\n\t" \
316 STR(PTR)"\t5b, 11b\n\t" \
317 STR(PTR)"\t6b, 11b\n\t" \
318 STR(PTR)"\t7b, 11b\n\t" \
319 STR(PTR)"\t8b, 11b\n\t" \
321 : "=&r" (value), "=r" (res) \
322 : "r" (addr), "i" (-EFAULT));
323 #endif /* CONFIG_CPU_MIPSR6 */
/*
 * StoreHW(): emulate an unaligned halfword store (big-endian) as two
 * byte stores — low byte to offset 1, then the shifted high byte to
 * offset 0.  Faults set res = -EFAULT via the .fixup stub.
 * NOTE(review): interior lines are missing from this truncated listing.
 */
326 #define StoreHW(addr, value, res) \
327 __asm__ __volatile__ ( \
329 "1:\t"user_sb("%1", "1(%2)")"\n" \
330 "srl\t$1, %1, 0x8\n" \
331 "2:\t"user_sb("$1", "0(%2)")"\n" \
336 ".section\t.fixup,\"ax\"\n\t" \
337 "4:\tli\t%0, %3\n\t" \
340 ".section\t__ex_table,\"a\"\n\t" \
341 STR(PTR)"\t1b, 4b\n\t" \
342 STR(PTR)"\t2b, 4b\n\t" \
345 : "r" (value), "r" (addr), "i" (-EFAULT));
/*
 * Pre-R6 store variants (big-endian): StoreW() uses the swl/swr pair,
 * StoreDW() the sdl/sdr pair.  Faults set res = -EFAULT via .fixup.
 * Note the file-header TODO: a store crossing a page boundary may be
 * executed only partially and is not undone.
 * NOTE(review): listing truncated — output-operand lines are missing.
 */
347 #ifndef CONFIG_CPU_MIPSR6
348 #define StoreW(addr, value, res) \
349 __asm__ __volatile__ ( \
350 "1:\t"user_swl("%1", "(%2)")"\n" \
351 "2:\t"user_swr("%1", "3(%2)")"\n\t" \
355 ".section\t.fixup,\"ax\"\n\t" \
356 "4:\tli\t%0, %3\n\t" \
359 ".section\t__ex_table,\"a\"\n\t" \
360 STR(PTR)"\t1b, 4b\n\t" \
361 STR(PTR)"\t2b, 4b\n\t" \
364 : "r" (value), "r" (addr), "i" (-EFAULT));
366 #define StoreDW(addr, value, res) \
367 __asm__ __volatile__ ( \
368 "1:\tsdl\t%1,(%2)\n" \
369 "2:\tsdr\t%1, 7(%2)\n\t" \
373 ".section\t.fixup,\"ax\"\n\t" \
374 "4:\tli\t%0, %3\n\t" \
377 ".section\t__ex_table,\"a\"\n\t" \
378 STR(PTR)"\t1b, 4b\n\t" \
379 STR(PTR)"\t2b, 4b\n\t" \
382 : "r" (value), "r" (addr), "i" (-EFAULT));
/*
 * MIPS R6 store variants (big-endian): swl/sdl were removed, so StoreW()
 * emits four byte stores (LSB at offset 3 first, shifting right by 8
 * between bytes) and StoreDW() emits eight.  Faults reach the .fixup
 * stub (label 11) which sets res = -EFAULT.
 * NOTE(review): listing truncated — "#else", ".set" directives and the
 * closing clobber lines are missing; verify against the complete file.
 */
384 /* MIPSR6 has no swl and sdl instructions */
385 #define StoreW(addr, value, res) \
386 __asm__ __volatile__ ( \
389 "1:"user_sb("%1", "3(%2)")"\n\t" \
390 "srl\t$1, %1, 0x8\n\t" \
391 "2:"user_sb("$1", "2(%2)")"\n\t" \
392 "srl\t$1, $1, 0x8\n\t" \
393 "3:"user_sb("$1", "1(%2)")"\n\t" \
394 "srl\t$1, $1, 0x8\n\t" \
395 "4:"user_sb("$1", "0(%2)")"\n\t" \
400 ".section\t.fixup,\"ax\"\n\t" \
401 "11:\tli\t%0, %3\n\t" \
404 ".section\t__ex_table,\"a\"\n\t" \
405 STR(PTR)"\t1b, 11b\n\t" \
406 STR(PTR)"\t2b, 11b\n\t" \
407 STR(PTR)"\t3b, 11b\n\t" \
408 STR(PTR)"\t4b, 11b\n\t" \
411 : "r" (value), "r" (addr), "i" (-EFAULT) \
414 #define StoreDW(addr, value, res) \
415 __asm__ __volatile__ ( \
418 "1:sb\t%1, 7(%2)\n\t" \
419 "dsrl\t$1, %1, 0x8\n\t" \
420 "2:sb\t$1, 6(%2)\n\t" \
421 "dsrl\t$1, $1, 0x8\n\t" \
422 "3:sb\t$1, 5(%2)\n\t" \
423 "dsrl\t$1, $1, 0x8\n\t" \
424 "4:sb\t$1, 4(%2)\n\t" \
425 "dsrl\t$1, $1, 0x8\n\t" \
426 "5:sb\t$1, 3(%2)\n\t" \
427 "dsrl\t$1, $1, 0x8\n\t" \
428 "6:sb\t$1, 2(%2)\n\t" \
429 "dsrl\t$1, $1, 0x8\n\t" \
430 "7:sb\t$1, 1(%2)\n\t" \
431 "dsrl\t$1, $1, 0x8\n\t" \
432 "8:sb\t$1, 0(%2)\n\t" \
433 "dsrl\t$1, $1, 0x8\n\t" \
438 ".section\t.fixup,\"ax\"\n\t" \
439 "11:\tli\t%0, %3\n\t" \
442 ".section\t__ex_table,\"a\"\n\t" \
443 STR(PTR)"\t1b, 11b\n\t" \
444 STR(PTR)"\t2b, 11b\n\t" \
445 STR(PTR)"\t3b, 11b\n\t" \
446 STR(PTR)"\t4b, 11b\n\t" \
447 STR(PTR)"\t5b, 11b\n\t" \
448 STR(PTR)"\t6b, 11b\n\t" \
449 STR(PTR)"\t7b, 11b\n\t" \
450 STR(PTR)"\t8b, 11b\n\t" \
453 : "r" (value), "r" (addr), "i" (-EFAULT) \
455 #endif /* CONFIG_CPU_MIPSR6 */
457 #else /* __BIG_ENDIAN */
/*
 * Little-endian LoadHW(): same as the big-endian variant but with the
 * byte offsets mirrored (sign byte at offset 1).  Faults set
 * res = -EFAULT via the .fixup stub.
 * NOTE(review): interior merge/epilogue lines missing in this listing.
 */
459 #define LoadHW(addr, value, res) \
460 __asm__ __volatile__ (".set\tnoat\n" \
461 "1:\t"user_lb("%0", "1(%2)")"\n" \
462 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
468 ".section\t.fixup,\"ax\"\n\t" \
469 "4:\tli\t%1, %3\n\t" \
472 ".section\t__ex_table,\"a\"\n\t" \
473 STR(PTR)"\t1b, 4b\n\t" \
474 STR(PTR)"\t2b, 4b\n\t" \
476 : "=&r" (value), "=r" (res) \
477 : "r" (addr), "i" (-EFAULT));
/*
 * Little-endian LoadW(): lwl/lwr offsets are swapped relative to the
 * big-endian variant; the R6 fallback reads the bytes high-to-low
 * (offset 3 first).  Faults set res = -EFAULT via .fixup.
 * NOTE(review): the "#else" between variants and merge lines missing.
 */
479 #ifndef CONFIG_CPU_MIPSR6
480 #define LoadW(addr, value, res) \
481 __asm__ __volatile__ ( \
482 "1:\t"user_lwl("%0", "3(%2)")"\n" \
483 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
487 ".section\t.fixup,\"ax\"\n\t" \
488 "4:\tli\t%1, %3\n\t" \
491 ".section\t__ex_table,\"a\"\n\t" \
492 STR(PTR)"\t1b, 4b\n\t" \
493 STR(PTR)"\t2b, 4b\n\t" \
495 : "=&r" (value), "=r" (res) \
496 : "r" (addr), "i" (-EFAULT));
498 /* MIPSR6 has no lwl instruction */
499 #define LoadW(addr, value, res) \
500 __asm__ __volatile__ ( \
503 "1:"user_lb("%0", "3(%2)")"\n\t" \
504 "2:"user_lbu("$1", "2(%2)")"\n\t" \
507 "3:"user_lbu("$1", "1(%2)")"\n\t" \
510 "4:"user_lbu("$1", "0(%2)")"\n\t" \
517 ".section\t.fixup,\"ax\"\n\t" \
518 "11:\tli\t%1, %3\n\t" \
521 ".section\t__ex_table,\"a\"\n\t" \
522 STR(PTR)"\t1b, 11b\n\t" \
523 STR(PTR)"\t2b, 11b\n\t" \
524 STR(PTR)"\t3b, 11b\n\t" \
525 STR(PTR)"\t4b, 11b\n\t" \
527 : "=&r" (value), "=r" (res) \
528 : "r" (addr), "i" (-EFAULT));
529 #endif /* CONFIG_CPU_MIPSR6 */
/*
 * Little-endian LoadHWU(): unsigned halfword via two byte loads with
 * mirrored offsets.  Faults set res = -EFAULT via the .fixup stub.
 * NOTE(review): interior merge lines missing in this truncated listing.
 */
532 #define LoadHWU(addr, value, res) \
533 __asm__ __volatile__ ( \
535 "1:\t"user_lbu("%0", "1(%2)")"\n" \
536 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
543 ".section\t.fixup,\"ax\"\n\t" \
544 "4:\tli\t%1, %3\n\t" \
547 ".section\t__ex_table,\"a\"\n\t" \
548 STR(PTR)"\t1b, 4b\n\t" \
549 STR(PTR)"\t2b, 4b\n\t" \
551 : "=&r" (value), "=r" (res) \
552 : "r" (addr), "i" (-EFAULT));
/*
 * Pre-R6 64-bit kernel variants (little-endian): LoadWU() zero-extends
 * the lwl/lwr result with dsll/dsrl by 32; LoadDW() uses ldl/ldr with
 * mirrored offsets.  Faults set res = -EFAULT via the .fixup stubs.
 * NOTE(review): listing truncated — several epilogue lines missing.
 */
554 #ifndef CONFIG_CPU_MIPSR6
555 #define LoadWU(addr, value, res) \
556 __asm__ __volatile__ ( \
557 "1:\t"user_lwl("%0", "3(%2)")"\n" \
558 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
559 "dsll\t%0, %0, 32\n\t" \
560 "dsrl\t%0, %0, 32\n\t" \
564 "\t.section\t.fixup,\"ax\"\n\t" \
565 "4:\tli\t%1, %3\n\t" \
568 ".section\t__ex_table,\"a\"\n\t" \
569 STR(PTR)"\t1b, 4b\n\t" \
570 STR(PTR)"\t2b, 4b\n\t" \
572 : "=&r" (value), "=r" (res) \
573 : "r" (addr), "i" (-EFAULT));
575 #define LoadDW(addr, value, res) \
576 __asm__ __volatile__ ( \
577 "1:\tldl\t%0, 7(%2)\n" \
578 "2:\tldr\t%0, (%2)\n\t" \
582 "\t.section\t.fixup,\"ax\"\n\t" \
583 "4:\tli\t%1, %3\n\t" \
586 ".section\t__ex_table,\"a\"\n\t" \
587 STR(PTR)"\t1b, 4b\n\t" \
588 STR(PTR)"\t2b, 4b\n\t" \
590 : "=&r" (value), "=r" (res) \
591 : "r" (addr), "i" (-EFAULT));
593 /* MIPSR6 has no lwl and ldl instructions */
/*
 * MIPS R6 variants (little-endian): byte-at-a-time replacements for the
 * removed lwl/ldl, reading from the highest offset down and shifting the
 * accumulator left by 8 between bytes.  Faults reach the .fixup stub
 * (label 11) which sets res = -EFAULT.
 * NOTE(review): listing truncated — the "or" merge lines and closing
 * asm lines are missing; verify against the complete file.
 */
594 #define LoadWU(addr, value, res) \
595 __asm__ __volatile__ ( \
598 "1:"user_lbu("%0", "3(%2)")"\n\t" \
599 "2:"user_lbu("$1", "2(%2)")"\n\t" \
602 "3:"user_lbu("$1", "1(%2)")"\n\t" \
605 "4:"user_lbu("$1", "0(%2)")"\n\t" \
612 ".section\t.fixup,\"ax\"\n\t" \
613 "11:\tli\t%1, %3\n\t" \
616 ".section\t__ex_table,\"a\"\n\t" \
617 STR(PTR)"\t1b, 11b\n\t" \
618 STR(PTR)"\t2b, 11b\n\t" \
619 STR(PTR)"\t3b, 11b\n\t" \
620 STR(PTR)"\t4b, 11b\n\t" \
622 : "=&r" (value), "=r" (res) \
623 : "r" (addr), "i" (-EFAULT));
625 #define LoadDW(addr, value, res) \
626 __asm__ __volatile__ ( \
629 "1:lb\t%0, 7(%2)\n\t" \
630 "2:lbu\t$1, 6(%2)\n\t" \
631 "dsll\t%0, 0x8\n\t" \
633 "3:lbu\t$1, 5(%2)\n\t" \
634 "dsll\t%0, 0x8\n\t" \
636 "4:lbu\t$1, 4(%2)\n\t" \
637 "dsll\t%0, 0x8\n\t" \
639 "5:lbu\t$1, 3(%2)\n\t" \
640 "dsll\t%0, 0x8\n\t" \
642 "6:lbu\t$1, 2(%2)\n\t" \
643 "dsll\t%0, 0x8\n\t" \
645 "7:lbu\t$1, 1(%2)\n\t" \
646 "dsll\t%0, 0x8\n\t" \
648 "8:lbu\t$1, 0(%2)\n\t" \
649 "dsll\t%0, 0x8\n\t" \
655 ".section\t.fixup,\"ax\"\n\t" \
656 "11:\tli\t%1, %3\n\t" \
659 ".section\t__ex_table,\"a\"\n\t" \
660 STR(PTR)"\t1b, 11b\n\t" \
661 STR(PTR)"\t2b, 11b\n\t" \
662 STR(PTR)"\t3b, 11b\n\t" \
663 STR(PTR)"\t4b, 11b\n\t" \
664 STR(PTR)"\t5b, 11b\n\t" \
665 STR(PTR)"\t6b, 11b\n\t" \
666 STR(PTR)"\t7b, 11b\n\t" \
667 STR(PTR)"\t8b, 11b\n\t" \
669 : "=&r" (value), "=r" (res) \
670 : "r" (addr), "i" (-EFAULT));
671 #endif /* CONFIG_CPU_MIPSR6 */
/*
 * Little-endian StoreHW(): low byte to offset 0, shifted high byte to
 * offset 1.  Faults set res = -EFAULT via the .fixup stub.
 * NOTE(review): interior lines missing from this truncated listing.
 */
673 #define StoreHW(addr, value, res) \
674 __asm__ __volatile__ ( \
676 "1:\t"user_sb("%1", "0(%2)")"\n" \
677 "srl\t$1,%1, 0x8\n" \
678 "2:\t"user_sb("$1", "1(%2)")"\n" \
683 ".section\t.fixup,\"ax\"\n\t" \
684 "4:\tli\t%0, %3\n\t" \
687 ".section\t__ex_table,\"a\"\n\t" \
688 STR(PTR)"\t1b, 4b\n\t" \
689 STR(PTR)"\t2b, 4b\n\t" \
692 : "r" (value), "r" (addr), "i" (-EFAULT));
/*
 * Pre-R6 little-endian stores: StoreW() via swl/swr, StoreDW() via
 * sdl/sdr, with offsets mirrored relative to big-endian.  Faults set
 * res = -EFAULT via the .fixup stubs.
 * NOTE(review): listing truncated — output-operand lines are missing.
 */
693 #ifndef CONFIG_CPU_MIPSR6
694 #define StoreW(addr, value, res) \
695 __asm__ __volatile__ ( \
696 "1:\t"user_swl("%1", "3(%2)")"\n" \
697 "2:\t"user_swr("%1", "(%2)")"\n\t" \
701 ".section\t.fixup,\"ax\"\n\t" \
702 "4:\tli\t%0, %3\n\t" \
705 ".section\t__ex_table,\"a\"\n\t" \
706 STR(PTR)"\t1b, 4b\n\t" \
707 STR(PTR)"\t2b, 4b\n\t" \
710 : "r" (value), "r" (addr), "i" (-EFAULT));
712 #define StoreDW(addr, value, res) \
713 __asm__ __volatile__ ( \
714 "1:\tsdl\t%1, 7(%2)\n" \
715 "2:\tsdr\t%1, (%2)\n\t" \
719 ".section\t.fixup,\"ax\"\n\t" \
720 "4:\tli\t%0, %3\n\t" \
723 ".section\t__ex_table,\"a\"\n\t" \
724 STR(PTR)"\t1b, 4b\n\t" \
725 STR(PTR)"\t2b, 4b\n\t" \
728 : "r" (value), "r" (addr), "i" (-EFAULT));
/*
 * MIPS R6 little-endian stores: swl/sdl were removed, so StoreW() emits
 * four byte stores starting at offset 0 and StoreDW() eight, shifting
 * the source right by 8 between bytes.  Faults reach the .fixup stub
 * (label 11) which sets res = -EFAULT.
 * NOTE(review): listing truncated — "#else", ".set" directives and the
 * closing clobber lines are missing; verify against the complete file.
 */
730 /* MIPSR6 has no swl and sdl instructions */
731 #define StoreW(addr, value, res) \
732 __asm__ __volatile__ ( \
735 "1:"user_sb("%1", "0(%2)")"\n\t" \
736 "srl\t$1, %1, 0x8\n\t" \
737 "2:"user_sb("$1", "1(%2)")"\n\t" \
738 "srl\t$1, $1, 0x8\n\t" \
739 "3:"user_sb("$1", "2(%2)")"\n\t" \
740 "srl\t$1, $1, 0x8\n\t" \
741 "4:"user_sb("$1", "3(%2)")"\n\t" \
746 ".section\t.fixup,\"ax\"\n\t" \
747 "11:\tli\t%0, %3\n\t" \
750 ".section\t__ex_table,\"a\"\n\t" \
751 STR(PTR)"\t1b, 11b\n\t" \
752 STR(PTR)"\t2b, 11b\n\t" \
753 STR(PTR)"\t3b, 11b\n\t" \
754 STR(PTR)"\t4b, 11b\n\t" \
757 : "r" (value), "r" (addr), "i" (-EFAULT) \
760 #define StoreDW(addr, value, res) \
761 __asm__ __volatile__ ( \
764 "1:sb\t%1, 0(%2)\n\t" \
765 "dsrl\t$1, %1, 0x8\n\t" \
766 "2:sb\t$1, 1(%2)\n\t" \
767 "dsrl\t$1, $1, 0x8\n\t" \
768 "3:sb\t$1, 2(%2)\n\t" \
769 "dsrl\t$1, $1, 0x8\n\t" \
770 "4:sb\t$1, 3(%2)\n\t" \
771 "dsrl\t$1, $1, 0x8\n\t" \
772 "5:sb\t$1, 4(%2)\n\t" \
773 "dsrl\t$1, $1, 0x8\n\t" \
774 "6:sb\t$1, 5(%2)\n\t" \
775 "dsrl\t$1, $1, 0x8\n\t" \
776 "7:sb\t$1, 6(%2)\n\t" \
777 "dsrl\t$1, $1, 0x8\n\t" \
778 "8:sb\t$1, 7(%2)\n\t" \
779 "dsrl\t$1, $1, 0x8\n\t" \
784 ".section\t.fixup,\"ax\"\n\t" \
785 "11:\tli\t%0, %3\n\t" \
788 ".section\t__ex_table,\"a\"\n\t" \
789 STR(PTR)"\t1b, 11b\n\t" \
790 STR(PTR)"\t2b, 11b\n\t" \
791 STR(PTR)"\t3b, 11b\n\t" \
792 STR(PTR)"\t4b, 11b\n\t" \
793 STR(PTR)"\t5b, 11b\n\t" \
794 STR(PTR)"\t6b, 11b\n\t" \
795 STR(PTR)"\t7b, 11b\n\t" \
796 STR(PTR)"\t8b, 11b\n\t" \
799 : "r" (value), "r" (addr), "i" (-EFAULT) \
801 #endif /* CONFIG_CPU_MIPSR6 */
/*
 * Emulate the faulting load/store for a classic 32-bit-wide MIPS
 * instruction at @pc.  @addr is the misaligned data address (BadVAddr).
 * On success the saved EPC is advanced past the instruction (including
 * branch-delay handling via compute_return_epc()); on any failure the
 * EPC and ra are rolled back and SIGSEGV/SIGBUS/SIGILL is forced.
 *
 * NOTE(review): this listing is heavily truncated (content line numbers
 * skip, e.g. 805->807) — case labels, braces and many statements are
 * missing.  Restore the body from the complete file; the only code
 * repair applied here is un-mangling "&current" (was HTML-entity
 * mojibake).
 */
804 static void emulate_load_store_insn(struct pt_regs *regs,
805 void __user *addr, unsigned int __user *pc)
807 union mips_instruction insn;
810 unsigned long origpc;
811 unsigned long orig31;
812 void __user *fault_addr = NULL;
816 origpc = (unsigned long)pc;
817 orig31 = regs->regs[31];
819 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
822 * This load never faults.
824 __get_user(insn.word, pc);
826 switch (insn.i_format.opcode) {
828 * These are instructions that a compiler doesn't generate. We
829 * can assume therefore that the code is MIPS-aware and
830 * really buggy. Emulating these instructions would break the
839 * For these instructions the only way to create an address
840 * error is an attempted access to kernel/supervisor address
857 * The remaining opcodes are the ones that are really of
863 * we can land here only from kernel accessing user memory,
864 * so we need to "switch" the address limit to user space, so
865 * address check can work properly.
/* EVA/spec3 encodings: lhe/lwe/lhue/she/swe dispatched on the func field. */
869 switch (insn.spec3_format.func) {
871 if (!access_ok(VERIFY_READ, addr, 2)) {
875 LoadHW(addr, value, res);
880 compute_return_epc(regs);
881 regs->regs[insn.spec3_format.rt] = value;
884 if (!access_ok(VERIFY_READ, addr, 4)) {
888 LoadW(addr, value, res);
893 compute_return_epc(regs);
894 regs->regs[insn.spec3_format.rt] = value;
897 if (!access_ok(VERIFY_READ, addr, 2)) {
901 LoadHWU(addr, value, res);
906 compute_return_epc(regs);
907 regs->regs[insn.spec3_format.rt] = value;
910 if (!access_ok(VERIFY_WRITE, addr, 2)) {
914 compute_return_epc(regs);
915 value = regs->regs[insn.spec3_format.rt];
916 StoreHW(addr, value, res);
923 if (!access_ok(VERIFY_WRITE, addr, 4)) {
927 compute_return_epc(regs);
928 value = regs->regs[insn.spec3_format.rt];
929 StoreW(addr, value, res);
/* Ordinary lh/lw/lhu/lwu/ld and sh/sw/sd opcodes. */
943 if (!access_ok(VERIFY_READ, addr, 2))
946 LoadHW(addr, value, res);
949 compute_return_epc(regs);
950 regs->regs[insn.i_format.rt] = value;
954 if (!access_ok(VERIFY_READ, addr, 4))
957 LoadW(addr, value, res);
960 compute_return_epc(regs);
961 regs->regs[insn.i_format.rt] = value;
965 if (!access_ok(VERIFY_READ, addr, 2))
968 LoadHWU(addr, value, res);
971 compute_return_epc(regs);
972 regs->regs[insn.i_format.rt] = value;
978 * A 32-bit kernel might be running on a 64-bit processor. But
979 * if we're on a 32-bit processor and an i-cache incoherency
980 * or race makes us see a 64-bit instruction here the sdl/sdr
981 * would blow up, so for now we don't handle unaligned 64-bit
982 * instructions on 32-bit kernels.
984 if (!access_ok(VERIFY_READ, addr, 4))
987 LoadWU(addr, value, res);
990 compute_return_epc(regs);
991 regs->regs[insn.i_format.rt] = value;
993 #endif /* CONFIG_64BIT */
995 /* Cannot handle 64-bit instructions in 32-bit kernel */
1001 * A 32-bit kernel might be running on a 64-bit processor. But
1002 * if we're on a 32-bit processor and an i-cache incoherency
1003 * or race makes us see a 64-bit instruction here the sdl/sdr
1004 * would blow up, so for now we don't handle unaligned 64-bit
1005 * instructions on 32-bit kernels.
1007 if (!access_ok(VERIFY_READ, addr, 8))
1010 LoadDW(addr, value, res);
1013 compute_return_epc(regs);
1014 regs->regs[insn.i_format.rt] = value;
1016 #endif /* CONFIG_64BIT */
1018 /* Cannot handle 64-bit instructions in 32-bit kernel */
1022 if (!access_ok(VERIFY_WRITE, addr, 2))
1025 compute_return_epc(regs);
1026 value = regs->regs[insn.i_format.rt];
1027 StoreHW(addr, value, res);
1033 if (!access_ok(VERIFY_WRITE, addr, 4))
1036 compute_return_epc(regs);
1037 value = regs->regs[insn.i_format.rt];
1038 StoreW(addr, value, res);
1046 * A 32-bit kernel might be running on a 64-bit processor. But
1047 * if we're on a 32-bit processor and an i-cache incoherency
1048 * or race makes us see a 64-bit instruction here the sdl/sdr
1049 * would blow up, so for now we don't handle unaligned 64-bit
1050 * instructions on 32-bit kernels.
1052 if (!access_ok(VERIFY_WRITE, addr, 8))
1055 compute_return_epc(regs);
1056 value = regs->regs[insn.i_format.rt];
1057 StoreDW(addr, value, res);
1061 #endif /* CONFIG_64BIT */
1063 /* Cannot handle 64-bit instructions in 32-bit kernel */
/* Unaligned FP load/store: hand the instruction to the FPU emulator. */
1070 die_if_kernel("Unaligned FP access in kernel code", regs);
1071 BUG_ON(!used_math());
1073 lose_fpu(1); /* Save FPU state for the emulator. */
1074 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1076 own_fpu(1); /* Restore FPU state. */
1078 /* Signal if something went wrong. */
1079 process_fpemu_return(res, fault_addr, 0);
1085 #ifndef CONFIG_CPU_MIPSR6
1087 * COP2 is available to implementor for application specific use.
1088 * It's up to applications to register a notifier chain and do
1089 * whatever they have to do, including possible sending of signals.
1091 * This instruction has been reallocated in Release 6
1094 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
1098 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
1102 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
1106 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
1111 * Pheeee... We encountered a yet unknown instruction or
1112 * cache coherence problem. Die sucker, die ...
1117 #ifdef CONFIG_DEBUG_FS
1118 unaligned_instructions++;
/* Error paths: undo any EPC advance before signalling. */
1124 /* roll back jump/branch */
1125 regs->cp0_epc = origpc;
1126 regs->regs[31] = orig31;
1127 /* Did we have an exception handler installed? */
1128 if (fixup_exception(regs))
1131 die_if_kernel("Unhandled kernel unaligned access", regs);
1132 force_sig(SIGSEGV, current);
1137 die_if_kernel("Unhandled kernel unaligned access", regs);
1138 force_sig(SIGBUS, current);
1144 ("Unhandled kernel unaligned access or invalid instruction", regs);
1145 force_sig(SIGILL, current);
/* Recode table from 16-bit register notation to 32-bit GPR.
 * The 3-bit MIPS16e/microMIPS register field maps 0,1 -> s0,s1 and
 * 2..7 -> v0,v1,a0..a3. */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
/* Recode table from 16-bit STORE register notation to 32-bit GPR.
 * Same mapping as reg16to32 except encoding 0 selects $0 (zero). */
const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
/*
 * Emulate the faulting load/store for a microMIPS instruction.  Decodes
 * a 16- or 32-bit instruction at EPC (plus the following one for
 * branch-delay handling via mm_isBranchInstr()), performs the access
 * byte-safely with the Load*/
/* Store* helpers, then advances EPC to 'contpc'.  On failure the EPC
 * and ra are rolled back and SIGSEGV/SIGBUS/SIGILL is forced.
 *
 * NOTE(review): this listing is heavily truncated (content line numbers
 * skip) — case labels, braces and many statements are missing.  Restore
 * the body from the complete file; the only code repair applied here is
 * un-mangling "&current" (was HTML-entity mojibake).
 */
1154 static void emulate_load_store_microMIPS(struct pt_regs *regs,
1157 unsigned long value;
1160 unsigned int reg = 0, rvar;
1161 unsigned long orig31;
1165 unsigned long origpc, contpc;
1166 union mips_instruction insn;
1167 struct mm_decoded_insn mminsn;
1168 void __user *fault_addr = NULL;
1170 origpc = regs->cp0_epc;
1171 orig31 = regs->regs[31];
1173 mminsn.micro_mips_mode = 1;
1176 * This load never faults.
1178 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
1179 __get_user(halfword, pc16);
1181 contpc = regs->cp0_epc + 2;
1182 word = ((unsigned int)halfword << 16);
/* A 32-bit microMIPS instruction needs the second halfword as well. */
1185 if (!mm_insn_16bit(halfword)) {
1186 __get_user(halfword, pc16);
1188 contpc = regs->cp0_epc + 4;
/* Fetch the next instruction too, for branch/delay-slot decoding. */
1194 if (get_user(halfword, pc16))
1196 mminsn.next_pc_inc = 2;
1197 word = ((unsigned int)halfword << 16);
1199 if (!mm_insn_16bit(halfword)) {
1201 if (get_user(halfword, pc16))
1203 mminsn.next_pc_inc = 4;
1206 mminsn.next_insn = word;
1208 insn = (union mips_instruction)(mminsn.insn);
1209 if (mm_isBranchInstr(regs, mminsn, &contpc))
1210 insn = (union mips_instruction)(mminsn.next_insn);
1212 /* Parse instruction to find what to do */
1214 switch (insn.mm_i_format.opcode) {
1217 switch (insn.mm_x_format.func) {
1219 reg = insn.mm_x_format.rd;
/* Paired/multi-register forms: lwp/swp, ldp/sdp, lwm/swm, ldm/sdm. */
1226 switch (insn.mm_m_format.func) {
1228 reg = insn.mm_m_format.rd;
1232 if (!access_ok(VERIFY_READ, addr, 8))
1235 LoadW(addr, value, res);
1238 regs->regs[reg] = value;
1240 LoadW(addr, value, res);
1243 regs->regs[reg + 1] = value;
1247 reg = insn.mm_m_format.rd;
1251 if (!access_ok(VERIFY_WRITE, addr, 8))
1254 value = regs->regs[reg];
1255 StoreW(addr, value, res);
1259 value = regs->regs[reg + 1];
1260 StoreW(addr, value, res);
1267 reg = insn.mm_m_format.rd;
1271 if (!access_ok(VERIFY_READ, addr, 16))
1274 LoadDW(addr, value, res);
1277 regs->regs[reg] = value;
1279 LoadDW(addr, value, res);
1282 regs->regs[reg + 1] = value;
1284 #endif /* CONFIG_64BIT */
1290 reg = insn.mm_m_format.rd;
1294 if (!access_ok(VERIFY_WRITE, addr, 16))
1297 value = regs->regs[reg];
1298 StoreDW(addr, value, res);
1302 value = regs->regs[reg + 1];
1303 StoreDW(addr, value, res);
1307 #endif /* CONFIG_64BIT */
/* lwm32: load s0..s(rvar-1), optionally s8 (fp) and ra, per rlist. */
1312 reg = insn.mm_m_format.rd;
1314 if ((rvar > 9) || !reg)
1318 (VERIFY_READ, addr, 4 * (rvar + 1)))
1321 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1326 for (i = 16; rvar; rvar--, i++) {
1327 LoadW(addr, value, res);
1331 regs->regs[i] = value;
1333 if ((reg & 0xf) == 9) {
1334 LoadW(addr, value, res);
1338 regs->regs[30] = value;
1341 LoadW(addr, value, res);
1344 regs->regs[31] = value;
1349 reg = insn.mm_m_format.rd;
1351 if ((rvar > 9) || !reg)
1355 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
1358 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1363 for (i = 16; rvar; rvar--, i++) {
1364 value = regs->regs[i];
1365 StoreW(addr, value, res);
1370 if ((reg & 0xf) == 9) {
1371 value = regs->regs[30];
1372 StoreW(addr, value, res);
1378 value = regs->regs[31];
1379 StoreW(addr, value, res);
1387 reg = insn.mm_m_format.rd;
1389 if ((rvar > 9) || !reg)
1393 (VERIFY_READ, addr, 8 * (rvar + 1)))
1396 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
1402 for (i = 16; rvar; rvar--, i++) {
1403 LoadDW(addr, value, res);
1407 regs->regs[i] = value;
1409 if ((reg & 0xf) == 9) {
1410 LoadDW(addr, value, res);
1414 regs->regs[30] = value;
1417 LoadDW(addr, value, res);
1420 regs->regs[31] = value;
1423 #endif /* CONFIG_64BIT */
1429 reg = insn.mm_m_format.rd;
1431 if ((rvar > 9) || !reg)
1435 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
1438 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
1444 for (i = 16; rvar; rvar--, i++) {
1445 value = regs->regs[i];
1446 StoreDW(addr, value, res);
1451 if ((reg & 0xf) == 9) {
1452 value = regs->regs[30];
1453 StoreDW(addr, value, res);
1459 value = regs->regs[31];
1460 StoreDW(addr, value, res);
1465 #endif /* CONFIG_64BIT */
1469 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1475 switch (insn.mm_m_format.func) {
1477 reg = insn.mm_m_format.rd;
1481 /* LL,SC,LLD,SCD are not serviced */
1485 switch (insn.mm_x_format.func) {
/* Unaligned FP access: undo EPC changes, then use the FPU emulator. */
1500 /* roll back jump/branch */
1501 regs->cp0_epc = origpc;
1502 regs->regs[31] = orig31;
1504 die_if_kernel("Unaligned FP access in kernel code", regs);
1505 BUG_ON(!used_math());
1506 BUG_ON(!is_fpu_owner());
1508 lose_fpu(1); /* save the FPU state for the emulator */
1509 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1511 own_fpu(1); /* restore FPU state */
1513 /* If something went wrong, signal */
1514 process_fpemu_return(res, fault_addr, 0);
/* 32-bit immediate forms: pick the destination from the rt field. */
1521 reg = insn.mm_i_format.rt;
1525 reg = insn.mm_i_format.rt;
1529 reg = insn.mm_i_format.rt;
1533 reg = insn.mm_i_format.rt;
1537 reg = insn.mm_i_format.rt;
1541 reg = insn.mm_i_format.rt;
1545 reg = insn.mm_i_format.rt;
/* 16-bit lwm/swm forms (rlist encodes s0.. plus ra). */
1549 switch (insn.mm16_m_format.func) {
1551 reg = insn.mm16_m_format.rlist;
1553 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1556 for (i = 16; rvar; rvar--, i++) {
1557 LoadW(addr, value, res);
1561 regs->regs[i] = value;
1563 LoadW(addr, value, res);
1566 regs->regs[31] = value;
1571 reg = insn.mm16_m_format.rlist;
1573 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1576 for (i = 16; rvar; rvar--, i++) {
1577 value = regs->regs[i];
1578 StoreW(addr, value, res);
1583 value = regs->regs[31];
1584 StoreW(addr, value, res);
/* 16-bit forms: decode the register through the recode tables. */
1595 reg = reg16to32[insn.mm16_rb_format.rt];
1599 reg = reg16to32[insn.mm16_rb_format.rt];
1603 reg = reg16to32st[insn.mm16_rb_format.rt];
1607 reg = reg16to32st[insn.mm16_rb_format.rt];
1611 reg = insn.mm16_r5_format.rt;
1615 reg = insn.mm16_r5_format.rt;
1619 reg = reg16to32[insn.mm16_r3_format.rt];
/* Common access paths once 'reg' and 'addr' are known. */
1627 if (!access_ok(VERIFY_READ, addr, 2))
1630 LoadHW(addr, value, res);
1633 regs->regs[reg] = value;
1637 if (!access_ok(VERIFY_READ, addr, 2))
1640 LoadHWU(addr, value, res);
1643 regs->regs[reg] = value;
1647 if (!access_ok(VERIFY_READ, addr, 4))
1650 LoadW(addr, value, res);
1653 regs->regs[reg] = value;
1659 * A 32-bit kernel might be running on a 64-bit processor. But
1660 * if we're on a 32-bit processor and an i-cache incoherency
1661 * or race makes us see a 64-bit instruction here the sdl/sdr
1662 * would blow up, so for now we don't handle unaligned 64-bit
1663 * instructions on 32-bit kernels.
1665 if (!access_ok(VERIFY_READ, addr, 4))
1668 LoadWU(addr, value, res);
1671 regs->regs[reg] = value;
1673 #endif /* CONFIG_64BIT */
1675 /* Cannot handle 64-bit instructions in 32-bit kernel */
1681 * A 32-bit kernel might be running on a 64-bit processor. But
1682 * if we're on a 32-bit processor and an i-cache incoherency
1683 * or race makes us see a 64-bit instruction here the sdl/sdr
1684 * would blow up, so for now we don't handle unaligned 64-bit
1685 * instructions on 32-bit kernels.
1687 if (!access_ok(VERIFY_READ, addr, 8))
1690 LoadDW(addr, value, res);
1693 regs->regs[reg] = value;
1695 #endif /* CONFIG_64BIT */
1697 /* Cannot handle 64-bit instructions in 32-bit kernel */
1701 if (!access_ok(VERIFY_WRITE, addr, 2))
1704 value = regs->regs[reg];
1705 StoreHW(addr, value, res);
1711 if (!access_ok(VERIFY_WRITE, addr, 4))
1714 value = regs->regs[reg];
1715 StoreW(addr, value, res);
1723 * A 32-bit kernel might be running on a 64-bit processor. But
1724 * if we're on a 32-bit processor and an i-cache incoherency
1725 * or race makes us see a 64-bit instruction here the sdl/sdr
1726 * would blow up, so for now we don't handle unaligned 64-bit
1727 * instructions on 32-bit kernels.
1729 if (!access_ok(VERIFY_WRITE, addr, 8))
1732 value = regs->regs[reg];
1733 StoreDW(addr, value, res);
1737 #endif /* CONFIG_64BIT */
1739 /* Cannot handle 64-bit instructions in 32-bit kernel */
1743 regs->cp0_epc = contpc; /* advance or branch */
1745 #ifdef CONFIG_DEBUG_FS
1746 unaligned_instructions++;
/* Error paths: undo any EPC advance before signalling. */
1751 /* roll back jump/branch */
1752 regs->cp0_epc = origpc;
1753 regs->regs[31] = orig31;
1754 /* Did we have an exception handler installed? */
1755 if (fixup_exception(regs))
1758 die_if_kernel("Unhandled kernel unaligned access", regs);
1759 force_sig(SIGSEGV, current);
1764 die_if_kernel("Unhandled kernel unaligned access", regs);
1765 force_sig(SIGBUS, current);
1771 ("Unhandled kernel unaligned access or invalid instruction", regs);
1772 force_sig(SIGILL, current);
1775 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1777 unsigned long value;
1780 unsigned long orig31;
1782 unsigned long origpc;
1783 union mips16e_instruction mips16inst, oldinst;
1785 origpc = regs->cp0_epc;
1786 orig31 = regs->regs[31];
1787 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1789 * This load never faults.
1791 __get_user(mips16inst.full, pc16);
1792 oldinst = mips16inst;
1794 /* skip EXTEND instruction */
1795 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1797 __get_user(mips16inst.full, pc16);
1798 } else if (delay_slot(regs)) {
1799 /* skip jump instructions */
1800 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1801 if (mips16inst.ri.opcode == MIPS16e_jal_op)
1804 if (get_user(mips16inst.full, pc16))
1808 switch (mips16inst.ri.opcode) {
1809 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1810 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1811 case MIPS16e_ldpc_func:
1812 case MIPS16e_ldsp_func:
1813 reg = reg16to32[mips16inst.ri64.ry];
1816 case MIPS16e_sdsp_func:
1817 reg = reg16to32[mips16inst.ri64.ry];
1820 case MIPS16e_sdrasp_func:
1821 reg = 29; /* GPRSP */
1827 case MIPS16e_swsp_op:
1828 case MIPS16e_lwpc_op:
1829 case MIPS16e_lwsp_op:
1830 reg = reg16to32[mips16inst.ri.rx];
1834 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1836 reg = 29; /* GPRSP */
1840 reg = reg16to32[mips16inst.rri.ry];
1844 switch (mips16inst.ri.opcode) {
1847 case MIPS16e_lbu_op:
1852 if (!access_ok(VERIFY_READ, addr, 2))
1855 LoadHW(addr, value, res);
1858 MIPS16e_compute_return_epc(regs, &oldinst);
1859 regs->regs[reg] = value;
1862 case MIPS16e_lhu_op:
1863 if (!access_ok(VERIFY_READ, addr, 2))
1866 LoadHWU(addr, value, res);
1869 MIPS16e_compute_return_epc(regs, &oldinst);
1870 regs->regs[reg] = value;
1874 case MIPS16e_lwpc_op:
1875 case MIPS16e_lwsp_op:
1876 if (!access_ok(VERIFY_READ, addr, 4))
1879 LoadW(addr, value, res);
1882 MIPS16e_compute_return_epc(regs, &oldinst);
1883 regs->regs[reg] = value;
1886 case MIPS16e_lwu_op:
1889 * A 32-bit kernel might be running on a 64-bit processor. But
1890 * if we're on a 32-bit processor and an i-cache incoherency
1891 * or race makes us see a 64-bit instruction here the sdl/sdr
1892 * would blow up, so for now we don't handle unaligned 64-bit
1893 * instructions on 32-bit kernels.
1895 if (!access_ok(VERIFY_READ, addr, 4))
1898 LoadWU(addr, value, res);
1901 MIPS16e_compute_return_epc(regs, &oldinst);
1902 regs->regs[reg] = value;
1904 #endif /* CONFIG_64BIT */
1906 /* Cannot handle 64-bit instructions in 32-bit kernel */
1913 * A 32-bit kernel might be running on a 64-bit processor. But
1914 * if we're on a 32-bit processor and an i-cache incoherency
1915 * or race makes us see a 64-bit instruction here the sdl/sdr
1916 * would blow up, so for now we don't handle unaligned 64-bit
1917 * instructions on 32-bit kernels.
1919 if (!access_ok(VERIFY_READ, addr, 8))
1922 LoadDW(addr, value, res);
1925 MIPS16e_compute_return_epc(regs, &oldinst);
1926 regs->regs[reg] = value;
1928 #endif /* CONFIG_64BIT */
1930 /* Cannot handle 64-bit instructions in 32-bit kernel */
1934 if (!access_ok(VERIFY_WRITE, addr, 2))
1937 MIPS16e_compute_return_epc(regs, &oldinst);
1938 value = regs->regs[reg];
1939 StoreHW(addr, value, res);
1945 case MIPS16e_swsp_op:
1946 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1947 if (!access_ok(VERIFY_WRITE, addr, 4))
1950 MIPS16e_compute_return_epc(regs, &oldinst);
1951 value = regs->regs[reg];
1952 StoreW(addr, value, res);
1961 * A 32-bit kernel might be running on a 64-bit processor. But
1962 * if we're on a 32-bit processor and an i-cache incoherency
1963 * or race makes us see a 64-bit instruction here the sdl/sdr
1964 * would blow up, so for now we don't handle unaligned 64-bit
1965 * instructions on 32-bit kernels.
1967 if (!access_ok(VERIFY_WRITE, addr, 8))
1970 MIPS16e_compute_return_epc(regs, &oldinst);
1971 value = regs->regs[reg];
1972 StoreDW(addr, value, res);
1976 #endif /* CONFIG_64BIT */
1978 /* Cannot handle 64-bit instructions in 32-bit kernel */
1983 * Pheeee... We encountered an yet unknown instruction or
1984 * cache coherence problem. Die sucker, die ...
1989 #ifdef CONFIG_DEBUG_FS
1990 unaligned_instructions++;
1996 /* roll back jump/branch */
1997 regs->cp0_epc = origpc;
1998 regs->regs[31] = orig31;
1999 /* Did we have an exception handler installed? */
2000 if (fixup_exception(regs))
2003 die_if_kernel("Unhandled kernel unaligned access", regs);
2004 force_sig(SIGSEGV, current);
2009 die_if_kernel("Unhandled kernel unaligned access", regs);
2010 force_sig(SIGBUS, current);
2016 ("Unhandled kernel unaligned access or invalid instruction", regs);
2017 force_sig(SIGILL, current);
2020 asmlinkage void do_ade(struct pt_regs *regs)
2022 enum ctx_state prev_state;
2023 unsigned int __user *pc;
2026 prev_state = exception_enter();
2027 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
2028 1, regs, regs->cp0_badvaddr);
2030 * Did we catch a fault trying to load an instruction?
2032 if (regs->cp0_badvaddr == regs->cp0_epc)
2035 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
2037 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
2041 * Do branch emulation only if we didn't forward the exception.
2042 * This is all so but ugly ...
2046 * Are we running in microMIPS mode?
2048 if (get_isa16_mode(regs->cp0_epc)) {
2050 * Did we catch a fault trying to load an instruction in
2053 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
2055 if (unaligned_action == UNALIGNED_ACTION_SHOW)
2056 show_registers(regs);
2058 if (cpu_has_mmips) {
2060 if (!user_mode(regs))
2062 emulate_load_store_microMIPS(regs,
2063 (void __user *)regs->cp0_badvaddr);
2069 if (cpu_has_mips16) {
2071 if (!user_mode(regs))
2073 emulate_load_store_MIPS16e(regs,
2074 (void __user *)regs->cp0_badvaddr);
2083 if (unaligned_action == UNALIGNED_ACTION_SHOW)
2084 show_registers(regs);
2085 pc = (unsigned int __user *)exception_epc(regs);
2088 if (!user_mode(regs))
2090 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
2096 die_if_kernel("Kernel unaligned instruction access", regs);
2097 force_sig(SIGBUS, current);
2100 * XXX On return from the signal handler we should advance the epc
2102 exception_exit(prev_state);
2105 #ifdef CONFIG_DEBUG_FS
2106 extern struct dentry *mips_debugfs_dir;
2107 static int __init debugfs_unaligned(void)
2111 if (!mips_debugfs_dir)
2113 d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
2114 mips_debugfs_dir, &unaligned_instructions);
2117 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
2118 mips_debugfs_dir, &unaligned_action);
2123 __initcall(debugfs_unaligned);