2 * Handle unaligned accesses by emulation.
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2014 Imagination Technologies Ltd.
12 * This file contains exception handler for address error exception with the
13 * special capability to execute faulting instructions in software. The
14 * handler does not try to handle the case when the program counter points
15 * to an address not aligned to a word boundary.
17 * Putting data to unaligned addresses is a bad practice even on Intel where
18 * only the performance is affected. Much worse is that such code is non-
19 * portable. Due to several programs that die on MIPS due to alignment
20 * problems I decided to implement this handler anyway though I originally
21 * didn't intend to do this at all for user code.
23 * For now I enable fixing of address errors by default to make life easier.
24 * However, I intend to disable this at some point in the future when the alignment
25 * problems with user programs have been fixed. For programmers this is the
28 * Fixing address errors is a per process option. The option is inherited
29 * across fork(2) and execve(2) calls. If you really want to use the
30 * option in your user programs - I discourage the use of the software
31 * emulation strongly - use the following code in your userland stuff:
33 * #include <sys/sysmips.h>
36 * sysmips(MIPS_FIXADE, x);
39 * The argument x is 0 for disabling software emulation, enabled otherwise.
41 * Below a little program to play around with this feature.
44 * #include <sys/sysmips.h>
47 * unsigned char bar[8];
50 * main(int argc, char *argv[])
52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53 * unsigned int *p = (unsigned int *) (x.bar + 3);
57 * sysmips(MIPS_FIXADE, atoi(argv[1]));
59 * printf("*p = %08lx\n", *p);
63 * for(i = 0; i <= 7; i++)
64 * printf("%02x ", x.bar[i]);
68 * Coprocessor loads are not supported; I think this case is unimportant
71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72 * exception for the R6000.
73 * A store crossing a page boundary might be executed only partially.
74 * Undo the partial store in this case.
76 #include <linux/context_tracking.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
89 #include <asm/fpu_emulator.h>
91 #include <asm/uaccess.h>
93 #define STR(x) __STR(x)
97 UNALIGNED_ACTION_QUIET,
98 UNALIGNED_ACTION_SIGNAL,
99 UNALIGNED_ACTION_SHOW,
101 #ifdef CONFIG_DEBUG_FS
102 static u32 unaligned_instructions;	/* count of emulated unaligned accesses; read via debugfs */
103 static u32 unaligned_action;		/* one of the UNALIGNED_ACTION_* values above; set via debugfs */
105 #define unaligned_action UNALIGNED_ACTION_QUIET
107 extern void show_registers(struct pt_regs *regs);
110 #define LoadHW(addr, value, res) \
111 __asm__ __volatile__ (".set\tnoat\n" \
112 "1:\t"user_lb("%0", "0(%2)")"\n" \
113 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
119 ".section\t.fixup,\"ax\"\n\t" \
120 "4:\tli\t%1, %3\n\t" \
123 ".section\t__ex_table,\"a\"\n\t" \
124 STR(PTR)"\t1b, 4b\n\t" \
125 STR(PTR)"\t2b, 4b\n\t" \
127 : "=&r" (value), "=r" (res) \
128 : "r" (addr), "i" (-EFAULT));
130 #ifndef CONFIG_CPU_MIPSR6
131 #define LoadW(addr, value, res) \
132 __asm__ __volatile__ ( \
133 "1:\t"user_lwl("%0", "(%2)")"\n" \
134 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
138 ".section\t.fixup,\"ax\"\n\t" \
139 "4:\tli\t%1, %3\n\t" \
142 ".section\t__ex_table,\"a\"\n\t" \
143 STR(PTR)"\t1b, 4b\n\t" \
144 STR(PTR)"\t2b, 4b\n\t" \
146 : "=&r" (value), "=r" (res) \
147 : "r" (addr), "i" (-EFAULT));
149 /* MIPSR6 has no lwl instruction */
150 #define LoadW(addr, value, res) \
151 __asm__ __volatile__ ( \
154 "1:"user_lb("%0", "0(%2)")"\n\t" \
155 "2:"user_lbu("$1", "1(%2)")"\n\t" \
158 "3:"user_lbu("$1", "2(%2)")"\n\t" \
161 "4:"user_lbu("$1", "3(%2)")"\n\t" \
168 ".section\t.fixup,\"ax\"\n\t" \
169 "11:\tli\t%1, %3\n\t" \
172 ".section\t__ex_table,\"a\"\n\t" \
173 STR(PTR)"\t1b, 11b\n\t" \
174 STR(PTR)"\t2b, 11b\n\t" \
175 STR(PTR)"\t3b, 11b\n\t" \
176 STR(PTR)"\t4b, 11b\n\t" \
178 : "=&r" (value), "=r" (res) \
179 : "r" (addr), "i" (-EFAULT));
180 #endif /* CONFIG_CPU_MIPSR6 */
182 #define LoadHWU(addr, value, res) \
183 __asm__ __volatile__ ( \
185 "1:\t"user_lbu("%0", "0(%2)")"\n" \
186 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
193 ".section\t.fixup,\"ax\"\n\t" \
194 "4:\tli\t%1, %3\n\t" \
197 ".section\t__ex_table,\"a\"\n\t" \
198 STR(PTR)"\t1b, 4b\n\t" \
199 STR(PTR)"\t2b, 4b\n\t" \
201 : "=&r" (value), "=r" (res) \
202 : "r" (addr), "i" (-EFAULT));
204 #ifndef CONFIG_CPU_MIPSR6
205 #define LoadWU(addr, value, res) \
206 __asm__ __volatile__ ( \
207 "1:\t"user_lwl("%0", "(%2)")"\n" \
208 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
209 "dsll\t%0, %0, 32\n\t" \
210 "dsrl\t%0, %0, 32\n\t" \
214 "\t.section\t.fixup,\"ax\"\n\t" \
215 "4:\tli\t%1, %3\n\t" \
218 ".section\t__ex_table,\"a\"\n\t" \
219 STR(PTR)"\t1b, 4b\n\t" \
220 STR(PTR)"\t2b, 4b\n\t" \
222 : "=&r" (value), "=r" (res) \
223 : "r" (addr), "i" (-EFAULT));
225 #define LoadDW(addr, value, res) \
226 __asm__ __volatile__ ( \
227 "1:\tldl\t%0, (%2)\n" \
228 "2:\tldr\t%0, 7(%2)\n\t" \
232 "\t.section\t.fixup,\"ax\"\n\t" \
233 "4:\tli\t%1, %3\n\t" \
236 ".section\t__ex_table,\"a\"\n\t" \
237 STR(PTR)"\t1b, 4b\n\t" \
238 STR(PTR)"\t2b, 4b\n\t" \
240 : "=&r" (value), "=r" (res) \
241 : "r" (addr), "i" (-EFAULT));
243 /* MIPSR6 has no lwl and ldl instructions */
244 #define LoadWU(addr, value, res) \
245 __asm__ __volatile__ ( \
248 "1:"user_lbu("%0", "0(%2)")"\n\t" \
249 "2:"user_lbu("$1", "1(%2)")"\n\t" \
252 "3:"user_lbu("$1", "2(%2)")"\n\t" \
255 "4:"user_lbu("$1", "3(%2)")"\n\t" \
262 ".section\t.fixup,\"ax\"\n\t" \
263 "11:\tli\t%1, %3\n\t" \
266 ".section\t__ex_table,\"a\"\n\t" \
267 STR(PTR)"\t1b, 11b\n\t" \
268 STR(PTR)"\t2b, 11b\n\t" \
269 STR(PTR)"\t3b, 11b\n\t" \
270 STR(PTR)"\t4b, 11b\n\t" \
272 : "=&r" (value), "=r" (res) \
273 : "r" (addr), "i" (-EFAULT));
275 #define LoadDW(addr, value, res) \
276 __asm__ __volatile__ ( \
279 "1:lb\t%0, 0(%2)\n\t" \
280 "2:lbu\t $1, 1(%2)\n\t" \
281 "dsll\t%0, 0x8\n\t" \
283 "3:lbu\t$1, 2(%2)\n\t" \
284 "dsll\t%0, 0x8\n\t" \
286 "4:lbu\t$1, 3(%2)\n\t" \
287 "dsll\t%0, 0x8\n\t" \
289 "5:lbu\t$1, 4(%2)\n\t" \
290 "dsll\t%0, 0x8\n\t" \
292 "6:lbu\t$1, 5(%2)\n\t" \
293 "dsll\t%0, 0x8\n\t" \
295 "7:lbu\t$1, 6(%2)\n\t" \
296 "dsll\t%0, 0x8\n\t" \
298 "8:lbu\t$1, 7(%2)\n\t" \
299 "dsll\t%0, 0x8\n\t" \
305 ".section\t.fixup,\"ax\"\n\t" \
306 "11:\tli\t%1, %3\n\t" \
309 ".section\t__ex_table,\"a\"\n\t" \
310 STR(PTR)"\t1b, 11b\n\t" \
311 STR(PTR)"\t2b, 11b\n\t" \
312 STR(PTR)"\t3b, 11b\n\t" \
313 STR(PTR)"\t4b, 11b\n\t" \
314 STR(PTR)"\t5b, 11b\n\t" \
315 STR(PTR)"\t6b, 11b\n\t" \
316 STR(PTR)"\t7b, 11b\n\t" \
317 STR(PTR)"\t8b, 11b\n\t" \
319 : "=&r" (value), "=r" (res) \
320 : "r" (addr), "i" (-EFAULT));
321 #endif /* CONFIG_CPU_MIPSR6 */
324 #define StoreHW(addr, value, res) \
325 __asm__ __volatile__ ( \
327 "1:\t"user_sb("%1", "1(%2)")"\n" \
328 "srl\t$1, %1, 0x8\n" \
329 "2:\t"user_sb("$1", "0(%2)")"\n" \
334 ".section\t.fixup,\"ax\"\n\t" \
335 "4:\tli\t%0, %3\n\t" \
338 ".section\t__ex_table,\"a\"\n\t" \
339 STR(PTR)"\t1b, 4b\n\t" \
340 STR(PTR)"\t2b, 4b\n\t" \
343 : "r" (value), "r" (addr), "i" (-EFAULT));
345 #ifndef CONFIG_CPU_MIPSR6
346 #define StoreW(addr, value, res) \
347 __asm__ __volatile__ ( \
348 "1:\t"user_swl("%1", "(%2)")"\n" \
349 "2:\t"user_swr("%1", "3(%2)")"\n\t" \
353 ".section\t.fixup,\"ax\"\n\t" \
354 "4:\tli\t%0, %3\n\t" \
357 ".section\t__ex_table,\"a\"\n\t" \
358 STR(PTR)"\t1b, 4b\n\t" \
359 STR(PTR)"\t2b, 4b\n\t" \
362 : "r" (value), "r" (addr), "i" (-EFAULT));
364 #define StoreDW(addr, value, res) \
365 __asm__ __volatile__ ( \
366 "1:\tsdl\t%1,(%2)\n" \
367 "2:\tsdr\t%1, 7(%2)\n\t" \
371 ".section\t.fixup,\"ax\"\n\t" \
372 "4:\tli\t%0, %3\n\t" \
375 ".section\t__ex_table,\"a\"\n\t" \
376 STR(PTR)"\t1b, 4b\n\t" \
377 STR(PTR)"\t2b, 4b\n\t" \
380 : "r" (value), "r" (addr), "i" (-EFAULT));
382 /* MIPSR6 has no swl and sdl instructions */
383 #define StoreW(addr, value, res) \
384 __asm__ __volatile__ ( \
387 "1:"user_sb("%1", "3(%2)")"\n\t" \
388 "srl\t$1, %1, 0x8\n\t" \
389 "2:"user_sb("$1", "2(%2)")"\n\t" \
390 "srl\t$1, $1, 0x8\n\t" \
391 "3:"user_sb("$1", "1(%2)")"\n\t" \
392 "srl\t$1, $1, 0x8\n\t" \
393 "4:"user_sb("$1", "0(%2)")"\n\t" \
398 ".section\t.fixup,\"ax\"\n\t" \
399 "11:\tli\t%0, %3\n\t" \
402 ".section\t__ex_table,\"a\"\n\t" \
403 STR(PTR)"\t1b, 11b\n\t" \
404 STR(PTR)"\t2b, 11b\n\t" \
405 STR(PTR)"\t3b, 11b\n\t" \
406 STR(PTR)"\t4b, 11b\n\t" \
409 : "r" (value), "r" (addr), "i" (-EFAULT) \
412 #define StoreDW(addr, value, res) \
413 __asm__ __volatile__ ( \
416 "1:sb\t%1, 7(%2)\n\t" \
417 "dsrl\t$1, %1, 0x8\n\t" \
418 "2:sb\t$1, 6(%2)\n\t" \
419 "dsrl\t$1, $1, 0x8\n\t" \
420 "3:sb\t$1, 5(%2)\n\t" \
421 "dsrl\t$1, $1, 0x8\n\t" \
422 "4:sb\t$1, 4(%2)\n\t" \
423 "dsrl\t$1, $1, 0x8\n\t" \
424 "5:sb\t$1, 3(%2)\n\t" \
425 "dsrl\t$1, $1, 0x8\n\t" \
426 "6:sb\t$1, 2(%2)\n\t" \
427 "dsrl\t$1, $1, 0x8\n\t" \
428 "7:sb\t$1, 1(%2)\n\t" \
429 "dsrl\t$1, $1, 0x8\n\t" \
430 "8:sb\t$1, 0(%2)\n\t" \
431 "dsrl\t$1, $1, 0x8\n\t" \
436 ".section\t.fixup,\"ax\"\n\t" \
437 "11:\tli\t%0, %3\n\t" \
440 ".section\t__ex_table,\"a\"\n\t" \
441 STR(PTR)"\t1b, 11b\n\t" \
442 STR(PTR)"\t2b, 11b\n\t" \
443 STR(PTR)"\t3b, 11b\n\t" \
444 STR(PTR)"\t4b, 11b\n\t" \
445 STR(PTR)"\t5b, 11b\n\t" \
446 STR(PTR)"\t6b, 11b\n\t" \
447 STR(PTR)"\t7b, 11b\n\t" \
448 STR(PTR)"\t8b, 11b\n\t" \
451 : "r" (value), "r" (addr), "i" (-EFAULT) \
453 #endif /* CONFIG_CPU_MIPSR6 */
455 #else /* __BIG_ENDIAN */
457 #define LoadHW(addr, value, res) \
458 __asm__ __volatile__ (".set\tnoat\n" \
459 "1:\t"user_lb("%0", "1(%2)")"\n" \
460 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
466 ".section\t.fixup,\"ax\"\n\t" \
467 "4:\tli\t%1, %3\n\t" \
470 ".section\t__ex_table,\"a\"\n\t" \
471 STR(PTR)"\t1b, 4b\n\t" \
472 STR(PTR)"\t2b, 4b\n\t" \
474 : "=&r" (value), "=r" (res) \
475 : "r" (addr), "i" (-EFAULT));
477 #ifndef CONFIG_CPU_MIPSR6
478 #define LoadW(addr, value, res) \
479 __asm__ __volatile__ ( \
480 "1:\t"user_lwl("%0", "3(%2)")"\n" \
481 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
485 ".section\t.fixup,\"ax\"\n\t" \
486 "4:\tli\t%1, %3\n\t" \
489 ".section\t__ex_table,\"a\"\n\t" \
490 STR(PTR)"\t1b, 4b\n\t" \
491 STR(PTR)"\t2b, 4b\n\t" \
493 : "=&r" (value), "=r" (res) \
494 : "r" (addr), "i" (-EFAULT));
496 /* MIPSR6 has no lwl instruction */
497 #define LoadW(addr, value, res) \
498 __asm__ __volatile__ ( \
501 "1:"user_lb("%0", "3(%2)")"\n\t" \
502 "2:"user_lbu("$1", "2(%2)")"\n\t" \
505 "3:"user_lbu("$1", "1(%2)")"\n\t" \
508 "4:"user_lbu("$1", "0(%2)")"\n\t" \
515 ".section\t.fixup,\"ax\"\n\t" \
516 "11:\tli\t%1, %3\n\t" \
519 ".section\t__ex_table,\"a\"\n\t" \
520 STR(PTR)"\t1b, 11b\n\t" \
521 STR(PTR)"\t2b, 11b\n\t" \
522 STR(PTR)"\t3b, 11b\n\t" \
523 STR(PTR)"\t4b, 11b\n\t" \
525 : "=&r" (value), "=r" (res) \
526 : "r" (addr), "i" (-EFAULT));
527 #endif /* CONFIG_CPU_MIPSR6 */
530 #define LoadHWU(addr, value, res) \
531 __asm__ __volatile__ ( \
533 "1:\t"user_lbu("%0", "1(%2)")"\n" \
534 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
541 ".section\t.fixup,\"ax\"\n\t" \
542 "4:\tli\t%1, %3\n\t" \
545 ".section\t__ex_table,\"a\"\n\t" \
546 STR(PTR)"\t1b, 4b\n\t" \
547 STR(PTR)"\t2b, 4b\n\t" \
549 : "=&r" (value), "=r" (res) \
550 : "r" (addr), "i" (-EFAULT));
552 #ifndef CONFIG_CPU_MIPSR6
553 #define LoadWU(addr, value, res) \
554 __asm__ __volatile__ ( \
555 "1:\t"user_lwl("%0", "3(%2)")"\n" \
556 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
557 "dsll\t%0, %0, 32\n\t" \
558 "dsrl\t%0, %0, 32\n\t" \
562 "\t.section\t.fixup,\"ax\"\n\t" \
563 "4:\tli\t%1, %3\n\t" \
566 ".section\t__ex_table,\"a\"\n\t" \
567 STR(PTR)"\t1b, 4b\n\t" \
568 STR(PTR)"\t2b, 4b\n\t" \
570 : "=&r" (value), "=r" (res) \
571 : "r" (addr), "i" (-EFAULT));
573 #define LoadDW(addr, value, res) \
574 __asm__ __volatile__ ( \
575 "1:\tldl\t%0, 7(%2)\n" \
576 "2:\tldr\t%0, (%2)\n\t" \
580 "\t.section\t.fixup,\"ax\"\n\t" \
581 "4:\tli\t%1, %3\n\t" \
584 ".section\t__ex_table,\"a\"\n\t" \
585 STR(PTR)"\t1b, 4b\n\t" \
586 STR(PTR)"\t2b, 4b\n\t" \
588 : "=&r" (value), "=r" (res) \
589 : "r" (addr), "i" (-EFAULT));
591 /* MIPSR6 has no lwl and ldl instructions */
592 #define LoadWU(addr, value, res) \
593 __asm__ __volatile__ ( \
596 "1:"user_lbu("%0", "3(%2)")"\n\t" \
597 "2:"user_lbu("$1", "2(%2)")"\n\t" \
600 "3:"user_lbu("$1", "1(%2)")"\n\t" \
603 "4:"user_lbu("$1", "0(%2)")"\n\t" \
610 ".section\t.fixup,\"ax\"\n\t" \
611 "11:\tli\t%1, %3\n\t" \
614 ".section\t__ex_table,\"a\"\n\t" \
615 STR(PTR)"\t1b, 11b\n\t" \
616 STR(PTR)"\t2b, 11b\n\t" \
617 STR(PTR)"\t3b, 11b\n\t" \
618 STR(PTR)"\t4b, 11b\n\t" \
620 : "=&r" (value), "=r" (res) \
621 : "r" (addr), "i" (-EFAULT));
623 #define LoadDW(addr, value, res) \
624 __asm__ __volatile__ ( \
627 "1:lb\t%0, 7(%2)\n\t" \
628 "2:lbu\t$1, 6(%2)\n\t" \
629 "dsll\t%0, 0x8\n\t" \
631 "3:lbu\t$1, 5(%2)\n\t" \
632 "dsll\t%0, 0x8\n\t" \
634 "4:lbu\t$1, 4(%2)\n\t" \
635 "dsll\t%0, 0x8\n\t" \
637 "5:lbu\t$1, 3(%2)\n\t" \
638 "dsll\t%0, 0x8\n\t" \
640 "6:lbu\t$1, 2(%2)\n\t" \
641 "dsll\t%0, 0x8\n\t" \
643 "7:lbu\t$1, 1(%2)\n\t" \
644 "dsll\t%0, 0x8\n\t" \
646 "8:lbu\t$1, 0(%2)\n\t" \
647 "dsll\t%0, 0x8\n\t" \
653 ".section\t.fixup,\"ax\"\n\t" \
654 "11:\tli\t%1, %3\n\t" \
657 ".section\t__ex_table,\"a\"\n\t" \
658 STR(PTR)"\t1b, 11b\n\t" \
659 STR(PTR)"\t2b, 11b\n\t" \
660 STR(PTR)"\t3b, 11b\n\t" \
661 STR(PTR)"\t4b, 11b\n\t" \
662 STR(PTR)"\t5b, 11b\n\t" \
663 STR(PTR)"\t6b, 11b\n\t" \
664 STR(PTR)"\t7b, 11b\n\t" \
665 STR(PTR)"\t8b, 11b\n\t" \
667 : "=&r" (value), "=r" (res) \
668 : "r" (addr), "i" (-EFAULT));
669 #endif /* CONFIG_CPU_MIPSR6 */
671 #define StoreHW(addr, value, res) \
672 __asm__ __volatile__ ( \
674 "1:\t"user_sb("%1", "0(%2)")"\n" \
675 "srl\t$1,%1, 0x8\n" \
676 "2:\t"user_sb("$1", "1(%2)")"\n" \
681 ".section\t.fixup,\"ax\"\n\t" \
682 "4:\tli\t%0, %3\n\t" \
685 ".section\t__ex_table,\"a\"\n\t" \
686 STR(PTR)"\t1b, 4b\n\t" \
687 STR(PTR)"\t2b, 4b\n\t" \
690 : "r" (value), "r" (addr), "i" (-EFAULT));
691 #ifndef CONFIG_CPU_MIPSR6
692 #define StoreW(addr, value, res) \
693 __asm__ __volatile__ ( \
694 "1:\t"user_swl("%1", "3(%2)")"\n" \
695 "2:\t"user_swr("%1", "(%2)")"\n\t" \
699 ".section\t.fixup,\"ax\"\n\t" \
700 "4:\tli\t%0, %3\n\t" \
703 ".section\t__ex_table,\"a\"\n\t" \
704 STR(PTR)"\t1b, 4b\n\t" \
705 STR(PTR)"\t2b, 4b\n\t" \
708 : "r" (value), "r" (addr), "i" (-EFAULT));
710 #define StoreDW(addr, value, res) \
711 __asm__ __volatile__ ( \
712 "1:\tsdl\t%1, 7(%2)\n" \
713 "2:\tsdr\t%1, (%2)\n\t" \
717 ".section\t.fixup,\"ax\"\n\t" \
718 "4:\tli\t%0, %3\n\t" \
721 ".section\t__ex_table,\"a\"\n\t" \
722 STR(PTR)"\t1b, 4b\n\t" \
723 STR(PTR)"\t2b, 4b\n\t" \
726 : "r" (value), "r" (addr), "i" (-EFAULT));
728 /* MIPSR6 has no swl and sdl instructions */
729 #define StoreW(addr, value, res) \
730 __asm__ __volatile__ ( \
733 "1:"user_sb("%1", "0(%2)")"\n\t" \
734 "srl\t$1, %1, 0x8\n\t" \
735 "2:"user_sb("$1", "1(%2)")"\n\t" \
736 "srl\t$1, $1, 0x8\n\t" \
737 "3:"user_sb("$1", "2(%2)")"\n\t" \
738 "srl\t$1, $1, 0x8\n\t" \
739 "4:"user_sb("$1", "3(%2)")"\n\t" \
744 ".section\t.fixup,\"ax\"\n\t" \
745 "11:\tli\t%0, %3\n\t" \
748 ".section\t__ex_table,\"a\"\n\t" \
749 STR(PTR)"\t1b, 11b\n\t" \
750 STR(PTR)"\t2b, 11b\n\t" \
751 STR(PTR)"\t3b, 11b\n\t" \
752 STR(PTR)"\t4b, 11b\n\t" \
755 : "r" (value), "r" (addr), "i" (-EFAULT) \
758 #define StoreDW(addr, value, res) \
759 __asm__ __volatile__ ( \
762 "1:sb\t%1, 0(%2)\n\t" \
763 "dsrl\t$1, %1, 0x8\n\t" \
764 "2:sb\t$1, 1(%2)\n\t" \
765 "dsrl\t$1, $1, 0x8\n\t" \
766 "3:sb\t$1, 2(%2)\n\t" \
767 "dsrl\t$1, $1, 0x8\n\t" \
768 "4:sb\t$1, 3(%2)\n\t" \
769 "dsrl\t$1, $1, 0x8\n\t" \
770 "5:sb\t$1, 4(%2)\n\t" \
771 "dsrl\t$1, $1, 0x8\n\t" \
772 "6:sb\t$1, 5(%2)\n\t" \
773 "dsrl\t$1, $1, 0x8\n\t" \
774 "7:sb\t$1, 6(%2)\n\t" \
775 "dsrl\t$1, $1, 0x8\n\t" \
776 "8:sb\t$1, 7(%2)\n\t" \
777 "dsrl\t$1, $1, 0x8\n\t" \
782 ".section\t.fixup,\"ax\"\n\t" \
783 "11:\tli\t%0, %3\n\t" \
786 ".section\t__ex_table,\"a\"\n\t" \
787 STR(PTR)"\t1b, 11b\n\t" \
788 STR(PTR)"\t2b, 11b\n\t" \
789 STR(PTR)"\t3b, 11b\n\t" \
790 STR(PTR)"\t4b, 11b\n\t" \
791 STR(PTR)"\t5b, 11b\n\t" \
792 STR(PTR)"\t6b, 11b\n\t" \
793 STR(PTR)"\t7b, 11b\n\t" \
794 STR(PTR)"\t8b, 11b\n\t" \
797 : "r" (value), "r" (addr), "i" (-EFAULT) \
799 #endif /* CONFIG_CPU_MIPSR6 */
802 static void emulate_load_store_insn(struct pt_regs *regs,
803 void __user *addr, unsigned int __user *pc)
805 union mips_instruction insn;
808 unsigned long origpc;
809 unsigned long orig31;
810 void __user *fault_addr = NULL;
814 origpc = (unsigned long)pc;
815 orig31 = regs->regs[31];
817 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
820 * This load never faults.
822 __get_user(insn.word, pc);
824 switch (insn.i_format.opcode) {
826 * These are instructions that a compiler doesn't generate. We
827 * can assume therefore that the code is MIPS-aware and
828 * really buggy. Emulating these instructions would break the
837 * For these instructions the only way to create an address
838 * error is an attempted access to kernel/supervisor address
855 * The remaining opcodes are the ones that are really of
861 * we can land here only from kernel accessing user memory,
862 * so we need to "switch" the address limit to user space, so
863 * address check can work properly.
867 switch (insn.spec3_format.func) {
869 if (!access_ok(VERIFY_READ, addr, 2)) {
873 LoadHW(addr, value, res);
878 compute_return_epc(regs);
879 regs->regs[insn.spec3_format.rt] = value;
882 if (!access_ok(VERIFY_READ, addr, 4)) {
886 LoadW(addr, value, res);
891 compute_return_epc(regs);
892 regs->regs[insn.spec3_format.rt] = value;
895 if (!access_ok(VERIFY_READ, addr, 2)) {
899 LoadHWU(addr, value, res);
904 compute_return_epc(regs);
905 regs->regs[insn.spec3_format.rt] = value;
908 if (!access_ok(VERIFY_WRITE, addr, 2)) {
912 compute_return_epc(regs);
913 value = regs->regs[insn.spec3_format.rt];
914 StoreHW(addr, value, res);
921 if (!access_ok(VERIFY_WRITE, addr, 4)) {
925 compute_return_epc(regs);
926 value = regs->regs[insn.spec3_format.rt];
927 StoreW(addr, value, res);
941 if (!access_ok(VERIFY_READ, addr, 2))
944 LoadHW(addr, value, res);
947 compute_return_epc(regs);
948 regs->regs[insn.i_format.rt] = value;
952 if (!access_ok(VERIFY_READ, addr, 4))
955 LoadW(addr, value, res);
958 compute_return_epc(regs);
959 regs->regs[insn.i_format.rt] = value;
963 if (!access_ok(VERIFY_READ, addr, 2))
966 LoadHWU(addr, value, res);
969 compute_return_epc(regs);
970 regs->regs[insn.i_format.rt] = value;
976 * A 32-bit kernel might be running on a 64-bit processor. But
977 * if we're on a 32-bit processor and an i-cache incoherency
978 * or race makes us see a 64-bit instruction here the sdl/sdr
979 * would blow up, so for now we don't handle unaligned 64-bit
980 * instructions on 32-bit kernels.
982 if (!access_ok(VERIFY_READ, addr, 4))
985 LoadWU(addr, value, res);
988 compute_return_epc(regs);
989 regs->regs[insn.i_format.rt] = value;
991 #endif /* CONFIG_64BIT */
993 /* Cannot handle 64-bit instructions in 32-bit kernel */
999 * A 32-bit kernel might be running on a 64-bit processor. But
1000 * if we're on a 32-bit processor and an i-cache incoherency
1001 * or race makes us see a 64-bit instruction here the sdl/sdr
1002 * would blow up, so for now we don't handle unaligned 64-bit
1003 * instructions on 32-bit kernels.
1005 if (!access_ok(VERIFY_READ, addr, 8))
1008 LoadDW(addr, value, res);
1011 compute_return_epc(regs);
1012 regs->regs[insn.i_format.rt] = value;
1014 #endif /* CONFIG_64BIT */
1016 /* Cannot handle 64-bit instructions in 32-bit kernel */
1020 if (!access_ok(VERIFY_WRITE, addr, 2))
1023 compute_return_epc(regs);
1024 value = regs->regs[insn.i_format.rt];
1025 StoreHW(addr, value, res);
1031 if (!access_ok(VERIFY_WRITE, addr, 4))
1034 compute_return_epc(regs);
1035 value = regs->regs[insn.i_format.rt];
1036 StoreW(addr, value, res);
1044 * A 32-bit kernel might be running on a 64-bit processor. But
1045 * if we're on a 32-bit processor and an i-cache incoherency
1046 * or race makes us see a 64-bit instruction here the sdl/sdr
1047 * would blow up, so for now we don't handle unaligned 64-bit
1048 * instructions on 32-bit kernels.
1050 if (!access_ok(VERIFY_WRITE, addr, 8))
1053 compute_return_epc(regs);
1054 value = regs->regs[insn.i_format.rt];
1055 StoreDW(addr, value, res);
1059 #endif /* CONFIG_64BIT */
1061 /* Cannot handle 64-bit instructions in 32-bit kernel */
1068 die_if_kernel("Unaligned FP access in kernel code", regs);
1069 BUG_ON(!used_math());
1071 lose_fpu(1); /* Save FPU state for the emulator. */
1072 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
1074 own_fpu(1); /* Restore FPU state. */
1076 /* Signal if something went wrong. */
1077 process_fpemu_return(res, fault_addr, 0);
1083 #ifndef CONFIG_CPU_MIPSR6
1085 * COP2 is available to implementor for application specific use.
1086 * It's up to applications to register a notifier chain and do
1087 * whatever they have to do, including possible sending of signals.
1089 * This instruction has been reallocated in Release 6
1092 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
1096 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
1100 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
1104 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
1109 * Pheeee... We encountered an as yet unknown instruction or
1110 * cache coherence problem. Die sucker, die ...
1115 #ifdef CONFIG_DEBUG_FS
1116 unaligned_instructions++;
1122 /* roll back jump/branch */
1123 regs->cp0_epc = origpc;
1124 regs->regs[31] = orig31;
1125 /* Did we have an exception handler installed? */
1126 if (fixup_exception(regs))
1129 die_if_kernel("Unhandled kernel unaligned access", regs);
1130 force_sig(SIGSEGV, current);
1135 die_if_kernel("Unhandled kernel unaligned access", regs);
1136 force_sig(SIGBUS, current);
1142 ("Unhandled kernel unaligned access or invalid instruction", regs);
1143 force_sig(SIGILL, current);
1146 /* Recode table from 16-bit register notation to 32-bit GPR. */
1147 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
1149 /* Recode table from 16-bit STORE register notation to 32-bit GPR.
 * NOTE(review): unlike reg16to32, index 0 maps to $0 (zero) instead of $16 —
 * presumably the MIPS16e store encoding reserves value 0 for $zero; confirm
 * against the MIPS16e ASE register-encoding tables.
 */
1150 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
1152 static void emulate_load_store_microMIPS(struct pt_regs *regs,
1155 unsigned long value;
1158 unsigned int reg = 0, rvar;
1159 unsigned long orig31;
1163 unsigned long origpc, contpc;
1164 union mips_instruction insn;
1165 struct mm_decoded_insn mminsn;
1166 void __user *fault_addr = NULL;
1168 origpc = regs->cp0_epc;
1169 orig31 = regs->regs[31];
1171 mminsn.micro_mips_mode = 1;
1174 * This load never faults.
1176 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
1177 __get_user(halfword, pc16);
1179 contpc = regs->cp0_epc + 2;
1180 word = ((unsigned int)halfword << 16);
1183 if (!mm_insn_16bit(halfword)) {
1184 __get_user(halfword, pc16);
1186 contpc = regs->cp0_epc + 4;
1192 if (get_user(halfword, pc16))
1194 mminsn.next_pc_inc = 2;
1195 word = ((unsigned int)halfword << 16);
1197 if (!mm_insn_16bit(halfword)) {
1199 if (get_user(halfword, pc16))
1201 mminsn.next_pc_inc = 4;
1204 mminsn.next_insn = word;
1206 insn = (union mips_instruction)(mminsn.insn);
1207 if (mm_isBranchInstr(regs, mminsn, &contpc))
1208 insn = (union mips_instruction)(mminsn.next_insn);
1210 /* Parse instruction to find what to do */
1212 switch (insn.mm_i_format.opcode) {
1215 switch (insn.mm_x_format.func) {
1217 reg = insn.mm_x_format.rd;
1224 switch (insn.mm_m_format.func) {
1226 reg = insn.mm_m_format.rd;
1230 if (!access_ok(VERIFY_READ, addr, 8))
1233 LoadW(addr, value, res);
1236 regs->regs[reg] = value;
1238 LoadW(addr, value, res);
1241 regs->regs[reg + 1] = value;
1245 reg = insn.mm_m_format.rd;
1249 if (!access_ok(VERIFY_WRITE, addr, 8))
1252 value = regs->regs[reg];
1253 StoreW(addr, value, res);
1257 value = regs->regs[reg + 1];
1258 StoreW(addr, value, res);
1265 reg = insn.mm_m_format.rd;
1269 if (!access_ok(VERIFY_READ, addr, 16))
1272 LoadDW(addr, value, res);
1275 regs->regs[reg] = value;
1277 LoadDW(addr, value, res);
1280 regs->regs[reg + 1] = value;
1282 #endif /* CONFIG_64BIT */
1288 reg = insn.mm_m_format.rd;
1292 if (!access_ok(VERIFY_WRITE, addr, 16))
1295 value = regs->regs[reg];
1296 StoreDW(addr, value, res);
1300 value = regs->regs[reg + 1];
1301 StoreDW(addr, value, res);
1305 #endif /* CONFIG_64BIT */
1310 reg = insn.mm_m_format.rd;
1312 if ((rvar > 9) || !reg)
1316 (VERIFY_READ, addr, 4 * (rvar + 1)))
1319 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1324 for (i = 16; rvar; rvar--, i++) {
1325 LoadW(addr, value, res);
1329 regs->regs[i] = value;
1331 if ((reg & 0xf) == 9) {
1332 LoadW(addr, value, res);
1336 regs->regs[30] = value;
1339 LoadW(addr, value, res);
1342 regs->regs[31] = value;
1347 reg = insn.mm_m_format.rd;
1349 if ((rvar > 9) || !reg)
1353 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
1356 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1361 for (i = 16; rvar; rvar--, i++) {
1362 value = regs->regs[i];
1363 StoreW(addr, value, res);
1368 if ((reg & 0xf) == 9) {
1369 value = regs->regs[30];
1370 StoreW(addr, value, res);
1376 value = regs->regs[31];
1377 StoreW(addr, value, res);
1385 reg = insn.mm_m_format.rd;
1387 if ((rvar > 9) || !reg)
1391 (VERIFY_READ, addr, 8 * (rvar + 1)))
1394 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
1400 for (i = 16; rvar; rvar--, i++) {
1401 LoadDW(addr, value, res);
1405 regs->regs[i] = value;
1407 if ((reg & 0xf) == 9) {
1408 LoadDW(addr, value, res);
1412 regs->regs[30] = value;
1415 LoadDW(addr, value, res);
1418 regs->regs[31] = value;
1421 #endif /* CONFIG_64BIT */
1427 reg = insn.mm_m_format.rd;
1429 if ((rvar > 9) || !reg)
1433 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
1436 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
1442 for (i = 16; rvar; rvar--, i++) {
1443 value = regs->regs[i];
1444 StoreDW(addr, value, res);
1449 if ((reg & 0xf) == 9) {
1450 value = regs->regs[30];
1451 StoreDW(addr, value, res);
1457 value = regs->regs[31];
1458 StoreDW(addr, value, res);
1463 #endif /* CONFIG_64BIT */
1467 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1473 switch (insn.mm_m_format.func) {
1475 reg = insn.mm_m_format.rd;
1479 /* LL,SC,LLD,SCD are not serviced */
1483 switch (insn.mm_x_format.func) {
1498 /* roll back jump/branch */
1499 regs->cp0_epc = origpc;
1500 regs->regs[31] = orig31;
1502 die_if_kernel("Unaligned FP access in kernel code", regs);
1503 BUG_ON(!used_math());
1504 BUG_ON(!is_fpu_owner());
1506 lose_fpu(1); /* save the FPU state for the emulator */
1507 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
1509 own_fpu(1); /* restore FPU state */
1511 /* If something went wrong, signal */
1512 process_fpemu_return(res, fault_addr, 0);
1519 reg = insn.mm_i_format.rt;
1523 reg = insn.mm_i_format.rt;
1527 reg = insn.mm_i_format.rt;
1531 reg = insn.mm_i_format.rt;
1535 reg = insn.mm_i_format.rt;
1539 reg = insn.mm_i_format.rt;
1543 reg = insn.mm_i_format.rt;
1547 switch (insn.mm16_m_format.func) {
1549 reg = insn.mm16_m_format.rlist;
1551 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1554 for (i = 16; rvar; rvar--, i++) {
1555 LoadW(addr, value, res);
1559 regs->regs[i] = value;
1561 LoadW(addr, value, res);
1564 regs->regs[31] = value;
1569 reg = insn.mm16_m_format.rlist;
1571 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1574 for (i = 16; rvar; rvar--, i++) {
1575 value = regs->regs[i];
1576 StoreW(addr, value, res);
1581 value = regs->regs[31];
1582 StoreW(addr, value, res);
1593 reg = reg16to32[insn.mm16_rb_format.rt];
1597 reg = reg16to32[insn.mm16_rb_format.rt];
1601 reg = reg16to32st[insn.mm16_rb_format.rt];
1605 reg = reg16to32st[insn.mm16_rb_format.rt];
1609 reg = insn.mm16_r5_format.rt;
1613 reg = insn.mm16_r5_format.rt;
1617 reg = reg16to32[insn.mm16_r3_format.rt];
1625 if (!access_ok(VERIFY_READ, addr, 2))
1628 LoadHW(addr, value, res);
1631 regs->regs[reg] = value;
1635 if (!access_ok(VERIFY_READ, addr, 2))
1638 LoadHWU(addr, value, res);
1641 regs->regs[reg] = value;
1645 if (!access_ok(VERIFY_READ, addr, 4))
1648 LoadW(addr, value, res);
1651 regs->regs[reg] = value;
1657 * A 32-bit kernel might be running on a 64-bit processor. But
1658 * if we're on a 32-bit processor and an i-cache incoherency
1659 * or race makes us see a 64-bit instruction here the sdl/sdr
1660 * would blow up, so for now we don't handle unaligned 64-bit
1661 * instructions on 32-bit kernels.
1663 if (!access_ok(VERIFY_READ, addr, 4))
1666 LoadWU(addr, value, res);
1669 regs->regs[reg] = value;
1671 #endif /* CONFIG_64BIT */
1673 /* Cannot handle 64-bit instructions in 32-bit kernel */
1679 * A 32-bit kernel might be running on a 64-bit processor. But
1680 * if we're on a 32-bit processor and an i-cache incoherency
1681 * or race makes us see a 64-bit instruction here the sdl/sdr
1682 * would blow up, so for now we don't handle unaligned 64-bit
1683 * instructions on 32-bit kernels.
1685 if (!access_ok(VERIFY_READ, addr, 8))
1688 LoadDW(addr, value, res);
1691 regs->regs[reg] = value;
1693 #endif /* CONFIG_64BIT */
1695 /* Cannot handle 64-bit instructions in 32-bit kernel */
1699 if (!access_ok(VERIFY_WRITE, addr, 2))
1702 value = regs->regs[reg];
1703 StoreHW(addr, value, res);
1709 if (!access_ok(VERIFY_WRITE, addr, 4))
1712 value = regs->regs[reg];
1713 StoreW(addr, value, res);
1721 * A 32-bit kernel might be running on a 64-bit processor. But
1722 * if we're on a 32-bit processor and an i-cache incoherency
1723 * or race makes us see a 64-bit instruction here the sdl/sdr
1724 * would blow up, so for now we don't handle unaligned 64-bit
1725 * instructions on 32-bit kernels.
1727 if (!access_ok(VERIFY_WRITE, addr, 8))
1730 value = regs->regs[reg];
1731 StoreDW(addr, value, res);
1735 #endif /* CONFIG_64BIT */
1737 /* Cannot handle 64-bit instructions in 32-bit kernel */
1741 regs->cp0_epc = contpc; /* advance or branch */
1743 #ifdef CONFIG_DEBUG_FS
1744 unaligned_instructions++;
1749 /* roll back jump/branch */
1750 regs->cp0_epc = origpc;
1751 regs->regs[31] = orig31;
1752 /* Did we have an exception handler installed? */
1753 if (fixup_exception(regs))
1756 die_if_kernel("Unhandled kernel unaligned access", regs);
1757 force_sig(SIGSEGV, current);
1762 die_if_kernel("Unhandled kernel unaligned access", regs);
1763 force_sig(SIGBUS, current);
1769 ("Unhandled kernel unaligned access or invalid instruction", regs);
1770 force_sig(SIGILL, current);
/*
 * Software-emulate a MIPS16e load/store instruction that raised an
 * address-error (unaligned access) exception.
 *
 * @regs: trap frame of the faulting context; cp0_epc points at the
 *        faulting instruction (ISA-mode bit set).
 * @addr: the unaligned data address that faulted (cp0_badvaddr).
 *
 * Decodes the 16-bit instruction at EPC, works out which GPR the
 * instruction operates on, performs the access piecewise via the
 * LoadHW/StoreW/... helpers, then advances EPC past the instruction
 * (including branch/jump delay-slot bookkeeping via
 * MIPS16e_compute_return_epc()).  On failure the saved EPC and $31 are
 * restored and a signal (SIGSEGV/SIGBUS/SIGILL) is raised, or the
 * kernel exception fixup table is consulted for kernel-mode faults.
 */
1773 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1775 unsigned long value;
1778 unsigned long orig31;
1780 unsigned long origpc;
1781 union mips16e_instruction mips16inst, oldinst;
/* Save EPC and $31 so a failed emulation can be rolled back below. */
1783 origpc = regs->cp0_epc;
1784 orig31 = regs->regs[31];
/* Strip the ISA-mode bit from EPC to get the real instruction address. */
1785 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1787 * This load never faults.
1789 __get_user(mips16inst.full, pc16);
/* Keep the unmodified instruction for return-EPC computation later. */
1790 oldinst = mips16inst;
1792 /* skip EXTEND instruction */
1793 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1795 __get_user(mips16inst.full, pc16);
1796 } else if (delay_slot(regs)) {
1797 /* skip jump instructions */
1798 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1799 if (mips16inst.ri.opcode == MIPS16e_jal_op)
/* Re-fetch: the faulting load/store sits in the branch delay slot. */
1802 if (get_user(mips16inst.full, pc16))
/* First pass: derive the GPR number the instruction operates on. */
1806 switch (mips16inst.ri.opcode) {
1807 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1808 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1809 case MIPS16e_ldpc_func:
1810 case MIPS16e_ldsp_func:
/* reg16to32[] maps the 3-bit MIPS16 register field to a full GPR number. */
1811 reg = reg16to32[mips16inst.ri64.ry];
1814 case MIPS16e_sdsp_func:
1815 reg = reg16to32[mips16inst.ri64.ry];
1818 case MIPS16e_sdrasp_func:
1819 reg = 29; /* GPRSP */
1825 case MIPS16e_swsp_op:
1826 case MIPS16e_lwpc_op:
1827 case MIPS16e_lwsp_op:
1828 reg = reg16to32[mips16inst.ri.rx];
1832 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1834 reg = 29; /* GPRSP */
1838 reg = reg16to32[mips16inst.rri.ry];
/* Second pass: emulate the memory access itself. */
1842 switch (mips16inst.ri.opcode) {
1845 case MIPS16e_lbu_op:
1850 if (!access_ok(VERIFY_READ, addr, 2))
1853 LoadHW(addr, value, res);
/* Advance EPC (handles branch/delay slot) before the register writeback. */
1856 MIPS16e_compute_return_epc(regs, &oldinst);
1857 regs->regs[reg] = value;
1860 case MIPS16e_lhu_op:
1861 if (!access_ok(VERIFY_READ, addr, 2))
1864 LoadHWU(addr, value, res);
1867 MIPS16e_compute_return_epc(regs, &oldinst);
1868 regs->regs[reg] = value;
1872 case MIPS16e_lwpc_op:
1873 case MIPS16e_lwsp_op:
1874 if (!access_ok(VERIFY_READ, addr, 4))
1877 LoadW(addr, value, res);
1880 MIPS16e_compute_return_epc(regs, &oldinst);
1881 regs->regs[reg] = value;
1884 case MIPS16e_lwu_op:
1887 * A 32-bit kernel might be running on a 64-bit processor. But
1888 * if we're on a 32-bit processor and an i-cache incoherency
1889 * or race makes us see a 64-bit instruction here the sdl/sdr
1890 * would blow up, so for now we don't handle unaligned 64-bit
1891 * instructions on 32-bit kernels.
1893 if (!access_ok(VERIFY_READ, addr, 4))
1896 LoadWU(addr, value, res);
1899 MIPS16e_compute_return_epc(regs, &oldinst);
1900 regs->regs[reg] = value;
1902 #endif /* CONFIG_64BIT */
1904 /* Cannot handle 64-bit instructions in 32-bit kernel */
1911 * A 32-bit kernel might be running on a 64-bit processor. But
1912 * if we're on a 32-bit processor and an i-cache incoherency
1913 * or race makes us see a 64-bit instruction here the sdl/sdr
1914 * would blow up, so for now we don't handle unaligned 64-bit
1915 * instructions on 32-bit kernels.
1917 if (!access_ok(VERIFY_READ, addr, 8))
1920 LoadDW(addr, value, res);
1923 MIPS16e_compute_return_epc(regs, &oldinst);
1924 regs->regs[reg] = value;
1926 #endif /* CONFIG_64BIT */
1928 /* Cannot handle 64-bit instructions in 32-bit kernel */
/* Stores: advance EPC first, then read the GPR value to be stored. */
1932 if (!access_ok(VERIFY_WRITE, addr, 2))
1935 MIPS16e_compute_return_epc(regs, &oldinst);
1936 value = regs->regs[reg];
1937 StoreHW(addr, value, res);
1943 case MIPS16e_swsp_op:
1944 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1945 if (!access_ok(VERIFY_WRITE, addr, 4))
1948 MIPS16e_compute_return_epc(regs, &oldinst);
1949 value = regs->regs[reg];
1950 StoreW(addr, value, res);
1959 * A 32-bit kernel might be running on a 64-bit processor. But
1960 * if we're on a 32-bit processor and an i-cache incoherency
1961 * or race makes us see a 64-bit instruction here the sdl/sdr
1962 * would blow up, so for now we don't handle unaligned 64-bit
1963 * instructions on 32-bit kernels.
1965 if (!access_ok(VERIFY_WRITE, addr, 8))
1968 MIPS16e_compute_return_epc(regs, &oldinst);
1969 value = regs->regs[reg];
1970 StoreDW(addr, value, res);
1974 #endif /* CONFIG_64BIT */
1976 /* Cannot handle 64-bit instructions in 32-bit kernel */
1981 * Pheeee... We encountered a yet unknown instruction or
1982 * cache coherence problem. Die sucker, die ...
1987 #ifdef CONFIG_DEBUG_FS
1988 unaligned_instructions++;
/* Error paths: undo any EPC/$31 update so signals see the fault site. */
1994 /* roll back jump/branch */
1995 regs->cp0_epc = origpc;
1996 regs->regs[31] = orig31;
1997 /* Did we have an exception handler installed? */
1998 if (fixup_exception(regs))
2001 die_if_kernel("Unhandled kernel unaligned access", regs);
2002 force_sig(SIGSEGV, current);
2007 die_if_kernel("Unhandled kernel unaligned access", regs);
2008 force_sig(SIGBUS, current);
2014 ("Unhandled kernel unaligned access or invalid instruction", regs);
2015 force_sig(SIGILL, current);
/*
 * do_ade - top-level handler for the address-error exception.
 *
 * Decides whether to software-emulate the unaligned access or to send
 * the offending task a signal, then dispatches to the emulator matching
 * the faulting instruction set: microMIPS or MIPS16e when EPC carries
 * the ISA-mode bit, classic MIPS otherwise.
 */
2018 asmlinkage void do_ade(struct pt_regs *regs)
2020 enum ctx_state prev_state;
2021 unsigned int __user *pc;
2024 prev_state = exception_enter();
/* Account the fault with the perf alignment-faults software counter. */
2025 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
2026 1, regs, regs->cp0_badvaddr);
2028 * Did we catch a fault trying to load an instruction?
2030 if (regs->cp0_badvaddr == regs->cp0_epc)
/* User task has not opted into fixups (see sysmips(MIPS_FIXADE, ...)). */
2033 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
/* Policy says deliver a signal instead of emulating. */
2035 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
2039 * Do branch emulation only if we didn't forward the exception.
2040 * This is all so but ugly ...
2044 * Are we running in microMIPS mode?
/* EPC bit 0 set => compressed ISA (microMIPS or MIPS16e). */
2046 if (get_isa16_mode(regs->cp0_epc)) {
2048 * Did we catch a fault trying to load an instruction in
2051 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
2053 if (unaligned_action == UNALIGNED_ACTION_SHOW)
2054 show_registers(regs);
2056 if (cpu_has_mmips) {
2058 if (!user_mode(regs))
2060 emulate_load_store_microMIPS(regs,
2061 (void __user *)regs->cp0_badvaddr);
2067 if (cpu_has_mips16) {
2069 if (!user_mode(regs))
2071 emulate_load_store_MIPS16e(regs,
2072 (void __user *)regs->cp0_badvaddr);
/* Classic (non-compressed) MIPS instruction path. */
2081 if (unaligned_action == UNALIGNED_ACTION_SHOW)
2082 show_registers(regs);
2083 pc = (unsigned int __user *)exception_epc(regs);
2086 if (!user_mode(regs))
2088 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
/* Signal path: emulation was declined or is not possible. */
2094 die_if_kernel("Kernel unaligned instruction access", regs);
2095 force_sig(SIGBUS, current);
2098 * XXX On return from the signal handler we should advance the epc
2100 exception_exit(prev_state);
2103 #ifdef CONFIG_DEBUG_FS
2104 extern struct dentry *mips_debugfs_dir;
/*
 * Expose the emulation statistics and policy knob under the MIPS
 * debugfs directory: "unaligned_instructions" is a read-only counter
 * of emulated accesses, "unaligned_action" a root-writable control
 * selecting emulate/signal/show behaviour.
 */
2105 static int __init debugfs_unaligned(void)
/* Nothing to register if the arch debugfs root was never created. */
2109 if (!mips_debugfs_dir)
2111 d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
2112 mips_debugfs_dir, &unaligned_instructions);
2115 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
2116 mips_debugfs_dir, &unaligned_action);
/* Register at initcall time, after debugfs core is up. */
2121 __initcall(debugfs_unaligned);