2 * Low-level system-call handling, trap handlers and context-switching
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
18 #include <linux/sys.h>
19 #include <linux/linkage.h>
21 #include <asm/entry.h>
22 #include <asm/current.h>
23 #include <asm/processor.h>
24 #include <asm/exceptions.h>
25 #include <asm/asm-offsets.h>
26 #include <asm/thread_info.h>
29 #include <asm/unistd.h>
31 #include <linux/errno.h>
32 #include <asm/signal.h>
36 /* The size of a state save frame. */
37 #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
39 /* The offset of the struct pt_regs in a `state save frame' on the stack. */
40 #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
42 #define C_ENTRY(name) .globl name; .align 4; name
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary when using a MicroBlaze version that
47 * allows msr ops to write to BIP
49 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
95 msrclr r11, MSR_VMS | MSR_UMS
102 andi r11, r11, ~MSR_BIP
110 ori r11, r11, MSR_BIP
118 andi r11, r11, ~MSR_EIP
134 andi r11, r11, ~MSR_IE
150 ori r11, r11, MSR_VMS
151 andni r11, r11, MSR_UMS
159 ori r11, r11, MSR_VMS
160 andni r11, r11, MSR_UMS
168 andni r11, r11, (MSR_VMS|MSR_UMS)
174 /* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
179 /* turn on virtual protected mode save */
186 /* turn off virtual protected mode save and user mode save*/
189 rted r0, TOPHYS(1f); \
194 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
195 swi r3, r1, PTO+PT_R3; \
196 swi r4, r1, PTO+PT_R4; \
197 swi r5, r1, PTO+PT_R5; \
198 swi r6, r1, PTO+PT_R6; \
199 swi r7, r1, PTO+PT_R7; \
200 swi r8, r1, PTO+PT_R8; \
201 swi r9, r1, PTO+PT_R9; \
202 swi r10, r1, PTO+PT_R10; \
203 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
204 swi r12, r1, PTO+PT_R12; \
205 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
206 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
207 swi r15, r1, PTO+PT_R15; /* Save LP */ \
208 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
209 swi r19, r1, PTO+PT_R19; \
210 swi r20, r1, PTO+PT_R20; \
211 swi r21, r1, PTO+PT_R21; \
212 swi r22, r1, PTO+PT_R22; \
213 swi r23, r1, PTO+PT_R23; \
214 swi r24, r1, PTO+PT_R24; \
215 swi r25, r1, PTO+PT_R25; \
216 swi r26, r1, PTO+PT_R26; \
217 swi r27, r1, PTO+PT_R27; \
218 swi r28, r1, PTO+PT_R28; \
219 swi r29, r1, PTO+PT_R29; \
220 swi r30, r1, PTO+PT_R30; \
221 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
222 mfs r11, rmsr; /* save MSR */ \
224 swi r11, r1, PTO+PT_MSR;
226 #define RESTORE_REGS \
227 lwi r11, r1, PTO+PT_MSR; \
230 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
231 lwi r3, r1, PTO+PT_R3; \
232 lwi r4, r1, PTO+PT_R4; \
233 lwi r5, r1, PTO+PT_R5; \
234 lwi r6, r1, PTO+PT_R6; \
235 lwi r7, r1, PTO+PT_R7; \
236 lwi r8, r1, PTO+PT_R8; \
237 lwi r9, r1, PTO+PT_R9; \
238 lwi r10, r1, PTO+PT_R10; \
239 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
240 lwi r12, r1, PTO+PT_R12; \
241 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
242 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
243 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
244 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
245 lwi r19, r1, PTO+PT_R19; \
246 lwi r20, r1, PTO+PT_R20; \
247 lwi r21, r1, PTO+PT_R21; \
248 lwi r22, r1, PTO+PT_R22; \
249 lwi r23, r1, PTO+PT_R23; \
250 lwi r24, r1, PTO+PT_R24; \
251 lwi r25, r1, PTO+PT_R25; \
252 lwi r26, r1, PTO+PT_R26; \
253 lwi r27, r1, PTO+PT_R27; \
254 lwi r28, r1, PTO+PT_R28; \
255 lwi r29, r1, PTO+PT_R29; \
256 lwi r30, r1, PTO+PT_R30; \
257 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
264 * System calls are handled here.
267 * Syscall number in r12, args in r5-r10
270 * Trap entered via brki instruction, so BIP bit is set, and interrupts
271 * are masked. This is nice, means we don't have to CLI before state save
273 C_ENTRY(_user_exception):
274 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
275 addi r14, r14, 4 /* return address is 4 byte after call */
276 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
280 andi r11, r11, MSR_UMS
283 /* Kernel-mode state save - kernel execve */
284 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
286 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
287 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
289 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
292 addi r11, r0, 1; /* Was in kernel-mode. */
293 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
295 nop; /* Fill delay slot */
297 /* User-mode state save. */
299 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
300 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
302 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
303 /* calculate kernel stack pointer from task struct 8k */
304 addik r1, r1, THREAD_SIZE;
307 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
310 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
311 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
312 swi r11, r1, PTO+PT_R1; /* Store user SP. */
313 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
314 /* Save away the syscall number. */
315 swi r12, r1, PTO+PT_R0;
318 /* where the trap should return need -8 to adjust for rtsd r15, 8*/
319 /* Jump to the appropriate function for the system call number in r12
320 * (r12 is not preserved), or return an error if r12 is not valid. The LP
321 * register should point to the location where
322 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
324 # Step into virtual mode.
330 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
331 lwi r11, r11, TI_FLAGS /* get flags in thread info */
332 andi r11, r11, _TIF_WORK_SYSCALL_MASK
335 addik r3, r0, -ENOSYS
336 swi r3, r1, PTO + PT_R3
337 brlid r15, do_syscall_trace_enter
338 addik r5, r1, PTO + PT_R0
340 # do_syscall_trace_enter returns the new syscall nr.
342 lwi r5, r1, PTO+PT_R5;
343 lwi r6, r1, PTO+PT_R6;
344 lwi r7, r1, PTO+PT_R7;
345 lwi r8, r1, PTO+PT_R8;
346 lwi r9, r1, PTO+PT_R9;
347 lwi r10, r1, PTO+PT_R10;
349 /* Jump to the appropriate function for the system call number in r12
350 * (r12 is not preserved), or return an error if r12 is not valid.
351 * The LP register should point to the location where the called function
352 * should return. [note that MAKE_SYS_CALL uses label 1] */
353 /* See if the system call number is valid */
354 addi r11, r12, -__NR_syscalls;
356 /* Figure out which function to use for this system call. */
357 /* Note Microblaze barrel shift is optional, so don't rely on it */
358 add r12, r12, r12; /* convert num -> ptr */
362 /* Trace syscalls and store them to r0_ram */
363 lwi r3, r12, 0x400 + r0_ram
365 swi r3, r12, 0x400 + r0_ram
368 # Find and jump into the syscall handler.
369 lwi r12, r12, sys_call_table
370 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
371 addi r15, r0, ret_from_trap-8
374 /* The syscall number is invalid, return an error. */
376 addi r3, r0, -ENOSYS;
377 rtsd r15,8; /* looks like a normal subroutine return */
381 /* Entry point used to return from a syscall/trap */
382 /* We re-enable BIP bit before state restore */
383 C_ENTRY(ret_from_trap):
384 swi r3, r1, PTO + PT_R3
385 swi r4, r1, PTO + PT_R4
387 lwi r11, r1, PTO+PT_MODE;
388 /* See if returning to kernel mode, if so, skip resched &c. */
390 /* We're returning to user mode, so check for various conditions that
391 * trigger rescheduling. */
392 /* FIXME: Restructure all these flag checks. */
393 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
394 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
395 andi r11, r11, _TIF_WORK_SYSCALL_MASK
398 brlid r15, do_syscall_trace_leave
399 addik r5, r1, PTO + PT_R0
401 /* We're returning to user mode, so check for various conditions that
402 * trigger rescheduling. */
403 /* get thread info from current task */
404 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
405 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
406 andi r11, r11, _TIF_NEED_RESCHED;
409 bralid r15, schedule; /* Call scheduler */
410 nop; /* delay slot */
412 /* Maybe handle a signal */
413 5: /* get thread info from current task*/
414 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
415 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
416 andi r11, r11, _TIF_SIGPENDING;
417 beqi r11, 1f; /* Signals to handle, handle them */
419 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
420 addi r7, r0, 1; /* Arg 3: int in_syscall */
421 bralid r15, do_signal; /* Handle any signals */
422 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
424 /* Finally, return to user state. */
425 1: set_bip; /* Ints masked for state restore */
426 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
430 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
431 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
434 /* Return to kernel state. */
435 2: set_bip; /* Ints masked for state restore */
439 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
442 TRAP_return: /* Make global symbol for debugging */
443 rtbd r14, 0; /* Instructions to return from an IRQ */
447 /* These syscalls need access to the struct pt_regs on the stack, so we
448 implement them in assembly (they're basically all wrappers anyway). */
450 C_ENTRY(sys_fork_wrapper):
451 addi r5, r0, SIGCHLD /* Arg 0: flags */
452 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
453 addik r7, r1, PTO /* Arg 2: parent context */
454 add r8, r0, r0 /* Arg 3: (unused) */
455 add r9, r0, r0; /* Arg 4: (unused) */
456 add r10, r0, r0; /* Arg 5: (unused) */
457 brid do_fork /* Do real work (tail-call) */
460 /* This the initial entry point for a new child thread, with an appropriate
461 stack in place that makes it look like the child is in the middle of a
462 syscall. This function is actually `returned to' from switch_thread
463 (copy_thread makes ret_from_fork the return address in each new thread's
465 C_ENTRY(ret_from_fork):
466 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
467 add r3, r5, r0; /* switch_thread returns the prev task */
468 /* ( in the delay slot ) */
469 add r3, r0, r0; /* Child's fork call should return 0. */
470 brid ret_from_trap; /* Do normal trap return */
474 brid microblaze_vfork /* Do real work (tail-call) */
478 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
479 lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
480 1: addik r7, r1, PTO; /* Arg 2: parent context */
481 add r8, r0, r0; /* Arg 3: (unused) */
482 add r9, r0, r0; /* Arg 4: (unused) */
483 add r10, r0, r0; /* Arg 5: (unused) */
484 brid do_fork /* Do real work (tail-call) */
488 addik r8, r1, PTO; /* add user context as 4th arg */
489 brid microblaze_execve; /* Do real work (tail-call).*/
492 C_ENTRY(sys_rt_sigreturn_wrapper):
493 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
494 swi r4, r1, PTO+PT_R4;
495 addik r5, r1, PTO; /* add user context as 1st arg */
496 brlid r15, sys_rt_sigreturn /* Do real work */
498 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
499 lwi r4, r1, PTO+PT_R4;
500 bri ret_from_trap /* fall through will not work here due to align */
504 * HW EXCEPTION routine start
508 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
509 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
510 /* See if already in kernel mode.*/ \
513 andi r11, r11, MSR_UMS; \
515 /* Kernel-mode state save. */ \
516 /* Reload kernel stack-ptr. */ \
517 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
519 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
520 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
521 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
523 /* PC, before IRQ/trap - this is one instruction above */ \
524 swi r17, r1, PTO+PT_PC; \
526 addi r11, r0, 1; /* Was in kernel-mode. */ \
527 swi r11, r1, PTO+PT_MODE; \
529 nop; /* Fill delay slot */ \
530 1: /* User-mode state save. */ \
531 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
532 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
534 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
535 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
538 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
540 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
541 swi r17, r1, PTO+PT_PC; \
543 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
544 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
545 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
546 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
547 /* Save away the syscall number. */ \
548 swi r0, r1, PTO+PT_R0; \
551 C_ENTRY(full_exception_trap):
552 /* adjust exception address for privileged instruction
553 * for finding where is it */
555 SAVE_STATE /* Save registers */
556 /* FIXME this can be store directly in PT_ESR reg.
557 * I tested it but there is a fault */
558 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
559 addik r15, r0, ret_from_exc - 8
560 addik r5, r1, PTO /* parameter struct pt_regs * regs */
563 mfs r7, rfsr; /* save FSR */
565 mts rfsr, r0; /* Clear sticky fsr */
567 addik r12, r0, full_exception
573 * Unaligned data trap.
575 * Unaligned data trap last on 4k page is handled here.
577 * Trap entered via exception, so EE bit is set, and interrupts
578 * are masked. This is nice, means we don't have to CLI before state save
580 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
582 C_ENTRY(unaligned_data_trap):
583 /* MS: I have to save r11 value and then restore it because
584 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
585 * instructions are not used. We don't need to do if MSR instructions
586 * are used and they use r0 instead of r11.
587 * I am using ENTRY_SP which should be primary used only for stack
589 swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
590 set_bip; /* equalize initial state for all possible entries */
593 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
594 SAVE_STATE /* Save registers.*/
595 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
596 addik r15, r0, ret_from_exc-8
597 mfs r3, resr /* ESR */
599 mfs r4, rear /* EAR */
601 addik r7, r1, PTO /* parameter struct pt_regs * regs */
602 addik r12, r0, _unaligned_data_exception
604 rtbd r12, 0; /* interrupts enabled */
610 * If the real exception handler (from hw_exception_handler.S) didn't find
611 * the mapping for the process, then we're thrown here to handle such situation.
613 * Trap entered via exceptions, so EE bit is set, and interrupts
614 * are masked. This is nice, means we don't have to CLI before state save
616 * Build a standard exception frame for TLB Access errors. All TLB exceptions
617 * will bail out to this point if they can't resolve the lightweight TLB fault.
619 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
620 * void do_page_fault(struct pt_regs *regs,
621 * unsigned long address,
622 * unsigned long error_code)
624 /* data and instruction trap - which one it is gets resolved in fault.c */
625 C_ENTRY(page_fault_data_trap):
626 SAVE_STATE /* Save registers.*/
627 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
628 addik r15, r0, ret_from_exc-8
629 addik r5, r1, PTO /* parameter struct pt_regs * regs */
630 mfs r6, rear /* parameter unsigned long address */
632 mfs r7, resr /* parameter unsigned long error_code */
634 addik r12, r0, do_page_fault
636 rted r12, 0; /* interrupts enabled */
639 C_ENTRY(page_fault_instr_trap):
640 SAVE_STATE /* Save registers.*/
641 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
642 addik r15, r0, ret_from_exc-8
643 addik r5, r1, PTO /* parameter struct pt_regs * regs */
644 mfs r6, rear /* parameter unsigned long address */
646 ori r7, r0, 0 /* parameter unsigned long error_code */
647 addik r12, r0, do_page_fault
649 rted r12, 0; /* interrupts enabled */
652 /* Entry point used to return from an exception. */
653 C_ENTRY(ret_from_exc):
654 lwi r11, r1, PTO+PT_MODE;
655 bnei r11, 2f; /* See if returning to kernel mode, */
656 /* ... if so, skip resched &c. */
658 /* We're returning to user mode, so check for various conditions that
659 trigger rescheduling. */
660 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
661 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
662 andi r11, r11, _TIF_NEED_RESCHED;
665 /* Call the scheduler before returning from a syscall/trap. */
666 bralid r15, schedule; /* Call scheduler */
667 nop; /* delay slot */
669 /* Maybe handle a signal */
670 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
671 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
672 andi r11, r11, _TIF_SIGPENDING;
673 beqi r11, 1f; /* Signals to handle, handle them */
676 * Handle a signal return; Pending signals should be in r18.
678 * Not all registers are saved by the normal trap/interrupt entry
679 * points (for instance, call-saved registers (because the normal
680 * C-compiler calling sequence in the kernel makes sure they're
681 * preserved), and call-clobbered registers in the case of
682 * traps), but signal handlers may want to examine or change the
683 * complete register state. Here we save anything not saved by
684 * the normal entry sequence, so that it may be safely restored
685 * (in a possibly modified form) after do_signal returns. */
686 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
687 addi r7, r0, 0; /* Arg 3: int in_syscall */
688 bralid r15, do_signal; /* Handle any signals */
689 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
691 /* Finally, return to user state. */
692 1: set_bip; /* Ints masked for state restore */
693 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
698 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
700 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
702 /* Return to kernel state. */
703 2: set_bip; /* Ints masked for state restore */
707 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
711 EXC_return: /* Make global symbol for debugging */
712 rtbd r14, 0; /* Instructions to return from an IRQ */
716 * HW EXCEPTION routine end
720 * Hardware maskable interrupts.
722 * The stack-pointer (r1) should have already been saved to the memory
723 * location PER_CPU(ENTRY_SP).
726 /* MS: we are in physical address */
727 /* Save registers, switch to proper stack, convert SP to virtual.*/
728 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
729 swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
730 /* MS: See if already in kernel mode. */
733 andi r11, r11, MSR_UMS
736 /* Kernel-mode state save. */
738 tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
739 /* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
740 swi r11, r1, (PT_R1 - PT_SIZE);
741 /* MS: restore r11 because of saving in SAVE_REGS */
742 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
744 /* MS: Make room on the stack -> activation record */
745 addik r1, r1, -STATE_SAVE_SIZE;
748 addi r11, r0, 1; /* MS: Was in kernel-mode. */
749 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
751 nop; /* MS: Fill delay slot */
754 /* User-mode state save. */
755 /* MS: restore r11 -> FIXME move before SAVE_REG */
756 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
757 /* MS: get the saved current */
758 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
760 lwi r1, r1, TS_THREAD_INFO;
761 addik r1, r1, THREAD_SIZE;
764 addik r1, r1, -STATE_SAVE_SIZE;
767 swi r0, r1, PTO + PT_MODE;
768 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
769 swi r11, r1, PTO+PT_R1;
771 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
772 swi r0, r1, PTO + PT_R0;
776 addik r11, r0, do_IRQ;
777 addik r15, r0, irq_call;
778 irq_call:rtbd r11, 0;
781 /* MS: we are in virtual mode */
783 lwi r11, r1, PTO + PT_MODE;
786 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
787 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
788 andi r11, r11, _TIF_NEED_RESCHED;
790 bralid r15, schedule;
791 nop; /* delay slot */
793 /* Maybe handle a signal */
794 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
795 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
796 andi r11, r11, _TIF_SIGPENDING;
797 beqid r11, no_intr_resched
798 /* Handle a signal return; Pending signals should be in r18. */
799 addi r7, r0, 0; /* Arg 3: int in_syscall */
800 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
801 bralid r15, do_signal; /* Handle any signals */
802 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
804 /* Finally, return to user state. */
806 /* Disable interrupts, we are now committed to the state restore */
808 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
812 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
813 lwi r1, r1, PT_R1 - PT_SIZE;
815 /* MS: Return to kernel state. */
817 #ifdef CONFIG_PREEMPT
818 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
819 /* MS: get preempt_count from thread info */
820 lwi r5, r11, TI_PREEMPT_COUNT;
823 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
824 andi r5, r5, _TIF_NEED_RESCHED;
825 beqi r5, restore /* if zero jump over */
828 /* interrupts are off, that's why I am calling preempt_schedule_irq */
829 bralid r15, preempt_schedule_irq
831 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
832 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
833 andi r5, r5, _TIF_NEED_RESCHED;
834 bnei r5, preempt /* if non zero jump to resched */
837 VM_OFF /* MS: turn off MMU */
840 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
843 IRQ_return: /* MS: Make global symbol for debugging */
849 * We enter dbtrap in "BIP" (breakpoint) mode.
850 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
852 * however, wait to save state first
854 C_ENTRY(_debug_exception):
855 /* BIP bit is set on entry, no interrupts can occur */
856 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
858 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
861 andi r11, r11, MSR_UMS
863 /* Kernel-mode state save. */
864 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
866 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
867 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
869 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
872 addi r11, r0, 1; /* Was in kernel-mode. */
873 swi r11, r1, PTO + PT_MODE;
875 nop; /* Fill delay slot */
876 1: /* User-mode state save. */
877 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
878 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
880 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
881 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
884 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
887 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
888 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
889 swi r11, r1, PTO+PT_R1; /* Store user SP. */
890 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
891 /* Save away the syscall number. */
892 swi r0, r1, PTO+PT_R0;
896 addi r5, r0, SIGTRAP /* send the trap signal */
897 add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
898 addk r7, r0, r0 /* 3rd param zero */
899 dbtrap_call: rtbd r0, send_sig;
900 addik r15, r0, dbtrap_call;
902 set_bip; /* Ints masked for state restore*/
903 lwi r11, r1, PTO+PT_MODE;
906 /* Get current task ptr into r11 */
907 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
908 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
909 andi r11, r11, _TIF_NEED_RESCHED;
912 /* Call the scheduler before returning from a syscall/trap. */
914 bralid r15, schedule; /* Call scheduler */
915 nop; /* delay slot */
916 /* XXX Is PT_DTRACE handling needed here? */
917 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
919 /* Maybe handle a signal */
920 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
921 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
922 andi r11, r11, _TIF_SIGPENDING;
923 beqi r11, 1f; /* Signals to handle, handle them */
925 /* Handle a signal return; Pending signals should be in r18. */
926 /* Not all registers are saved by the normal trap/interrupt entry
927 points (for instance, call-saved registers (because the normal
928 C-compiler calling sequence in the kernel makes sure they're
929 preserved), and call-clobbered registers in the case of
930 traps), but signal handlers may want to examine or change the
931 complete register state. Here we save anything not saved by
932 the normal entry sequence, so that it may be safely restored
933 (in a possibly modified form) after do_signal returns. */
935 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
936 addi r7, r0, 0; /* Arg 3: int in_syscall */
937 bralid r15, do_signal; /* Handle any signals */
938 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
941 /* Finally, return to user state. */
943 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
948 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
951 lwi r1, r1, PT_R1 - PT_SIZE;
952 /* Restore user stack pointer. */
955 /* Return to kernel state. */
959 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
963 DBTRAP_return: /* Make global symbol for debugging */
964 rtbd r14, 0; /* Instructions to return from an IRQ */
970 /* prepare return value */
971 addk r3, r0, CURRENT_TASK
973 /* save registers in cpu_context */
974 /* use r11 and r12, volatile registers, as temp register */
975 /* give start of cpu_context for previous process */
976 addik r11, r5, TI_CPU_CONTEXT
979 /* skip volatile registers.
980 * they are saved on stack when we jumped to _switch_to() */
981 /* dedicated registers */
988 /* save non-volatile registers */
1000 swi r30, r11, CC_R30
1001 /* special purpose registers */
1004 swi r12, r11, CC_MSR
1007 swi r12, r11, CC_EAR
1010 swi r12, r11, CC_ESR
1013 swi r12, r11, CC_FSR
1015 /* update r31, the current-give me pointer to task which will be next */
1016 lwi CURRENT_TASK, r6, TI_TASK
1017 /* stored it to current_save too */
1018 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
1020 /* get new process' cpu context and restore */
1021 /* give me start where start context of next task */
1022 addik r11, r6, TI_CPU_CONTEXT
1024 /* non-volatile registers */
1025 lwi r30, r11, CC_R30
1026 lwi r29, r11, CC_R29
1027 lwi r28, r11, CC_R28
1028 lwi r27, r11, CC_R27
1029 lwi r26, r11, CC_R26
1030 lwi r25, r11, CC_R25
1031 lwi r24, r11, CC_R24
1032 lwi r23, r11, CC_R23
1033 lwi r22, r11, CC_R22
1034 lwi r21, r11, CC_R21
1035 lwi r20, r11, CC_R20
1036 lwi r19, r11, CC_R19
1037 /* dedicated registers */
1038 lwi r18, r11, CC_R18
1039 lwi r17, r11, CC_R17
1040 lwi r16, r11, CC_R16
1041 lwi r15, r11, CC_R15
1042 lwi r14, r11, CC_R14
1043 lwi r13, r11, CC_R13
1044 /* skip volatile registers */
1048 /* special purpose registers */
1049 lwi r12, r11, CC_FSR
1052 lwi r12, r11, CC_MSR
1060 brai 0x70; /* Jump back to FS-boot */
1065 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1068 swi r5, r0, 0x254 + TOPHYS(r0_ram)
1071 /* These are compiled and loaded into high memory, then
1072 * copied into place in mach_early_setup */
1073 .section .init.ivt, "ax"
1075 /* this is very important - here is the reset vector */
1076 /* in current MMU branch you don't care what is here - it is
1077 * used from bootloader site - but this is correct for FS-BOOT */
1080 brai TOPHYS(_user_exception); /* syscall handler */
1081 brai TOPHYS(_interrupt); /* Interrupt handler */
1082 brai TOPHYS(_break); /* nmi trap handler */
1083 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1086 brai TOPHYS(_debug_exception); /* debug trap handler*/
1088 .section .rodata,"a"
1089 #include "syscall_table.S"
1091 syscall_table_size=(.-sys_call_table)
1098 .ascii "IRQ (PREEMPTED)\0"
1099 type_SYSCALL_PREEMPT:
1100 .ascii " SYSCALL (PREEMPTED)\0"
1103 * Trap decoding for stack unwinder
1104 * Tuples are (start addr, end addr, string)
1105 * If return address lies on [start addr, end addr],
1106 * unwinder displays 'string'
1110 .global microblaze_trap_handlers
1111 microblaze_trap_handlers:
1112 /* Exact matches come first */
1113 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
1114 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
1115 /* Fuzzy matches go here */
1116 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1117 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
1119 .word 0 ; .word 0 ; .word 0