2 * Low-level system-call handling, trap handlers and context-switching
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
18 #include <linux/sys.h>
19 #include <linux/linkage.h>
21 #include <asm/entry.h>
22 #include <asm/current.h>
23 #include <asm/processor.h>
24 #include <asm/exceptions.h>
25 #include <asm/asm-offsets.h>
26 #include <asm/thread_info.h>
29 #include <asm/unistd.h>
31 #include <linux/errno.h>
32 #include <asm/signal.h>
/* The size of a state save frame. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Emit a global, 4-byte-aligned entry label for a low-level entry point. */
#define C_ENTRY(name) .globl name; .align 4; name
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary using microblaze version that
 * allows msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
/* NOTE(review): the macro header lines (set_bip/clear_bip/clear_eip/
 * set_vms/clear_ums/clear_vms, and the matching #else/#endif) are not
 * visible in this listing; the instruction lines below are their bodies. */
	msrclr	r11, MSR_VMS | MSR_UMS	/* leave VM/user mode; old MSR -> r11 */
	andi	r11, r11, ~MSR_BIP	/* clear Break-In-Progress */
	ori	r11, r11, MSR_BIP	/* set Break-In-Progress */
	andi	r11, r11, ~MSR_EIP	/* clear Exception-In-Progress */
	andi	r11, r11, ~MSR_IE	/* disable interrupts */
	ori	r11, r11, MSR_VMS	/* virtual mode on ... */
	andni	r11, r11, MSR_UMS	/* ... and user mode off (kernel) */
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	andni	r11, r11, MSR_UMS	/* user mode off only */
	andni	r11, r11, (MSR_VMS|MSR_UMS)	/* virtual and user mode off */

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */

/* turn off virtual protected mode save and user mode save*/
	rted	r0, TOPHYS(1f); \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r3, r1, PTO+PT_R3;	/* return value regs */		\
	swi	r4, r1, PTO+PT_R4;					\
	swi	r5, r1, PTO+PT_R5;	/* syscall args r5-r10 */	\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;	/* callee-saved r19-r31 */	\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	swi	r11, r1, PTO+PT_MSR;
/* Restore the register state stashed by SAVE_REGS from the pt_regs frame
 * at r1+PTO.  NOTE(review): the MSR write-back (mts rmsr) lines are not
 * visible in this listing — confirm against the full file. */
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PTO+PT_R3;					\
	lwi	r4, r1, PTO+PT_R4;					\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
/*
 * System calls are handled here.
 *
 * Syscall number in r12, args in r5-r10.
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save.
 *
 * NOTE(review): this listing has gaps — the kernel/user mode test, the
 * mode-select branches, several delay-slot nops and the invalid-syscall
 * branch from the original file are not visible here.
 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */

	/* Kernel-mode state save - kernel execve */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	swi	r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
	nop;			/* Fill delay slot */

	/* User-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	swi	r0, r1, PTO + PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PTO+PT_R0;

	/* where the trap should return need -8 to adjust for rtsd r15, 8*/
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid. The LP
	 * register should point to the location where
	 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
	# Step into virtual mode.
	/* Check whether this syscall is being traced (ptrace etc.). */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	/* Tracing path: preset return value to -ENOSYS; the tracer may
	 * rewrite the registers saved at r1+PTO. */
	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0	/* delay slot: arg = &regs->r0 */

	# do_syscall_trace_enter returns the new syscall nr.
	/* Reload the (possibly tracer-modified) syscall arguments. */
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;

	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid.
	 * The LP register should point to the location where the called function
	 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12; /* convert num -> ptr */
	/* NOTE(review): a second doubling (num*4 for word-sized table
	 * entries) is not visible in this listing — confirm. */

	/* Trace syscalls and store them to r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	swi	r3, r12, 0x400 + r0_ram

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8

	/* The syscall number is invalid, return an error. */
	addi	r3, r0, -ENOSYS;
	rtsd	r15,8;		/* looks like a normal subroutine return */
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
/* NOTE(review): this listing has gaps — the mode-select/flag-test branches
 * and the VM_OFF/RESTORE_REGS invocations are not visible here. */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PTO + PT_R3	/* save syscall return values */
	swi	r4, r1, PTO + PT_R4

	lwi	r11, r1, PTO + PT_MODE;
	/* See if returning to kernel mode, if so, skip resched &c. */
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0	/* delay slot: arg = &regs->r0 */

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task*/
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* no signal pending - skip straight to restore */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

	/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */

	/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
TRAP_return:	/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

/*
 * sys_fork_wrapper - marshal do_fork(SIGCHLD, parent_sp, regs, 0, 0, 0)
 * from the saved trap frame and tail-call do_fork.
 */
C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	addik	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
/* This the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */

/* NOTE(review): the C_ENTRY(sys_vfork) label and its delay-slot line are
 * not visible in this listing. */
	brid	microblaze_vfork	/* Do real work (tail-call) */

/* NOTE(review): the C_ENTRY(sys_clone) label is not visible in this
 * listing; the lines below marshal the do_fork arguments for clone. */
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	addik	r7, r1, PTO;		/* Arg 2: parent context */
	add	r8, r0, r0;		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */

/* NOTE(review): the C_ENTRY(sys_execve) label is not visible in this
 * listing. */
	addik	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	addik	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
/*
 * HW EXCEPTION routine start
 */

/* NOTE(review): the "#define SAVE_STATE" header line and the mode-select
 * branch lines are not visible in this listing; the \-continued lines
 * below are the macro body. */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	andi	r1, r1, MSR_UMS;					\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	swi	r1, r1, PTO+PT_MODE; 					\
	nop;			/* Fill delay slot */			\
1:	/* User-mode state save. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));

C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	/* FIXME this can be store directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	addik	r5, r1, PTO		 /* parameter struct pt_regs * regs */
	mfs	r7, rfsr;		/* save FSR */
	mts	rfsr, r0;	/* Clear sticky fsr */
	rted	r0, full_exception
/* NOTE(review): the delay-slot instruction after rted is not visible in
 * this listing. */
/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bit, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do if MSR instructions
	 * are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primary used only for stack
	 * management */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	mfs	r4, rear		/* EAR */
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, PTO	/* delay slot: struct pt_regs * regs */
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it was is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	mfs	r7, resr		/* parameter unsigned long error_code */
	rted	r0, do_page_fault

C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	rted	r0, do_page_fault
	ori	r7, r0, 0 	/* delay slot: unsigned long error_code */
/* Entry point used to return from an exception. */
/* NOTE(review): this listing has gaps — the resched branch and the
 * VM_OFF/RESTORE_REGS invocations are not visible here. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* no signal pending - skip straight to restore */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

	/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */

	/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
/* NOTE(review): the C_ENTRY(_interrupt) label line, the mode-select
 * branches, ret_from_irq/no_intr_resched/preempt/restore labels, the
 * #endif for CONFIG_PREEMPT, and the RESTORE_REGS invocations are not
 * visible in this listing. */
	/* MS: we are in physical address */
	/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
	nop; /* MS: Fill delay slot */

	/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Call do_IRQ; return address set so the handler comes back below. */
	addik	r11, r0, do_IRQ;
	addik	r15, r0, irq_call;
irq_call:rtbd	r11, 0;

	/* MS: we are in virtual mode */
	lwi	r11, r1, PTO + PT_MODE;
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
	/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	addik	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal; /* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

	/* Finally, return to user state. */
	/* Disable interrupts, we are now committed to the state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;

	/* MS: Return to kernel state. */
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	lwi	r5, r11, TI_FLAGS; /* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */
	/* interrupts are off that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
	lwi	r5, r11, TI_FLAGS; /* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
	VM_OFF /* MS: turn off MMU */
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
IRQ_return: /* MS: Make global symbol for debugging */
/*
 * Debug trap.
 *
 * We enter dbtrap in "BIP" (breakpoint) mode.
 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
 * state restore; however, wait to save state first.
 *
 * NOTE(review): this listing has gaps — the kernel/user mode test and the
 * VM_OFF/RESTORE_REGS invocations are not visible here.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	swi	r1, r1, PTO + PT_MODE;
	nop;			/* Fill delay slot */
1:	/* User-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */

	/* Deliver SIGTRAP to the current task via send_sig(). */
	addi	r5, r0, SIGTRAP		/* Arg 1: the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task pointer */
	addk	r7, r0, r0		/* 3rd param zero */
dbtrap_call:	rtbd	r0, send_sig;
	addik	r15, r0, dbtrap_call;	/* delay slot: return back here */

	set_bip;			/* Ints masked for state restore*/
	lwi	r11, r1, PTO + PT_MODE;
	/* Get thread-info flags of the current task into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* no signal pending - skip straight to restore */

	/* Handle a signal return; Pending signals should be in r18. */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

	/* Finally, return to user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */

	/* Return to kernel state. */
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
/* NOTE(review): the C_ENTRY(_switch_to) label line and the swi lines that
 * save the outgoing task's registers are not visible in this listing.
 * From the TI_CPU_CONTEXT/TI_TASK offsets used below, r5 appears to be the
 * previous task's thread_info and r6 the next task's — confirm against
 * callers. */
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT

	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	/* save non-volatile registers */
	/* special purpose registers */

	/* update r31, the current-give me pointer to task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* stored it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	lwi	r12, r11, CC_MSR

/* NOTE(review): the C_ENTRY(_reset) label is not visible above this
 * line. */
	brai	0x70; /* Jump back to FS-boot */

/* NOTE(review): the C_ENTRY(_break) label is not visible; these stores
 * record debug values into r0_ram scratch memory. */
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
/* These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	/* this is very important - here is the reset vector */
	/* in current MMU branch you don't care what is here - it is
	 * used from bootloader site - but this is correct for FS-BOOT */
/* NOTE(review): the reset-vector org/branch lines are not visible in this
 * listing; the entries below fill the remaining exception vectors. */
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
	brai	TOPHYS(_debug_exception);	/* debug trap handler*/

/* The generated syscall dispatch table lives in .rodata. */
	.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

/* NOTE(review): the type_SYSCALL/type_IRQ/type_IRQ_PREEMPT label lines are
 * not visible in this listing. */
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

/*
 * Trap decoding for stack unwinder
 * Tuples are (start addr, end addr, string)
 * If return address lies on [start addr, end addr],
 * unwinder displays 'string'
 */
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
	/* Zero terminator tuple */
	.word 0 ; .word 0 ; .word 0