#ifndef _KSTACK_H
#define _KSTACK_H

#include <linux/thread_info.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
#include <asm/irq.h>

/* SP must be STACK_BIAS adjusted already. */
static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
{
	unsigned long base = (unsigned long) tp;

	/* Stack pointer must be 16-byte aligned. */
	if (sp & (16UL - 1))
		return false;

	/* On the task's own stack: above its thread_info, with room for
	 * at least one register window save area at the top.
	 */
	if (sp >= (base + sizeof(struct thread_info)) &&
	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
		return true;

	/* Otherwise it may be on this cpu's hard or soft irq stack. */
	if (hardirq_stack[tp->cpu]) {
		base = (unsigned long) hardirq_stack[tp->cpu];
		if (sp >= base &&
		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
			return true;
		base = (unsigned long) softirq_stack[tp->cpu];
		if (sp >= base &&
		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
			return true;
	}

	return false;
}

/* Does "regs" point to a valid pt_regs trap frame? */
static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
{
	unsigned long base = (unsigned long) tp;
	unsigned long addr = (unsigned long) regs;

	if (addr >= base &&
	    addr <= (base + THREAD_SIZE - sizeof(*regs)))
		goto check_magic;

	if (hardirq_stack[tp->cpu]) {
		base = (unsigned long) hardirq_stack[tp->cpu];
		if (addr >= base &&
		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
			goto check_magic;
		base = (unsigned long) softirq_stack[tp->cpu];
		if (addr >= base &&
		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
			goto check_magic;
	}
	return false;

check_magic:
	/* The low 9 bits of ->magic encode the trap type; mask them off
	 * before comparing against PT_REGS_MAGIC.
	 */
	if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
		return true;
	return false;
}
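
/*
 * Usage sketch (illustrative, not part of the original header): a stack
 * walker typically validates each candidate frame pointer with
 * kstack_valid() and then uses kstack_is_trap_frame() to decide whether
 * the words following the register window hold a saved trap frame.
 * Variable names (ksp, fp, pc) and the loop bound are assumptions; the
 * struct field names follow the sparc64 <asm/ptrace.h> layout.
 *
 *	fp = ksp + STACK_BIAS;
 *	do {
 *		struct sparc_stackf *sf;
 *		struct pt_regs *regs;
 *
 *		if (!kstack_valid(tp, fp))
 *			break;
 *		sf = (struct sparc_stackf *) fp;
 *		regs = (struct pt_regs *) (sf + 1);
 *		if (kstack_is_trap_frame(tp, regs)) {
 *			pc = regs->tpc;
 *			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
 *		} else {
 *			pc = sf->callers_pc;
 *			fp = (unsigned long) sf->fp + STACK_BIAS;
 *		}
 *	} while (...);
 */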

/* Switch onto this cpu's hardirq stack unless %sp already points into
 * it, returning the original stack pointer so it can be restored later.
 */
static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		/* Start at the top of the irq stack, leaving space for an
		 * initial frame and applying the stack bias.
		 */
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}

static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}
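
/*
 * Usage sketch (illustrative): irq entry code pairs these two helpers
 * around handler dispatch, switching onto the per-cpu hardirq stack and
 * back again:
 *
 *	void *orig_sp = set_hardirq_stack();
 *	... run the pending irq handlers ...
 *	restore_hardirq_stack(orig_sp);
 *
 * Both helpers are forced inline because they rewrite %sp of the running
 * frame in place; the switch would not survive an ordinary call/return.
 */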

#endif /* _KSTACK_H */