/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack);

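/*
 * Switch %esp to @stack, call @func, then restore the original stack
 * pointer; the old %esp is preserved in %ebx across the call.
 */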
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl %%ebx,%%esp \n"
		     "call *%%edi \n"
		     "movl %%ebx,%%esp \n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D" (func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

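/*
 * Mask the stack pointer down to the THREAD_SIZE boundary to get the
 * base (lowest address) of whichever stack we are currently running on.
 */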
static inline void *current_stack(void)
{
	return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
}

static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
	struct irq_stack *curstk, *irqstk;
	u32 *isp, *prev_esp, arg1;

	curstk = (struct irq_stack *) current_stack();
	irqstk = __this_cpu_read(hardirq_stack);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curstk == irqstk))
		return 0;

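	/*
	 * The hardirq stack grows down, so start the new frame at the very
	 * top (highest address) of the irq_stack allocation.
	 */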
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Save the next esp at the bottom of the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer();

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

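	/*
	 * Same stack-switching dance as call_on_stack(): run
	 * desc->handle_irq(desc) on the hardirq stack (the descriptor is
	 * passed in %eax per the 32-bit kernel's regparm(3) convention),
	 * then switch back to the task stack.
	 */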
	asm volatile("xchgl %%ebx,%%esp \n"
		     "call *%%edi \n"
		     "movl %%ebx,%%esp \n"
		     : "=a" (arg1), "=b" (isp)
		     : "0" (desc), "1" (isp),
		       "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	struct irq_stack *irqstk;

	if (per_cpu(hardirq_stack, cpu))
		return;

	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP, THREAD_SIZE_ORDER));
	per_cpu(hardirq_stack, cpu) = irqstk;

	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP, THREAD_SIZE_ORDER));
	per_cpu(softirq_stack, cpu) = irqstk;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}

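/*
 * Run pending softirqs on the dedicated per-CPU softirq stack instead of
 * the current task stack.
 */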
void do_softirq_own_stack(void)
{
	struct irq_stack *irqstk;
	u32 *isp, *prev_esp;

	irqstk = __this_cpu_read(softirq_stack);

	/* build the stack frame on the softirq stack */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Push the previous esp onto the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer();

	call_on_stack(__do_softirq, isp);
}

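/*
 * Entry point from the low-level interrupt code. The handler runs on the
 * per-CPU hardirq stack unless we interrupted user mode (the kernel task
 * stack is empty then) or are already on the irq stack; returns false for
 * a bogus descriptor.
 */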
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	int overflow = check_stack_overflow();

	if (IS_ERR_OR_NULL(desc))
		return false;

	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
		if (unlikely(overflow))
			print_stack_overflow();
		generic_handle_irq_desc(desc);
	}

	return true;
}