/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;
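        /*
         * The kernel stack is THREAD_SIZE-aligned, so masking %esp with
         * THREAD_SIZE - 1 yields its offset from the base of the stack,
         * i.e. how many bytes below %esp (down to the thread_info at the
         * base) are still free.
         */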
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}
static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
}
#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif
DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
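/*
 * Call func() with %esp switched to the given stack: the old and new
 * stack pointers are exchanged through %ebx around an indirect call,
 * then the original %esp is restored.
 */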
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call  *%%edi      \n"
                     "movl  %%ebx,%%esp \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D" (func)
                     : "memory", "cc", "edx", "ecx", "eax");
}
static inline void *current_stack(void)
{
        return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
}
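/*
 * Switch to the hardirq stack and run the handler there. Returns 0 if
 * we are already on the hardirq stack, in which case the caller must
 * invoke the handler on the current stack instead.
 */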
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        struct irq_stack *curstk, *irqstk;
        u32 *isp, *prev_esp, arg1, arg2;

        curstk = (struct irq_stack *) current_stack();
        irqstk = __this_cpu_read(hardirq_stack);
        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curstk == irqstk))
                return 0;
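        /* Stacks grow down: start at the highest address of the irq stack. */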
        isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

        /* Save the next esp at the bottom of the stack */
        prev_esp = (u32 *)irqstk;
        *prev_esp = current_stack_pointer();

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);
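        /*
         * Open-coded call_on_stack(): with the i386 kernel's regparm(3)
         * convention, irq in %eax and desc in %edx arrive as the handler's
         * first two arguments, while %edi holds the handler address and
         * %ebx carries the new stack pointer.
         */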
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call  *%%edi      \n"
                     "movl  %%ebx,%%esp \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}
/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        struct irq_stack *irqstk;

        if (per_cpu(hardirq_stack, cpu))
                return;
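        /* One THREAD_SIZE-sized stack each, allocated on the CPU's own node. */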
        irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREADINFO_GFP,
                                               THREAD_SIZE_ORDER));
        per_cpu(hardirq_stack, cpu) = irqstk;

        irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREADINFO_GFP,
                                               THREAD_SIZE_ORDER));
        per_cpu(softirq_stack, cpu) = irqstk;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
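/*
 * Run pending softirqs on the dedicated per-cpu softirq stack rather
 * than on whatever task stack happened to be current.
 */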
void do_softirq_own_stack(void)
{
        struct thread_info *curstk;
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;

        curstk = current_stack();
        irqstk = __this_cpu_read(softirq_stack);

        /* build the stack frame on the softirq stack */
        isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

        /* Push the previous esp onto the stack */
        prev_esp = (u32 *)irqstk;
        *prev_esp = current_stack_pointer();

        call_on_stack(__do_softirq, isp);
}
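/*
 * Dispatch one hardware interrupt. If we came from user mode the task
 * stack is nearly empty, so the handler can safely run on it directly;
 * otherwise try to switch to the hardirq stack first.
 */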
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}