#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
#include "internals.h"
-#ifdef CONFIG_TRACE_IRQFLAGS
-
/*
* lockdep: we want to handle all irq_desc locks as a single lock-class:
*/
-static struct lock_class_key irq_desc_lock_class;
-#endif
+struct lock_class_key irq_desc_lock_class;
/**
* handle_bad_irq - handle spurious and unhandled irqs
* @irq:	the interrupt number
* @desc:	description of the interrupt
*
* Handles spurious and unhandled IRQs. It also prints a debug message.
*/
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
print_irq_desc(irq, desc);
- kstat_this_cpu.irqs[irq]++;
+ kstat_incr_irqs_this_cpu(irq, desc);
ack_bad_irq(irq);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
-#ifdef CONFIG_HAVE_DYN_ARRAY
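+/*
+ * Weak no-op stub; architectures that need early irq setup of their
+ * own can override this.
+ */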
+void __init __attribute__((weak)) arch_early_irq_init(void)
+{
+}
+
+#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
- .irq = -1U,
- .status = IRQ_DISABLED,
- .chip = &no_irq_chip,
+ .irq = -1,
+ .status = IRQ_DISABLED,
+ .chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
- .depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .depth = 1,
+ .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
+ .affinity = CPU_MASK_ALL
#endif
};
-
-static void init_one_irq_desc(struct irq_desc *desc)
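+/*
+ * Allocate the kstat_irqs counter array for @desc (nr entries, one per
+ * possible cpu) on the node @cpu belongs to.  On allocation failure
+ * desc->kstat_irqs is left NULL; the caller has to check for that.
+ */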
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
- memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-#ifdef CONFIG_TRACE_IRQFLAGS
- lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-#endif
-}
+ unsigned long bytes;
+ char *ptr;
+ int node;
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-static int nr_irq_desc = 32;
+ /* Compute how many bytes we need per irq and allocate them */
+ bytes = nr * sizeof(unsigned int);
-static int __init parse_nr_irq_desc(char *arg)
-{
- if (arg)
- nr_irq_desc = simple_strtoul(arg, NULL, 0);
- return 0;
+ node = cpu_to_node(cpu);
+ ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
+ printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+
+ if (ptr)
+ desc->kstat_irqs = (unsigned int *)ptr;
}
-early_param("nr_irq_desc", parse_nr_irq_desc);
+void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+}
-static void __init init_work(void *data)
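+/*
+ * Initialize one dynamically allocated descriptor: copy the
+ * irq_desc_init template, fill in irq number and home cpu, set the
+ * shared lockdep class and allocate the per-cpu kstat counters.
+ */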
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
- struct dyn_array *da = data;
- int i;
- struct irq_desc *desc;
+ memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+ desc->irq = irq;
+#ifdef CONFIG_SMP
+ desc->cpu = cpu;
+#endif
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+ init_kstat_irqs(desc, cpu, nr_cpu_ids);
+	if (!desc->kstat_irqs) {
+		printk(KERN_ERR "cannot allocate kstat_irqs\n");
+		BUG();
+	}
+ arch_init_chip_data(desc, cpu);
+}
- desc = *da->name;
+/*
+ * Protect the irq_desc_ptrs array:
+ */
+DEFINE_SPINLOCK(sparse_irq_lock);
- for (i = 0; i < *da->nr; i++)
- init_one_irq_desc(&desc[i]);
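+/*
+ * The sparse irq lookup table: the legacy entries are wired up in
+ * early_irq_init(), everything else starts out NULL and is allocated
+ * on demand by irq_to_desc_alloc_cpu().
+ */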
+struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
- for (i = 1; i < *da->nr; i++)
- desc[i-1].next = &desc[i];
-}
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+ [0 ... NR_IRQS_LEGACY-1] = {
+ .irq = -1,
+ .status = IRQ_DISABLED,
+ .chip = &no_irq_chip,
+ .handle_irq = handle_bad_irq,
+ .depth = 1,
+ .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+ .affinity = CPU_MASK_ALL
+#endif
+ }
+};
-static struct irq_desc *sparse_irqs;
-DEFINE_DYN_ARRAY(sparse_irqs, sizeof(struct irq_desc), nr_irq_desc, PAGE_SIZE, init_work);
+/* FIXME: use bootmem alloc ... */
+static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
-extern int after_bootmem;
-extern void *__alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal);
-struct irq_desc *irq_to_desc(unsigned int irq)
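+/*
+ * Wire up the statically allocated legacy descriptors and their kstat
+ * arrays, clear the rest of the lookup table and then let the
+ * architecture do its own early setup.
+ */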
+void __init early_irq_init(void)
{
- struct irq_desc *desc, *desc_pri;
+ struct irq_desc *desc;
+ int legacy_count;
int i;
- int count = 0;
- BUG_ON(irq == -1U);
+ desc = irq_desc_legacy;
+ legacy_count = ARRAY_SIZE(irq_desc_legacy);
- desc_pri = desc = &sparse_irqs[0];
- while (desc) {
- if (desc->irq == irq)
- return desc;
+ for (i = 0; i < legacy_count; i++) {
+ desc[i].irq = i;
+ desc[i].kstat_irqs = kstat_irqs_legacy[i];
- if (desc->irq == -1U) {
- desc->irq = irq;
- return desc;
- }
- desc_pri = desc;
- desc = desc->next;
- count++;
+ irq_desc_ptrs[i] = desc + i;
}
- /*
- * we run out of pre-allocate ones, allocate more
- */
- printk(KERN_DEBUG "try to get more irq_desc %d\n", nr_irq_desc);
+ for (i = legacy_count; i < NR_IRQS; i++)
+ irq_desc_ptrs[i] = NULL;
+
+ arch_early_irq_init();
+}
- if (after_bootmem)
- desc = kzalloc(sizeof(struct irq_desc)*nr_irq_desc, GFP_ATOMIC);
- else
- desc = __alloc_bootmem_nopanic(sizeof(struct irq_desc)*nr_irq_desc, PAGE_SIZE, 0);
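+/* Pure lookup; returns NULL for irqs without a descriptor (so far). */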
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+}
- if (!desc)
- panic("please boot with nr_irq_desc= %d\n", count * 2);
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+ int node;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
+ irq, NR_IRQS);
+ WARN_ON(1);
+ return NULL;
+ }
- for (i = 0; i < nr_irq_desc; i++)
- init_one_irq_desc(&desc[i]);
+ desc = irq_desc_ptrs[irq];
+ if (desc)
+ return desc;
- for (i = 1; i < nr_irq_desc; i++)
- desc[i-1].next = &desc[i];
+ spin_lock_irqsave(&sparse_irq_lock, flags);
- desc->irq = irq;
- desc_pri->next = desc;
+ /* We have to check it to avoid races with another CPU */
+ desc = irq_desc_ptrs[irq];
+ if (desc)
+ goto out_unlock;
- return desc;
-}
-#else
-static void __init init_work(void *data)
-{
- struct dyn_array *da = data;
- int i;
- struct irq_desc *desc;
+ node = cpu_to_node(cpu);
+ desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+ printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
+ irq, cpu, node);
+	if (!desc) {
+		printk(KERN_ERR "cannot allocate irq_desc\n");
+		BUG();
+	}
+ init_one_irq_desc(irq, desc, cpu);
- desc = *da->name;
+ irq_desc_ptrs[irq] = desc;
- for (i = 0; i < *da->nr; i++)
- init_one_irq_desc(&desc[i]);
+out_unlock:
+ spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ return desc;
}
-static struct irq_desc *irq_desc;
-DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
-
-#endif
#else
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(sparse_irqs->lock),
+ .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
.affinity = CPU_MASK_ALL
#endif
#endif
-#ifndef CONFIG_HAVE_SPARSE_IRQ
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
- if (irq < nr_irqs)
- return &irq_desc[irq];
-
- return NULL;
-}
-#endif
-
/*
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this itself.
*/
static void ack_bad(unsigned int irq)
{
- struct irq_desc *desc;
+ struct irq_desc *desc = irq_to_desc(irq);
- desc = irq_to_desc(irq);
print_irq_desc(irq, desc);
ack_bad_irq(irq);
}
struct irqaction *action;
unsigned int status;
- kstat_this_cpu.irqs[irq]++;
+ kstat_incr_irqs_this_cpu(irq, desc);
+
if (CHECK_IRQ_PER_CPU(desc->status)) {
irqreturn_t action_ret;
/*
* No locking required for CPU-local interrupts:
*/
- if (desc->chip->ack)
+ if (desc->chip->ack) {
desc->chip->ack(irq);
+			/* the ack may have moved the descriptor: look it up again */
+ desc = irq_remap_to_desc(irq, desc);
+ }
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
}
spin_lock(&desc->lock);
- if (desc->chip->ack)
+ if (desc->chip->ack) {
desc->chip->ack(irq);
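+		/* the ack may have moved the descriptor: look it up again */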
+ desc = irq_remap_to_desc(irq, desc);
+ }
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
}
#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
void early_init_irq_lock_class(void)
{
-#ifndef CONFIG_HAVE_DYN_ARRAY
+ struct irq_desc *desc;
int i;
- for (i = 0; i < nr_irqs; i++)
- lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
-#endif
+ for_each_irq_desc(i, desc) {
+ if (!desc)
+ continue;
+
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+ }
+}
+
+#ifdef CONFIG_SPARSE_IRQ
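+/*
+ * With sparse irqs the per-irq statistics live in the dynamically
+ * allocated desc->kstat_irqs[] array instead of in struct kernel_stat.
+ */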
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return desc->kstat_irqs[cpu];
}
#endif
+EXPORT_SYMBOL(kstat_irqs_cpu);