/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->name = NULL;
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
	desc_smp_init(desc, node);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

DEFINE_RAW_SPINLOCK(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.status		= IRQ_DEFAULT_INIT_FLAGS,
	.handle_irq	= handle_bad_irq,
	.depth		= 1,
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
			   GFP_ATOMIC, node);

	/*
	 * don't overwrite if we cannot get a new one;
	 * init_copy_kstat_irqs() could still use the old one
	 */
	if (ptr) {
		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
	desc->irq_data.node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}
static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
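/*
 * Illustrative sketch, not part of the original file: the radix tree
 * above gives a sparse irq-number -> descriptor mapping, so
 * irq_to_desc() simply returns NULL for an irq that has no descriptor.
 * example_irq_has_desc() is a hypothetical helper showing how a caller
 * can probe validity from that contract:
 */
#if 0
static bool example_irq_has_desc(unsigned int irq)
{
	/* NULL means no descriptor was ever inserted for this irq */
	return irq_to_desc(irq) != NULL;
}
#endif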
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
					gfp, node);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);
	return desc;

err_kstat:
	kfree(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	delete_irq_desc(irq);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	free_masks(desc);
	kfree(desc->kstat_irqs);
	kfree(desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	unsigned long flags;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		raw_spin_lock_irqsave(&sparse_irq_lock, flags);
		irq_insert_desc(start + i, desc);
		raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	bitmap_clear(allocated_irqs, start, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return -ENOMEM;
}
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					 sizeof(int), GFP_NOWAIT, node);

	irq_desc_init.irq_data.chip = &no_irq_chip;

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
		desc[i].irq_data.node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_insert_desc(i, &desc[i]);
	}

	return arch_early_irq_init();
}
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_to_desc(irq);
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_to_desc(irq);
	if (desc)
		goto out_unlock;

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

	printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_insert_desc(irq, desc);

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#ifdef CONFIG_SMP
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}
#else
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */
/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	unsigned long flags;
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	bitmap_clear(allocated_irqs, from, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
}
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	unsigned long flags;
	int start, ret;

	if (!cnt)
		return -EINVAL;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return alloc_descs(start, cnt, node);

err:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return ret;
}
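/*
 * Illustrative usage sketch, not part of the original file: pair the
 * two interfaces above by allocating a block of four consecutive
 * descriptors anywhere at or above irq 64 (irq = -1 means "no fixed
 * number"), then releasing them. example_setup() and the numbers are
 * assumptions for illustration only.
 */
#if 0
static int example_setup(int node)
{
	int irq = irq_alloc_descs(-1, 64, 4, node);

	if (irq < 0)	/* -EEXIST, -ENOMEM or -EINVAL */
		return irq;

	/* ... install handlers for irq .. irq + 3 here ... */

	irq_free_descs(irq, 4);
	return 0;
}
#endif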
/* Statistics access */
void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? desc->kstat_irqs[cpu] : 0;
}
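/*
 * Illustrative sketch, not part of the original file: total the
 * per-CPU counters for one irq. kstat_irqs_cpu() returns 0 when no
 * descriptor exists, so no extra validity check is needed.
 * example_kstat_irqs_sum() is a hypothetical helper.
 */
#if 0
static unsigned int example_kstat_irqs_sum(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);
	return sum;
}
#endif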