/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			irq;
	int			pnode;
};
static spinlock_t		uv_irq_lock;
static struct rb_root		uv_irq_root;

static int uv_set_irq_affinity(unsigned int, const struct cpumask *);
static void uv_noop(unsigned int irq)
{
}
static unsigned int uv_noop_ret(unsigned int irq)
{
	return 0;
}
static void uv_ack_apic(unsigned int irq)
{
	ack_APIC_irq();
}
struct irq_chip uv_irq_chip = {
	.name		= "UV-CORE",
	.startup	= uv_noop_ret,
	.shutdown	= uv_noop,
	.enable		= uv_noop,
	.disable	= uv_noop,
	.ack		= uv_noop,
	.mask		= uv_noop,
	.unmask		= uv_noop,
	.eoi		= uv_ack_apic,
	.end		= uv_noop,
	.set_affinity	= uv_set_irq_affinity,
};
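/*
 * Most callbacks in uv_irq_chip above are deliberate no-ops: on UV the
 * interrupt is sourced by a hub MMR, so masking and routing are controlled
 * by rewriting that MMR (see the uv_write_global_mmr64() calls below) rather
 * than through per-chip enable/disable registers; only the EOI (APIC ack)
 * and set_affinity callbacks do real work.
 */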
/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	/* Allocate the node on the memory node local to the blade. */
	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists: update it in place */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);

	return 0;
}
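/*
 * Lookups mirror the insert above: the tree is ordered by irq number, so
 * uv_irq_2_mmr_info() and uv_teardown_irq() find an entry in O(log n).
 */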
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset, int restrict)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int err;

	/* The routing entry must pack into a single 64-bit MMR write. */
	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	if (restrict == UV_AFFINITY_CPU)
		desc->status |= IRQ_NO_BALANCING;
	else
		desc->status |= IRQ_MOVE_PCNTXT;

	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	/* Masking the routing entry stops further MSI delivery. */
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;
	unsigned int dest;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long mmr_offset;
	int mmr_pnode;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}
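/*
 * The send_cleanup_vector() calls above and in arch_enable_uv_irq() are the
 * standard x86 vector-migration cleanup: once the hub MMR points at the new
 * destination, the vector still reserved on the old CPU is reclaimed via the
 * cleanup IPI.
 */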
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int restrict)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
		restrict);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
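/*
 * Usage sketch (not part of this file): a driver wanting a hub-sourced MSI
 * would pair uv_setup_irq()/uv_teardown_irq() with the usual request_irq()/
 * free_irq(). The names my_mmr_offset, my_handler, my_dev and "my_uv_dev"
 * are illustrative only:
 *
 *	irq = uv_setup_irq("my_uv_dev", cpu, blade, my_mmr_offset,
 *			   UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, my_handler, 0, "my_uv_dev", my_dev)) {
 *		uv_teardown_irq(irq);
 *		return -EBUSY;
 *	}
 *	...
 *	free_irq(irq, my_dev);
 *	uv_teardown_irq(irq);
 */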
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The mmr_offset and pnode recorded by uv_setup_irq() are looked up in the
 * rb tree, so only the irq needs to be passed in.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);