#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;
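
/*
 * Per-IRQ bookkeeping for interrupt remapping: the IOMMU that remaps
 * this IRQ, the index of its IRTE (interrupt remapping table entry),
 * the sub-handle for IRQs sharing an IRTE block (e.g. multiple MSI
 * vectors), and the mask describing the size of that block.
 */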
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};
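
/*
 * One irq_2_iommu entry per IRQ. With CONFIG_HAVE_DYN_ARRAY the table is
 * sized at boot from nr_irqs; otherwise it is a static NR_IRQS array.
 */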
#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_2_iommu *irq_2_iommuX;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
#else
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
#endif
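
/* Look up the remapping state for an IRQ; NULL if the IRQ is out of range. */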
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}

static DEFINE_SPINLOCK(irq_2_ir_lock);
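
/*
 * Like irq_2_iommu(), but additionally requires that the entry has been
 * bound to an IOMMU, i.e. that the IRQ is actually remapped.
 */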
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);
        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}
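
/* Copy the IRTE currently programmed for @irq into @entry. */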
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        if (!entry)
                return -1;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}
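
/*
 * Allocate @count consecutive IRTEs for @irq. @count is rounded up to a
 * power of two so the whole block can later be invalidated with a single
 * masked IEC flush; the search is a first-fit scan that wraps around the
 * table once before giving up.
 */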
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        int i;

        if (!count)
                return -1;

        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock(&irq_2_ir_lock);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock(&irq_2_ir_lock);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock(&irq_2_ir_lock);

        return index;
}
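
/*
 * Queue an interrupt-entry-cache (IEC) invalidation for @index and wait
 * for it to complete. @mask selects how many entries (2^mask) the
 * selective invalidation covers.
 */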
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}
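
/* Return the IRTE base index for @irq and report its sub-handle. */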
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock(&irq_2_ir_lock);
        return index;
}
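
/*
 * Bind @irq to an already-allocated IRTE block: record the IOMMU, the
 * block's base @index and this IRQ's @subhandle within it.
 */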
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = irq_2_iommu_alloc(irq);

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}
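
/* Drop the IRQ's binding to its IRTE without touching the table itself. */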
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}
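
/*
 * Rewrite the low half of the IRQ's IRTE and invalidate the cached copy;
 * a zero mask flushes just this one entry. Bit 1 of the low word is
 * forced on here (FPD, fault-processing-disable, in the VT-d IRTE layout).
 */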
int modify_irte(int irq, struct irte *irte_modified)
{
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        qi_flush_iec(iommu, index, 0);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}
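
/* Invalidate the cached IRTE block for @irq without modifying the table. */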
int flush_irte(int irq)
{
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock(&irq_2_ir_lock);

        return 0;
}
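
/* Find the IOMMU whose DRHD scope claims the IO-APIC with this id. */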
struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;

        return NULL;
}
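
/* Find the IOMMU (DRHD unit) responsible for remapping @dev's interrupts. */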
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}
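
/*
 * Release the IRTE block for @irq. Only the owner of the block (the IRQ
 * with sub-handle 0) clears the entries and flushes the IEC; sub-handle
 * users just drop their reference.
 */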
int free_irte(int irq)
{
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_iommu->sub_handle) {
                /* clear every entry in the block, not just the first one */
                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                        set_64bit((unsigned long *)(irte + i), 0);
                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}
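
/*
 * Program the remapping hardware: point DMAR_IRTA_REG at the table (with
 * the x2apic/EIM mode and table-size fields), latch it with GCMD.SIRTP,
 * globally invalidate the interrupt entry cache, and only then set
 * GCMD.IRE to start remapping.
 */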
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}
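
/* Allocate this IOMMU's interrupt remapping table and enable remapping. */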
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_KERNEL);
        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);

        return 0;
}
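
/*
 * Three passes over the DRHD units: verify IR (and, if requested, EIM)
 * support everywhere, enable queued invalidation (required for IEC
 * flushing), then set up remapping on each unit.
 */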
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported, "
                               "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;

                ret = dmar_enable_qi(iommu);
                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}
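
/*
 * Walk the ACPI device-scope entries of a DRHD and record which IO-APICs
 * fall under this remapping unit.
 */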
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IOAPICs and their interrupt-remapping
 * hardware unit.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APIC's listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}