/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>

#define PREFIX "DMAR:"
/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}
static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
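/*
 * Note (editorial, illustrative): an ATSR structure lists the PCIe root
 * ports below which Address Translation Services may be used, so the
 * walk above climbs dev->bus->parent until it reaches a root port and
 * only then consults atsru->devices[]; an ATSR with INCLUDE_ALL set
 * instead matches every root port in its segment.
 */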
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
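/*
 * Illustrative call flow (an editorial sketch, not part of the driver):
 * detection and parsing are separate steps, roughly:
 *
 *	detect_intel_iommu();	// early: map DMAR, set iommu_detected
 *	...
 *	if (dmar_table_init())	// re-map and parse all structures
 *		return;		// no usable DMAR table
 *	dmar_dev_scope_init();	// resolve device scopes to pci_devs
 *
 * The exact ordering is driven by the arch init code; this only shows
 * how the entry points in this file relate to each other.
 */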
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, as the earlier dmar_tbl mapping could have been
	 * created with a fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
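/*
 * For reference (editorial sketch, following the VT-d/ACPI spec): the
 * DMAR table is a fixed acpi_table_dmar header followed by a packed
 * sequence of variable-length remapping structures, each led by an
 * acpi_dmar_header carrying its type and length:
 *
 *	+------------------+
 *	| acpi_table_dmar  |  width (haw), flags, ...
 *	+------------------+
 *	| DRHD   (type 0)  |  one per remapping hardware unit
 *	+------------------+
 *	| RMRR   (type 1)  |  reserved memory regions
 *	+------------------+
 *	| ATSR   (type 2)  |  root ports supporting ATS
 *	+------------------+
 *
 * The loop above simply advances entry_header by entry_header->length
 * until dmar_tbl->length bytes have been consumed.
 */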
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}

	return ret;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		 (unsigned long long)drhd->reg_base_addr,
		 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		 (unsigned long long)iommu->cap,
		 (unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
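/*
 * Worked example (editorial, illustrative): the queue is a QI_LENGTH-slot
 * ring and every qi_submit_sync() burns a (descriptor, wait descriptor)
 * pair.  With free_tail at 0, two completed submissions and a third in
 * flight look like:
 *
 *	slot:   0        1        2        3        4          5
 *	status: QI_DONE  QI_DONE  QI_DONE  QI_DONE  QI_IN_USE  QI_IN_USE
 *
 * reclaim_free_desc() advances free_tail across slots 0-3, marking each
 * QI_FREE and bumping free_cnt, and stops at the in-flight pair.
 */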
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
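/*
 * Illustrative queue state (editorial sketch): each submission occupies
 * two consecutive slots, the caller's descriptor plus a wait descriptor
 * whose status write is what the poll loop above watches:
 *
 *	hw[index]     = *desc;		// the real invalidation request
 *	hw[index + 1] = wait_desc;	// QI_IWD_TYPE, points at
 *					// &qi->desc_status[wait_index]
 *
 * Hardware sets desc_status[wait_index] to QI_DONE when it executes the
 * wait descriptor.  The free_cnt < 3 check presumably keeps one slot
 * spare beyond the pair so a completely full ring can never look empty.
 */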
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
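/*
 * Example encoding (editorial, illustrative): size_order is the VT-d
 * address-mask value, i.e. the request covers 2^size_order pages.
 * Flushing a single 4KiB page of domain 1 at IOVA 0x1000 would be:
 *
 *	qi_flush_iotlb(iommu, 1, 0x1000, 0, DMA_TLB_PSI_FLUSH);
 *
 * while size_order = 9 covers a 2MiB-aligned range.
 */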
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
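/*
 * Worked example (editorial, illustrative): ATS encodes the invalidation
 * size in the low address bits, the range being marked by the lowest
 * zero bit.  With VTD_PAGE_SHIFT = 12 and mask = 1 (an aligned pair of
 * 4KiB pages):
 *
 *	addr |= (1 << (12 + 1 - 1)) - 1;	// sets addr bits [11:0]
 *
 * leaving bit 12 clear, which together with QI_DEV_IOTLB_SIZE tells the
 * device the invalidation spans 2^mask pages.  The BUG_ON above enforces
 * the required 2^mask-page alignment of addr.
 */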
/*
 * Disable the Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable the Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
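/*
 * Typical usage (editorial sketch): interrupt-remapping setup enables QI
 * first and then flushes through it, roughly:
 *
 *	if (dmar_enable_qi(iommu))
 *		return -1;		// QI unavailable, caller bails out
 *	qi_global_iec(iommu);		// flush the interrupt entry cache
 *
 * DMA-remapping likewise switches its context/IOTLB flushing from the
 * register interface to qi_flush_context()/qi_flush_iotlb() once QI is up.
 */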
/* iommu interrupt handling. Most stuff is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};
static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};
static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
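/*
 * Example (editorial, illustrative): fault reason 0x25 is an
 * interrupt-remapping fault, and 0x25 - 0x20 = 5 indexes "Blocked a
 * compatibility format interrupt request"; reason 0x05 stays in the
 * DMA-remap table and reads "PTE Write access is not set".
 */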
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
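/*
 * Fault record layout (for reference, per the VT-d spec): each primary
 * fault record is PRIMARY_FAULT_REG_LEN (16) bytes, located at
 * reg + fault_index * 16 and read above in three pieces:
 *
 *	offset  0: u64	faulting page address	(dma_frcd_page_addr)
 *	offset  8: u32	source id		(dma_frcd_source_id)
 *	offset 12: u32	F bit, type, reason	(dma_frcd_fault_reason, ...)
 *
 * Writing DMA_FRCD_F back to offset 12 retires that record.
 */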
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}
/*
 * Re-enable the Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}