/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>		/* for the kzalloc()/kcalloc() calls below */

#define PREFIX "DMAR: "

/*
 * No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units are not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);	/* drop the ref only after pci_name() is done with it */
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

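/*
 * Second-pass handling for one DRHD: resolve its device scope into
 * actual PCI devices. A unit whose scope cannot be parsed is removed
 * from the DRHD list.
 */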
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}

	return ret;
}

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

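/*
 * Parse one RMRR (Reserved Memory Region Reporting) structure and queue
 * it on dmar_rmrr_units; its device scope is resolved later by
 * rmrr_parse_dev().
 */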
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

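/*
 * Parse one ATSR (ATS Reporting) structure and queue it on
 * dmar_atsr_units; its device scope is resolved later by
 * atsr_parse_dev().
 */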
static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

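/*
 * Return 1 if the given device is covered by an ATSR unit on its PCI
 * segment, i.e. ATS can be used on the path through its root port.
 */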
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	     rhsa->base_address,
	     dmi_get_system_info(DMI_BIOS_VENDOR),
	     dmi_get_system_info(DMI_BIOS_VERSION),
	     dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

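/*
 * Return 1 if dev, or any bridge above it, appears in the given device
 * scope array.
 */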
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static int bios_warned;

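/*
 * Sanity-check every DRHD in the table: the register base address must be
 * non-zero and the unit must respond to reads of its capability registers.
 * Returns 1 if the table looks usable, 0 otherwise.
 */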
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     drhd->address,
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

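/*
 * Allocate and initialize the struct intel_iommu for one DRHD: map its
 * register window, read the (extended) capability registers and compute
 * the supported address widths.
 */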
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		if (!bios_warned) {
			WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		if (!bios_warned) {
			/* Promote an attitude of violence to a BIOS engineer today */
			WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     drhd->reg_base_addr,
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}

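/*
 * Undo alloc_iommu(): release the domain state, unmap the register
 * window and free the structure.
 */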
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

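/*
 * Check the fault status register for queued-invalidation errors
 * (invalid descriptor, invalidation time-out, completion error) and
 * recover enough state for the caller to retry. Returns -EINVAL on a
 * bad descriptor, -EAGAIN if our wait descriptor was aborted, else 0.
 */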
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent an
		 * interrupt context from queueing another cmd while a cmd is
		 * already submitted and waiting for completion on this cpu.
		 * This is to avoid a deadlock where the interrupt context can
		 * wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	/* kzalloc so every slot starts out QI_FREE (== 0) */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most stuff is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

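/*
 * Fault-event interrupt control: the DMAR unit signals faults through an
 * MSI-like register set (FECTL/FEDATA/FEADDR/FEUADDR).
 */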
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

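/*
 * Decode and log a single fault record read from the fault recording
 * registers.
 */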
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

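/*
 * Fault-event interrupt handler: drain all pending fault records
 * (each record is PRIMARY_FAULT_REG_LEN bytes), logging and clearing
 * each one, then clear the summary bits in the fault status register.
 */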
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

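/*
 * Allocate and wire up the fault-event (MSI-like) interrupt for one
 * IOMMU, then install dmar_fault() as its handler.
 */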
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}