x86: disable intel_iommu support by default

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f1380269cabd7c0cfb7b4cddcf8f3f0452884e9a..f4b7c79023ffe7d05589dd93526531ea3dc3708f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -34,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -204,7 +205,7 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 }
 
 /* devices under the same p2p bridge are owned in one domain */
-#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 < 0)
+#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
 
 /* domain represents a virtual machine, more than one devices
  * across iommus may be owned in one domain, e.g. kvm guest.
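The hunk above fixes a classic operator typo: "1 < 0" is a less-than comparison that evaluates to 0, so the flag could never be set or tested; "1 << 0" is the intended bit 0. A minimal illustration (not part of the patch):

	#define OLD_FLAG (1 < 0)	/* comparison: evaluates to 0 */
	#define NEW_FLAG (1 << 0)	/* shift: evaluates to 0x1, bit zero */

	/* (flags & OLD_FLAG) was always 0, so code testing
	 * DOMAIN_FLAG_P2P_MULTIPLE_DEVICES never fired before this fix. */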
@@ -230,6 +231,7 @@ struct dmar_domain {
        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_count;    /* reference count of iommu */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
+       u64             max_addr;       /* maximum mapped address */
 };
 
 /* PCI domain-device relationship */
@@ -266,7 +268,12 @@ static long list_size;
 
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
-int dmar_disabled;
+#ifdef CONFIG_DMAR_DEFAULT_ON
+int dmar_disabled = 0;
+#else
+int dmar_disabled = 1;
+#endif /*CONFIG_DMAR_DEFAULT_ON*/
+
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
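With this hunk DMAR support is still compiled in, but it stays dormant unless CONFIG_DMAR_DEFAULT_ON is set at build time or the user boots with intel_iommu=on (see the next hunk). The flag takes effect in intel_iommu_init(), outside the hunks shown here; roughly (a sketch of the existing check, its exact neighbours assumed):

	/* in intel_iommu_init(), after the DMAR tables are parsed */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;	/* fall back to swiotlb/nommu DMA ops */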
@@ -275,14 +282,19 @@ static int intel_iommu_strict;
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
+static struct iommu_ops intel_iommu_ops;
+
 static int __init intel_iommu_setup(char *str)
 {
        if (!str)
                return -EINVAL;
        while (*str) {
-               if (!strncmp(str, "off", 3)) {
+               if (!strncmp(str, "on", 2)) {
+                       dmar_disabled = 0;
+                       printk(KERN_INFO "Intel-IOMMU: enabled\n");
+               } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
-                       printk(KERN_INFO"Intel-IOMMU: disabled\n");
+                       printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
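The option string is walked in a loop, so several comma-separated options can be combined (the comma-skipping tail of the loop falls outside this hunk). The parser is bound to the "intel_iommu=" boot parameter by a __setup() line elsewhere in this file:

	/* existing registration in intel-iommu.c, shown for context */
	__setup("intel_iommu=", intel_iommu_setup);

After this patch, intel_iommu=on enables remapping on kernels built without DMAR_DEFAULT_ON, intel_iommu=off forces it off, and something like intel_iommu=on,igfx_off enables it while excluding the integrated graphics device.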
@@ -434,7 +446,8 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
                        continue;
 
                for (i = 0; i < drhd->devices_cnt; i++)
-                       if (drhd->devices[i]->bus->number == bus &&
+                       if (drhd->devices[i] &&
+                           drhd->devices[i]->bus->number == bus &&
                            drhd->devices[i]->devfn == devfn)
                                return drhd->iommu;
 
@@ -2727,6 +2740,9 @@ int __init intel_iommu_init(void)
        init_timer(&unmap_timer);
        force_iommu = 1;
        dma_ops = &intel_dma_ops;
+
+       register_iommu(&intel_iommu_ops);
+
        return 0;
 }
 
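register_iommu() plugs intel_iommu_ops (defined at the end of this diff) into the generic IOMMU layer, so consumers such as KVM device assignment can stop calling intel_iommu_* symbols directly. A sketch of such a consumer, written against the linux/iommu.h entry points of this era (function names taken from that API; the error handling here is illustrative):

	#include <linux/iommu.h>
	#include <linux/pci.h>

	static int example_assign(struct pci_dev *pdev, unsigned long iova,
				  phys_addr_t hpa, size_t size)
	{
		struct iommu_domain *domain;
		int ret;

		domain = iommu_domain_alloc();		/* -> .domain_init */
		if (!domain)
			return -ENOMEM;

		ret = iommu_attach_device(domain, &pdev->dev);	/* -> .attach_dev */
		if (ret)
			goto out_free;

		ret = iommu_map_range(domain, iova, hpa, size,	/* -> .map */
				      IOMMU_READ | IOMMU_WRITE);
		if (ret)
			goto out_detach;
		return 0;

	out_detach:
		iommu_detach_device(domain, &pdev->dev);	/* -> .detach_dev */
	out_free:
		iommu_domain_free(domain);		/* -> .domain_destroy */
		return ret;
	}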
@@ -2849,6 +2865,22 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 /* domain id for virtual machine, it won't be set in context */
 static unsigned long vm_domid;
 
+static int vm_domain_min_agaw(struct dmar_domain *domain)
+{
+       int i;
+       int min_agaw = domain->agaw;
+
+       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       for (; i < g_num_of_iommus; ) {
+               if (min_agaw > g_iommus[i]->agaw)
+                       min_agaw = g_iommus[i]->agaw;
+
+               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+       }
+
+       return min_agaw;
+}
+
 static struct dmar_domain *iommu_alloc_vm_domain(void)
 {
        struct dmar_domain *domain;
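The vm_domain_min_agaw() helper added above returns the smallest adjusted guest address width (agaw) of all IOMMUs a domain spans; the attach and map paths later in this diff use it to reject addresses the narrowest unit cannot translate. In this file agaw_to_width(agaw) is 30 + 9 * agaw, so agaw 1 covers a 39-bit address space and agaw 2 a 48-bit one. A self-contained sketch of the resulting bound check (4 KiB VT-d pages assumed for the mask):

	static inline int example_addr_fits(int min_agaw, u64 max_addr)
	{
		/* highest page-aligned address a min_agaw-wide IOMMU maps */
		u64 end = ((1ULL << (30 + 9 * min_agaw)) - 1) & ~0xfffULL;

		return max_addr <= end;	/* false => -EFAULT in attach/map */
	}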
@@ -2883,6 +2915,7 @@ static int vm_domain_init(struct dmar_domain *domain, int guest_width)
 
        domain->iommu_count = 0;
        domain->iommu_coherency = 0;
+       domain->max_addr = 0;
 
        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
@@ -2944,36 +2977,43 @@ static void vm_domain_exit(struct dmar_domain *domain)
        free_domain_mem(domain);
 }
 
-struct dmar_domain *intel_iommu_alloc_domain(void)
+static int intel_iommu_domain_init(struct iommu_domain *domain)
 {
-       struct dmar_domain *domain;
+       struct dmar_domain *dmar_domain;
 
-       domain = iommu_alloc_vm_domain();
-       if (!domain) {
+       dmar_domain = iommu_alloc_vm_domain();
+       if (!dmar_domain) {
                printk(KERN_ERR
-                       "intel_iommu_domain_alloc: domain == NULL\n");
-               return NULL;
+                       "intel_iommu_domain_init: dmar_domain == NULL\n");
+               return -ENOMEM;
        }
-       if (vm_domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+       if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                printk(KERN_ERR
-                       "intel_iommu_domain_alloc: domain_init() failed\n");
-               vm_domain_exit(domain);
-               return NULL;
+                       "intel_iommu_domain_init() failed\n");
+               vm_domain_exit(dmar_domain);
+               return -ENOMEM;
        }
+       domain->priv = dmar_domain;
 
-       return domain;
+       return 0;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_alloc_domain);
 
-void intel_iommu_free_domain(struct dmar_domain *domain)
+static void intel_iommu_domain_destroy(struct iommu_domain *domain)
 {
-       vm_domain_exit(domain);
+       struct dmar_domain *dmar_domain = domain->priv;
+
+       domain->priv = NULL;
+       vm_domain_exit(dmar_domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_free_domain);
 
-int intel_iommu_attach_device(struct dmar_domain *domain,
-                             struct pci_dev *pdev)
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                    struct device *dev)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct intel_iommu *iommu;
+       int addr_width;
+       u64 end;
        int ret;
 
        /* normally pdev is not mapped */
@@ -2982,65 +3022,118 @@ int intel_iommu_attach_device(struct dmar_domain *domain,
 
                old_domain = find_domain(pdev);
                if (old_domain) {
-                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                       if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                vm_domain_remove_one_dev_info(old_domain, pdev);
                        else
                                domain_remove_dev_info(old_domain);
                }
        }
 
-       ret = domain_context_mapping(domain, pdev);
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       /* check if this iommu agaw is sufficient for max mapped address */
+       addr_width = agaw_to_width(iommu->agaw);
+       end = DOMAIN_MAX_ADDR(addr_width);
+       end = end & VTD_PAGE_MASK;
+       if (end < dmar_domain->max_addr) {
+               printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                      "sufficient for the mapped address (%llx)\n",
+                      __func__, iommu->agaw, dmar_domain->max_addr);
+               return -EFAULT;
+       }
+
+       ret = domain_context_mapping(dmar_domain, pdev);
        if (ret)
                return ret;
 
-       ret = vm_domain_add_dev_info(domain, pdev);
+       ret = vm_domain_add_dev_info(dmar_domain, pdev);
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_attach_device);
 
-void intel_iommu_detach_device(struct dmar_domain *domain,
-                              struct pci_dev *pdev)
+static void intel_iommu_detach_device(struct iommu_domain *domain,
+                                     struct device *dev)
 {
-       vm_domain_remove_one_dev_info(domain, pdev);
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_detach_device);
 
-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-                           u64 hpa, size_t size, int prot)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+                                unsigned long iova, phys_addr_t hpa,
+                                size_t size, int iommu_prot)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
+       u64 max_addr;
+       int addr_width;
+       int prot = 0;
        int ret;
-       ret = domain_page_mapping(domain, iova, hpa, size, prot);
+
+       if (iommu_prot & IOMMU_READ)
+               prot |= DMA_PTE_READ;
+       if (iommu_prot & IOMMU_WRITE)
+               prot |= DMA_PTE_WRITE;
+
+       max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+       if (dmar_domain->max_addr < max_addr) {
+               int min_agaw;
+               u64 end;
+
+               /* check if minimum agaw is sufficient for mapped address */
+               min_agaw = vm_domain_min_agaw(dmar_domain);
+               addr_width = agaw_to_width(min_agaw);
+               end = DOMAIN_MAX_ADDR(addr_width);
+               end = end & VTD_PAGE_MASK;
+               if (end < max_addr) {
+                       printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                              "sufficient for the mapped address (%llx)\n",
+                              __func__, min_agaw, max_addr);
+                       return -EFAULT;
+               }
+               dmar_domain->max_addr = max_addr;
+       }
+
+       ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_map_address);
 
-void intel_iommu_unmap_address(struct dmar_domain *domain,
-                              dma_addr_t iova, size_t size)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+                                   unsigned long iova, size_t size)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
        dma_addr_t base;
 
        /* The address might not be aligned */
        base = iova & VTD_PAGE_MASK;
        size = VTD_PAGE_ALIGN(size);
-       dma_pte_clear_range(domain, base, base + size);
-}
-EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
+       dma_pte_clear_range(dmar_domain, base, base + size);
 
-int intel_iommu_found(void)
-{
-       return g_num_of_iommus;
+       if (dmar_domain->max_addr == base + size)
+               dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_found);
 
-u64 intel_iommu_iova_to_phys(struct dmar_domain *domain, u64 iova)
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+                                           unsigned long iova)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
        struct dma_pte *pte;
        u64 phys = 0;
 
-       pte = addr_to_dma_pte(domain, iova);
+       pte = addr_to_dma_pte(dmar_domain, iova);
        if (pte)
                phys = dma_pte_addr(pte);
 
        return phys;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_iova_to_phys);
+
+static struct iommu_ops intel_iommu_ops = {
+       .domain_init    = intel_iommu_domain_init,
+       .domain_destroy = intel_iommu_domain_destroy,
+       .attach_dev     = intel_iommu_attach_device,
+       .detach_dev     = intel_iommu_detach_device,
+       .map            = intel_iommu_map_range,
+       .unmap          = intel_iommu_unmap_range,
+       .iova_to_phys   = intel_iommu_iova_to_phys,
+};
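For context, the generic layer these callbacks plug into is a thin dispatcher; a sketch of the drivers/base/iommu.c side of this era, reconstructed from the API and not part of this diff:

	static struct iommu_ops *iommu_ops;

	void register_iommu(struct iommu_ops *ops)
	{
		if (iommu_ops != NULL)
			BUG();		/* only one hardware IOMMU driver */
		iommu_ops = ops;
	}

	int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
	{
		return iommu_ops->map(domain, iova, paddr, size, prot);
	}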