x86/amd-iommu: Move reset_iommu_command_buffer out of locked code
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e3363fd5eef5b0dd436f49cecffbec760f05e57a..b75fcd9b6a0fe9886b94f49b768b770f30e9aed7 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -146,6 +146,8 @@ static int iommu_init_device(struct device *dev)
        if (!dev_data)
                return -ENOMEM;
 
+       dev_data->dev = dev;
+
        devid = get_device_id(dev);
        alias = amd_iommu_alias_table[devid];
        pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
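
Note: the stored back-pointer is what the reworked flush code below depends on. iommu_flush_domain_devices() walks a domain's dev_list and has to get from each list entry back to its struct device. A minimal sketch of the relationship, using the field names visible in this diff (the real struct iommu_dev_data carries more state):

	struct iommu_dev_data {
		struct list_head list;	/* entry in protection_domain->dev_list */
		struct device *dev;	/* back-pointer set in iommu_init_device() */
		/* ... */
	};
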
@@ -283,6 +285,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
                break;
        case EVENT_TYPE_ILL_CMD:
                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+               iommu->reset_in_progress = true;
                reset_iommu_command_buffer(iommu);
                dump_command(address);
                break;
@@ -405,11 +408,8 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu)
        status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
        writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-       if (unlikely(i == EXIT_LOOP_COUNT)) {
-               spin_unlock(&iommu->lock);
-               reset_iommu_command_buffer(iommu);
-               spin_lock(&iommu->lock);
-       }
+       if (unlikely(i == EXIT_LOOP_COUNT))
+               iommu->reset_in_progress = true;
 }
 
 /*
@@ -456,6 +456,9 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 out:
        spin_unlock_irqrestore(&iommu->lock, flags);
 
+       if (iommu->reset_in_progress)
+               reset_iommu_command_buffer(iommu);
+
        return 0;
 }
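
Note: the two hunks above are the core of the change named in the subject. reset_iommu_command_buffer() issues commands itself (it flushes all devices and all domains), and queueing a command takes iommu->lock, so the reset must not run with that lock held; the old code worked around this by dropping and retaking the lock in the middle of __iommu_wait_for_completion(). After this patch a completion-wait timeout only records the failure in iommu->reset_in_progress, and iommu_completion_wait() performs the reset once the lock has been released. A condensed sketch of the deferral pattern, with the poll loop reduced to a hypothetical wait_timed_out() helper (not the full kernel code):

	/* Runs with iommu->lock held: only record the failure. */
	static void __wait_for_completion(struct amd_iommu *iommu)
	{
		if (wait_timed_out(iommu))	/* placeholder for the poll loop */
			iommu->reset_in_progress = true;
	}

	static int completion_wait(struct amd_iommu *iommu)
	{
		unsigned long flags;

		spin_lock_irqsave(&iommu->lock, flags);
		__wait_for_completion(iommu);
		spin_unlock_irqrestore(&iommu->lock, flags);

		/* Lock released: the reset may queue commands, and thus
		 * take iommu->lock again, without deadlocking. */
		if (iommu->reset_in_progress)
			reset_iommu_command_buffer(iommu);

		return 0;
	}
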
 
@@ -478,20 +481,21 @@ static void iommu_flush_complete(struct protection_domain *domain)
 /*
  * Command send function for invalidating a device table entry
  */
-static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
+static int iommu_flush_device(struct device *dev)
 {
+       struct amd_iommu *iommu;
        struct iommu_cmd cmd;
-       int ret;
+       u16 devid;
 
-       BUG_ON(iommu == NULL);
+       devid = get_device_id(dev);
+       iommu = amd_iommu_rlookup_table[devid];
 
+       /* Build command */
        memset(&cmd, 0, sizeof(cmd));
        CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
        cmd.data[0] = devid;
 
-       ret = iommu_queue_command(iommu, &cmd);
-
-       return ret;
+       return iommu_queue_command(iommu, &cmd);
 }
 
 static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
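
Note: iommu_flush_device() replaces the (iommu, devid) pair of the old iommu_queue_inv_dev_entry() with a plain struct device and resolves both values internally via get_device_id() and amd_iommu_rlookup_table. Callers no longer need an IOMMU handle in scope, which is what makes the mechanical conversions at the end of this diff possible. Illustrative before/after at a call site (not a hunk from this patch):

	/* before: the caller had to carry both values */
	devid = get_device_id(dev);
	iommu = amd_iommu_rlookup_table[devid];
	iommu_queue_inv_dev_entry(iommu, devid);

	/* after: the lookup lives inside the flush function */
	iommu_flush_device(dev);
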
@@ -581,30 +585,43 @@ static void iommu_flush_tlb_pde(struct protection_domain *domain)
        __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
 }
 
+
 /*
- * This function flushes all domains that have devices on the given IOMMU
+ * This function flushes the DTEs for all devices in the domain
  */
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
+static void iommu_flush_domain_devices(struct protection_domain *domain)
+{
+       struct iommu_dev_data *dev_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       list_for_each_entry(dev_data, &domain->dev_list, list)
+               iommu_flush_device(dev_data->dev);
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void iommu_flush_all_domain_devices(void)
 {
-       u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
        struct protection_domain *domain;
        unsigned long flags;
 
        spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
        list_for_each_entry(domain, &amd_iommu_pd_list, list) {
-               if (domain->dev_iommu[iommu->index] == 0)
-                       continue;
-
-               spin_lock(&domain->lock);
-               iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
+               iommu_flush_domain_devices(domain);
                iommu_flush_complete(domain);
-               spin_unlock(&domain->lock);
        }
 
        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
+void amd_iommu_flush_all_devices(void)
+{
+       iommu_flush_all_domain_devices();
+}
+
 /*
  * This function uses heavy locking and may disable irqs for some time. But
  * this is no issue because it is only called during resume.
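
Note: flushing DTEs is now organized around protection domains instead of scanning the whole device-ID space per IOMMU. iommu_flush_domain_devices() walks one domain's dev_list under domain->lock, and iommu_flush_all_domain_devices() repeats that for every domain on amd_iommu_pd_list under amd_iommu_pd_lock, the same nesting amd_iommu_flush_all_domains() already uses. A flattened sketch of the two-level walk (structure only; in the actual code the inner lock is taken with irqs already disabled by the outer irqsave):

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
		spin_lock(&domain->lock);
		list_for_each_entry(dev_data, &domain->dev_list, list)
			iommu_flush_device(dev_data->dev);
		spin_unlock(&domain->lock);
		iommu_flush_complete(domain);	/* wait for the IOMMU */
	}
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
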
@@ -626,38 +643,6 @@ void amd_iommu_flush_all_domains(void)
        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
-static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
-{
-       int i;
-
-       for (i = 0; i <= amd_iommu_last_bdf; ++i) {
-               if (iommu != amd_iommu_rlookup_table[i])
-                       continue;
-
-               iommu_queue_inv_dev_entry(iommu, i);
-               iommu_completion_wait(iommu);
-       }
-}
-
-static void flush_devices_by_domain(struct protection_domain *domain)
-{
-       struct amd_iommu *iommu;
-       int i;
-
-       for (i = 0; i <= amd_iommu_last_bdf; ++i) {
-               if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
-                   (amd_iommu_pd_table[i] != domain))
-                       continue;
-
-               iommu = amd_iommu_rlookup_table[i];
-               if (!iommu)
-                       continue;
-
-               iommu_queue_inv_dev_entry(iommu, i);
-               iommu_completion_wait(iommu);
-       }
-}
-
 static void reset_iommu_command_buffer(struct amd_iommu *iommu)
 {
        pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
@@ -665,20 +650,13 @@ static void reset_iommu_command_buffer(struct amd_iommu *iommu)
        if (iommu->reset_in_progress)
                panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
 
-       iommu->reset_in_progress = true;
-
        amd_iommu_reset_cmd_buffer(iommu);
-       flush_all_devices_for_iommu(iommu);
-       flush_all_domains_on_iommu(iommu);
+       amd_iommu_flush_all_devices();
+       amd_iommu_flush_all_domains();
 
        iommu->reset_in_progress = false;
 }
 
-void amd_iommu_flush_all_devices(void)
-{
-       flush_devices_by_domain(NULL);
-}
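
Note: reset_in_progress is no longer set inside reset_iommu_command_buffer() itself; it is raised by whoever requests the reset and cleared only when the reset completes. Summarizing the flag's transitions as they appear in this diff:

	/*
	 * iommu->reset_in_progress after this patch:
	 *
	 *   set true:   completion-wait timeout in __iommu_wait_for_completion()
	 *               EVENT_TYPE_ILL_CMD handling in iommu_print_event()
	 *   acted on:   iommu_completion_wait(), after iommu->lock is dropped
	 *   set false:  end of a successful reset_iommu_command_buffer()
	 *   panic:      reset_iommu_command_buffer() entered with the flag set
	 */
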
-
 /****************************************************************************
  *
 * The functions below are used to create the page table mappings for
@@ -1382,7 +1360,7 @@ static void do_attach(struct device *dev, struct protection_domain *domain)
        domain->dev_cnt                 += 1;
 
        /* Flush the DTE entry */
-       iommu_queue_inv_dev_entry(iommu, devid);
+       iommu_flush_device(dev);
 }
 
 static void do_detach(struct device *dev)
@@ -1405,7 +1383,7 @@ static void do_detach(struct device *dev)
        clear_dte_entry(devid);
 
        /* Flush the DTE entry */
-       iommu_queue_inv_dev_entry(iommu, devid);
+       iommu_flush_device(dev);
 }
 
 /*
@@ -1610,7 +1588,7 @@ static int device_change_notifier(struct notifier_block *nb,
                goto out;
        }
 
-       iommu_queue_inv_dev_entry(iommu, devid);
+       iommu_flush_device(dev);
        iommu_completion_wait(iommu);
 
 out:
@@ -1681,7 +1659,7 @@ static void update_domain(struct protection_domain *domain)
                return;
 
        update_device_table(domain);
-       flush_devices_by_domain(domain);
+       iommu_flush_domain_devices(domain);
        iommu_flush_tlb_pde(domain);
 
        domain->updated = false;
@@ -2393,7 +2371,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
        if (!iommu)
                return;
 
-       iommu_queue_inv_dev_entry(iommu, devid);
+       iommu_flush_device(dev);
        iommu_completion_wait(iommu);
 }
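
Note: the final hunks (do_attach, do_detach, device_change_notifier, update_domain, amd_iommu_detach_device) are mechanical call-site conversions; the idiom at each site is unchanged: invalidate the device table entry, then synchronize with the hardware. As it reads after the conversion:

	iommu_flush_device(dev);	/* queue INV_DEV_ENTRY for this device */
	iommu_completion_wait(iommu);	/* wait until the IOMMU has processed it */
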