Merge branches 'iommu/fixes', 'iommu/page-sizes' and 'iommu/group-id' into next
author Joerg Roedel <joerg.roedel@amd.com>
Tue, 15 Nov 2011 11:49:31 +0000 (12:49 +0100)
committer Joerg Roedel <joerg.roedel@amd.com>
Tue, 15 Nov 2011 11:49:31 +0000 (12:49 +0100)
Conflicts:
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
include/linux/iommu.h

drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/iommu/omap-iovmm.c
include/linux/iommu.h

diff --combined drivers/iommu/amd_iommu.c
index 4ee277a8521a49eb41b5056daebf13cadc3d68dd,341573821864e48b0715a02d7712a0dbcbc84d91,6f7553684c1e61373ac5115b8470c2840f36ce1f..ad074dcb14028b672f3f3c0d2b961183fecf3fc2
@@@@ -2782,6 -2797,7 -2802,7 +2817,8 @@@@ static struct iommu_ops amd_iommu_ops 
        .unmap = amd_iommu_unmap,
        .iova_to_phys = amd_iommu_iova_to_phys,
        .domain_has_cap = amd_iommu_domain_has_cap,
++      .device_group = amd_iommu_device_group,
+ +     .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
   };
   
   /*****************************************************************************
diff --combined drivers/iommu/intel-iommu.c
index c0c7820d4c46b406465e0d2d8e059a80ce819476,4c780efff16936521c7d94e1d3ad19cf6b85e9b6,9ef16d664a993702b5671c2af79319296d581a6d..e918f72da6a865d0ead1a80e73b4911f5946f0ca
@@@@ -4069,6 -4084,7 -4117,7 +4132,8 @@@@ static struct iommu_ops intel_iommu_op
        .unmap          = intel_iommu_unmap,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
++      .device_group   = intel_iommu_device_group,
+ +     .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
   };
   
   static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
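Both hunks above hook the new .device_group and .pgsize_bitmap fields into the AMD and Intel drivers' iommu_ops. The actual AMD_IOMMU_PGSIZES and INTEL_IOMMU_PGSIZES values are defined elsewhere in the respective drivers and are not part of these hunks; as a purely illustrative sketch, each set bit in such a bitmap stands for one page size the hardware can map:

    /*
     * Hypothetical example, not the real AMD/Intel definitions: hardware that
     * can map 4KiB, 2MiB and 1GiB pages advertises exactly those sizes, and
     * iommu_map() will never hand the driver any other size.
     */
    #define EXAMPLE_IOMMU_PGSIZES	((1UL << 12) | (1UL << 21) | (1UL << 30))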
diff --combined drivers/iommu/iommu.c
index 2fb2963df55376a3a8efbf09490457e08b28b836,84cdd8ac81f14b21fe348062b4a7040c6acfa558,9c35be4b333f1904f6f43a51da2df7dfaa17cb84..7cc3c65e3f0a378e4fc3ff5d61c79328db814f55
@@@@ -164,11 -169,69 -215,11 +220,69 @@@@ int iommu_map(struct iommu_domain *doma
        if (unlikely(domain->ops->map == NULL))
                return -ENODEV;
   
- -     size         = PAGE_SIZE << gfp_order;
+ +     /* find out the minimum page size supported */
+ +     min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ +
+ +     /*
+ +      * both the virtual address and the physical one, as well as
+ +      * the size of the mapping, must be aligned (at least) to the
+ +      * size of the smallest page supported by the hardware
+ +      */
+ +     if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ +             pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+ +                     "0x%x\n", iova, (unsigned long)paddr,
+ +                     (unsigned long)size, min_pagesz);
+ +             return -EINVAL;
+ +     }
+ +
+ +     pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+ +                             (unsigned long)paddr, (unsigned long)size);
+ +
+ +     while (size) {
+ +             unsigned long pgsize, addr_merge = iova | paddr;
+ +             unsigned int pgsize_idx;
+ +
+ +             /* Max page size that still fits into 'size' */
+ +             pgsize_idx = __fls(size);
+ +
+ +             /* need to consider alignment requirements ? */
+ +             if (likely(addr_merge)) {
+ +                     /* Max page size allowed by both iova and paddr */
+ +                     unsigned int align_pgsize_idx = __ffs(addr_merge);
+ +
+ +                     pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ +             }
+ +
+ +             /* build a mask of acceptable page sizes */
+ +             pgsize = (1UL << (pgsize_idx + 1)) - 1;
  +
-       BUG_ON(!IS_ALIGNED(iova | paddr, size));
+ +             /* throw away page sizes not supported by the hardware */
+ +             pgsize &= domain->ops->pgsize_bitmap;
   
-       return domain->ops->map(domain, iova, paddr, gfp_order, prot);
  -     BUG_ON(!IS_ALIGNED(iova | paddr, size));
+ +             /* make sure we're still sane */
+ +             BUG_ON(!pgsize);
+  
  -     return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+ +             /* pick the biggest page */
+ +             pgsize_idx = __fls(pgsize);
+ +             pgsize = 1UL << pgsize_idx;
+ +
+ +             pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+ +                                     (unsigned long)paddr, pgsize);
+ +
+ +             ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ +             if (ret)
+ +                     break;
+ +
+ +             iova += pgsize;
+ +             paddr += pgsize;
+ +             size -= pgsize;
+ +     }
+ +
+ +     /* unroll mapping in case something went wrong */
+ +     if (ret)
+ +             iommu_unmap(domain, orig_iova, orig_size - size);
+ +
+ +     return ret;
   }
   EXPORT_SYMBOL_GPL(iommu_map);
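The splitting loop above reads as: take the largest power of two that still fits in the remaining size, limit it further by the common alignment of iova and paddr, then clamp to the largest size the hardware advertises in pgsize_bitmap. A minimal stand-alone sketch of that selection step (hypothetical helper, using GCC builtins in place of the kernel's __fls()/__ffs(); it assumes size is non-zero and that pgsize_bitmap contains at least one size no larger than the alignment, which iommu_map() already guarantees via the min_pagesz check):

    static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
    				 unsigned long size,
    				 unsigned long pgsize_bitmap)
    {
    	unsigned long addr_merge = iova | paddr;
    	/* largest power of two that still fits into 'size' */
    	unsigned int pgsize_idx = 8 * sizeof(unsigned long) - 1 -
    				  __builtin_clzl(size);
    	unsigned long pgsize;

    	/* both addresses must be aligned to the page size we pick */
    	if (addr_merge) {
    		unsigned int align_idx = __builtin_ctzl(addr_merge);

    		if (align_idx < pgsize_idx)
    			pgsize_idx = align_idx;
    	}

    	/* candidate sizes, restricted to what the hardware supports */
    	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

    	/* the biggest remaining page size wins */
    	return 1UL << (8 * sizeof(unsigned long) - 1 -
    		       __builtin_clzl(pgsize));
    }

For instance, with pgsize_bitmap = (1UL << 12) | (1UL << 21), iova = paddr = 0x200000 and size = 0x400000, the helper returns 0x200000, so the 4MiB range is mapped as two 2MiB pages instead of 1024 4KiB ones.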
   
@@@@ -179,10 -243,41 -230,19 +294,50 @@@@ size_t iommu_unmap(struct iommu_domain 
        if (unlikely(domain->ops->unmap == NULL))
                return -ENODEV;
   
- -     size         = PAGE_SIZE << gfp_order;
  -
  -     BUG_ON(!IS_ALIGNED(iova, size));
  -
  -     return domain->ops->unmap(domain, iova, gfp_order);
+ +     /* find out the minimum page size supported */
+ +     min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ +
+ +     /*
+ +      * The virtual address, as well as the size of the mapping, must be
+ +      * aligned (at least) to the size of the smallest page supported
+ +      * by the hardware
+ +      */
+ +     if (!IS_ALIGNED(iova | size, min_pagesz)) {
+ +             pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+ +                                     iova, (unsigned long)size, min_pagesz);
+ +             return -EINVAL;
+ +     }
+ +
+ +     pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+ +                                                     (unsigned long)size);
+ +
+ +     /*
+ +      * Keep iterating until we either unmap 'size' bytes (or more)
+ +      * or we hit an area that isn't mapped.
+ +      */
+ +     while (unmapped < size) {
+ +             size_t left = size - unmapped;
+ +
+ +             unmapped_page = domain->ops->unmap(domain, iova, left);
+ +             if (!unmapped_page)
+ +                     break;
+ +
+ +             pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+ +                                     (unsigned long)unmapped_page);
+ +
+ +             iova += unmapped_page;
+ +             unmapped += unmapped_page;
+ +     }
+ +
+ +     return unmapped;
+  }
+  EXPORT_SYMBOL_GPL(iommu_unmap);
 + 
-       BUG_ON(!IS_ALIGNED(iova, size));
++ int iommu_device_group(struct device *dev, unsigned int *groupid)
++ {
++      if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
++              return dev->bus->iommu_ops->device_group(dev, groupid);
 + 
-       return domain->ops->unmap(domain, iova, gfp_order);
++      return -ENODEV;
 + }
-  EXPORT_SYMBOL_GPL(iommu_unmap);
++ EXPORT_SYMBOL_GPL(iommu_device_group);
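With the helper exported above, a consumer that wants to know which isolation group a device belongs to can query it directly. A hedged, hypothetical caller (the function name and the use of dev_info() are illustrative only):

    #include <linux/device.h>
    #include <linux/iommu.h>

    /* Hypothetical consumer: report the IOMMU group ID of a device. */
    static int example_report_group(struct device *dev)
    {
    	unsigned int groupid;
    	int ret;

    	ret = iommu_device_group(dev, &groupid);
    	if (ret)
    		return ret;	/* -ENODEV: no IOMMU or no ->device_group */

    	dev_info(dev, "IOMMU group %u\n", groupid);
    	return 0;
    }

Devices that report the same groupid cannot be isolated from one another by the IOMMU, which is the property the 'iommu/group-id' branch exposes to callers.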
drivers/iommu/omap-iovmm.c: Simple merge
diff --combined include/linux/iommu.h
index 432acc4c054df1134dddb9be501de5c27d84d926,cc26f89c4ee6303ae670196905055dbfae654510,0f318fd549beb73d2311096e56d33ca8bf5fb5df..d937580417ba668d343b30b1741d59139f7924b9
@@@@ -61,6 -74,7 -61,7 +74,8 @@@@ struct iommu_ops 
                                    unsigned long iova);
        int (*domain_has_cap)(struct iommu_domain *domain,
                              unsigned long cap);
++      int (*device_group)(struct device *dev, unsigned int *groupid);
+ +     unsigned long pgsize_bitmap;
   };
   
   extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
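Putting the two new struct iommu_ops members together, a driver built on this interface fills them in alongside its existing callbacks and registers the whole thing with bus_set_iommu(). A hypothetical sketch (all example_* symbols and EXAMPLE_IOMMU_PGSIZES are placeholders; callback bodies omitted):

    static struct iommu_ops example_iommu_ops = {
    	.map		= example_map,
    	.unmap		= example_unmap,
    	.iova_to_phys	= example_iova_to_phys,
    	.domain_has_cap	= example_domain_has_cap,
    	.device_group	= example_device_group,
    	.pgsize_bitmap	= EXAMPLE_IOMMU_PGSIZES,
    };

    static int __init example_iommu_init(void)
    {
    	return bus_set_iommu(&example_bus_type, &example_iommu_ops);
    }

bus_set_iommu() is the registration hook declared above; the IOMMU core then routes iommu_map(), iommu_unmap() and iommu_device_group() calls for devices on that bus through these ops.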