From: Joerg Roedel
Date: Tue, 6 Dec 2011 14:19:36 +0000 (+0100)
Subject: Merge branches 'iommu/fixes', 'arm/omap', 'iommu/page-sizes' and 'iommu/group-id...
X-Git-Tag: next-20111207~30^2
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=3a4786b016fe49b17fbe141ac62659b0c1be514c;p=karo-tx-linux.git

Merge branches 'iommu/fixes', 'arm/omap', 'iommu/page-sizes' and 'iommu/group-id' into next

Conflicts:
	drivers/iommu/amd_iommu.c
	drivers/iommu/intel-iommu.c
	include/linux/iommu.h
---

3a4786b016fe49b17fbe141ac62659b0c1be514c
diff --cc drivers/iommu/amd_iommu.c
index 4ee277a8521a,4ee277a8521a,341573821864,6f7553684c1e..ad074dcb1402
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@@@@ -2782,6 -2782,6 -2797,7 -2802,7 +2817,8 @@@@@@ static struct iommu_ops amd_iommu_ops
    	.unmap = amd_iommu_unmap,
    	.iova_to_phys = amd_iommu_iova_to_phys,
    	.domain_has_cap = amd_iommu_domain_has_cap,
+++ 	.device_group = amd_iommu_device_group,
++ +	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
    };
    
    /*****************************************************************************
diff --cc drivers/iommu/intel-iommu.c
index c0c7820d4c46,c0c7820d4c46,4c780efff169,9ef16d664a99..e918f72da6a8
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@@@@ -4069,6 -4069,6 -4084,7 -4117,7 +4132,8 @@@@@@ static struct iommu_ops intel_iommu_op
    	.unmap = intel_iommu_unmap,
    	.iova_to_phys = intel_iommu_iova_to_phys,
    	.domain_has_cap = intel_iommu_domain_has_cap,
+++ 	.device_group = intel_iommu_device_group,
++ +	.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
    };
    
    static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
diff --cc drivers/iommu/iommu.c
index 2fb2963df553,2fb2963df553,84cdd8ac81f1,9c35be4b333f..7cc3c65e3f0a
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@@@@ -164,11 -164,11 -169,69 -215,11 +220,69 @@@@@@ int iommu_map(struct iommu_domain *doma
    	if (unlikely(domain->ops->map == NULL))
    		return -ENODEV;
    
-- -	size         = PAGE_SIZE << gfp_order;
++ +	/* find out the minimum page size supported */
++ +	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
++ +
++ +	/*
++ +	 * both the virtual address and the physical one, as well as
++ +	 * the size of the mapping, must be aligned (at least) to the
++ +	 * size of the smallest page supported by the hardware
++ +	 */
++ +	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
++ +		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
++ +			"0x%x\n", iova, (unsigned long)paddr,
++ +			(unsigned long)size, min_pagesz);
++ +		return -EINVAL;
++ +	}
++ +
++ +	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
++ +				(unsigned long)paddr, (unsigned long)size);
++ +
++ +	while (size) {
++ +		unsigned long pgsize, addr_merge = iova | paddr;
++ +		unsigned int pgsize_idx;
++ +
++ +		/* Max page size that still fits into 'size' */
++ +		pgsize_idx = __fls(size);
++ +
++ +		/* need to consider alignment requirements ? */
++ +		if (likely(addr_merge)) {
++ +			/* Max page size allowed by both iova and paddr */
++ +			unsigned int align_pgsize_idx = __ffs(addr_merge);
++ +
++ +			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
++ +		}
++ +
++ +		/* build a mask of acceptable page sizes */
++ +		pgsize = (1UL << (pgsize_idx + 1)) - 1;
  + 
 -- 	BUG_ON(!IS_ALIGNED(iova | paddr, size));
++ +		/* throw away page sizes not supported by the hardware */
++ +		pgsize &= domain->ops->pgsize_bitmap;
 -- 	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
   -	BUG_ON(!IS_ALIGNED(iova | paddr, size));
++ +		/* make sure we're still sane */
++ +		BUG_ON(!pgsize);
++  
   -	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
++ +		/* pick the biggest page */
++ +		pgsize_idx = __fls(pgsize);
++ +		pgsize = 1UL << pgsize_idx;
++ +
++ +		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
++ +					(unsigned long)paddr, pgsize);
++ +
++ +		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
++ +		if (ret)
++ +			break;
++ +
++ +		iova += pgsize;
++ +		paddr += pgsize;
++ +		size -= pgsize;
++ +	}
++ +
++ +	/* unroll mapping in case something went wrong */
++ +	if (ret)
++ +		iommu_unmap(domain, orig_iova, orig_size - size);
++ +
++ +	return ret;
    }
    EXPORT_SYMBOL_GPL(iommu_map);
    
@@@@@ -179,10 -179,10 -243,41 -230,19 +294,50 @@@@@@ size_t iommu_unmap(struct iommu_domain
    	if (unlikely(domain->ops->unmap == NULL))
    		return -ENODEV;
    
-- -	size         = PAGE_SIZE << gfp_order;
   -
   -	BUG_ON(!IS_ALIGNED(iova, size));
   -
   -	return domain->ops->unmap(domain, iova, gfp_order);
++ +	/* find out the minimum page size supported */
++ +	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
++ +
++ +	/*
++ +	 * The virtual address, as well as the size of the mapping, must be
++ +	 * aligned (at least) to the size of the smallest page supported
++ +	 * by the hardware
++ +	 */
++ +	if (!IS_ALIGNED(iova | size, min_pagesz)) {
++ +		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
++ +			iova, (unsigned long)size, min_pagesz);
++ +		return -EINVAL;
++ +	}
++ +
++ +	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
++ +		(unsigned long)size);
++ +
++ +	/*
++ +	 * Keep iterating until we either unmap 'size' bytes (or more)
++ +	 * or we hit an area that isn't mapped.
++ +	 */
++ +	while (unmapped < size) {
++ +		size_t left = size - unmapped;
++ +
++ +		unmapped_page = domain->ops->unmap(domain, iova, left);
++ +		if (!unmapped_page)
++ +			break;
++ +
++ +		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
++ +			(unsigned long)unmapped_page);
++ +
++ +		iova += unmapped_page;
++ +		unmapped += unmapped_page;
++ +	}
++ +
++ +	return unmapped;
++  }
++  EXPORT_SYMBOL_GPL(iommu_unmap);
  + 
 -- 	BUG_ON(!IS_ALIGNED(iova, size));
+++ int iommu_device_group(struct device *dev, unsigned int *groupid)
+++ {
+++ 	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+++ 		return dev->bus->iommu_ops->device_group(dev, groupid);
  + 
 -- 	return domain->ops->unmap(domain, iova, gfp_order);
+++ 	return -ENODEV;
  + }
 -- 	EXPORT_SYMBOL_GPL(iommu_unmap);
+++ EXPORT_SYMBOL_GPL(iommu_device_group);
diff --cc include/linux/iommu.h
index 432acc4c054d,432acc4c054d,cc26f89c4ee6,0f318fd549be..d937580417ba
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@@@@ -61,6 -61,6 -74,7 -61,7 +74,8 @@@@@@ struct iommu_ops
    				    unsigned long iova);
    	int (*domain_has_cap)(struct iommu_domain *domain,
    			      unsigned long cap);
+++ 	int (*device_group)(struct device *dev, unsigned int *groupid);
++ +	unsigned long pgsize_bitmap;
    };
    
    extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
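
The core of the 'iommu/page-sizes' side of this merge is the splitting loop in iommu_map(): on each iteration it takes the largest page size that (a) fits in the remaining length, (b) is permitted by the current iova/paddr alignment, and (c) is set in the driver's pgsize_bitmap. Below is a minimal user-space sketch of just that selection logic, for illustration only: uffs()/ufls() stand in for the kernel's __ffs()/__fls(), and the pgsize_bitmap value and the iova/paddr/size request are made-up example inputs, not taken from this commit.

	/* pgsize-split.c — sketch of the iommu_map() page-size selection loop */
	#include <stdio.h>

	typedef unsigned long ulong;

	/* user-space stand-ins for the kernel's __ffs()/__fls() */
	static unsigned int uffs(ulong x) { return __builtin_ctzl(x); }
	static unsigned int ufls(ulong x)
	{
		return (8 * sizeof(ulong) - 1) - __builtin_clzl(x);
	}

	int main(void)
	{
		/* example bitmap: 4K, 2M and 1G pages supported */
		ulong pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
		/* example request: map 6M at a 2M-aligned iova/paddr pair */
		ulong iova = 0x200000, paddr = 0x40200000, size = 0x600000;

		while (size) {
			ulong pgsize, addr_merge = iova | paddr;
			/* max page size that still fits into 'size' */
			unsigned int pgsize_idx = ufls(size);

			/* max page size allowed by both iova and paddr */
			if (addr_merge && uffs(addr_merge) < pgsize_idx)
				pgsize_idx = uffs(addr_merge);

			/* mask of sizes <= 2^pgsize_idx, minus unsupported ones */
			pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

			/* pick the biggest remaining page */
			pgsize = 1UL << ufls(pgsize);

			printf("map iova 0x%lx -> pa 0x%lx pgsize 0x%lx\n",
			       iova, paddr, pgsize);

			iova += pgsize;
			paddr += pgsize;
			size -= pgsize;
		}
		return 0;
	}

With these example inputs the loop emits three 2M mappings rather than 1536 4K ones, which is exactly the win the patch set is after: drivers advertise what their hardware can do via pgsize_bitmap, and the core splits each request into the fewest hardware pages possible.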