	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;
-	size = PAGE_SIZE << gfp_order;
-
-	BUG_ON(!IS_ALIGNED(iova | paddr, size));
-
-	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+			"0x%x\n", iova, (unsigned long)paddr,
+			(unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+				(unsigned long)paddr, (unsigned long)size);
+
+	while (size) {
+		unsigned long pgsize, addr_merge = iova | paddr;
+		unsigned int pgsize_idx;
+
+		/* Max page size that still fits into 'size' */
+		pgsize_idx = __fls(size);
+
+		/* need to consider alignment requirements ? */
+		if (likely(addr_merge)) {
+			/* Max page size allowed by both iova and paddr */
+			unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+		}
+
+		/* build a mask of acceptable page sizes */
+		pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+		/* throw away page sizes not supported by the hardware */
+		pgsize &= domain->ops->pgsize_bitmap;
+
+		/* make sure we're still sane */
+		BUG_ON(!pgsize);
+
+		/* pick the biggest page */
+		pgsize_idx = __fls(pgsize);
+		pgsize = 1UL << pgsize_idx;
+
+		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+				(unsigned long)paddr, pgsize);
+
+		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		iommu_unmap(domain, orig_iova, orig_size - size);
+
+	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
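For reference, here is a minimal standalone sketch of the page-size selection performed by the loop above. It is not part of the patch: it assumes a hypothetical pgsize_bitmap of 4K|64K|1M|16M, made-up iova/paddr/size values, and emulates __fls()/__ffs() with GCC builtins so it can run in userspace. It only shows how the largest page satisfying both the remaining size and the iova/paddr alignment is chosen.

/* standalone illustration only -- not kernel code, values are made up */
#include <stdio.h>

static unsigned long demo_fls(unsigned long x)	/* index of highest set bit */
{
	return (sizeof(unsigned long) * 8 - 1) - __builtin_clzl(x);
}

static unsigned long demo_ffs(unsigned long x)	/* index of lowest set bit */
{
	return __builtin_ctzl(x);
}

int main(void)
{
	/* hypothetical hardware: 4K, 64K, 1M and 16M pages */
	unsigned long pgsize_bitmap = 0x1000 | 0x10000 | 0x100000 | 0x1000000;
	unsigned long iova = 0x100000, paddr = 0x40100000, size = 0x110000;

	/*
	 * Assumes iova/paddr/size are aligned to the smallest supported
	 * page, as iommu_map() enforces above; otherwise pgsize could
	 * end up empty (the kernel catches that with BUG_ON(!pgsize)).
	 */
	while (size) {
		unsigned long addr_merge = iova | paddr;
		unsigned long pgsize_idx = demo_fls(size);
		unsigned long pgsize;

		/* alignment of iova/paddr also caps the usable page size */
		if (addr_merge) {
			unsigned long align_idx = demo_ffs(addr_merge);

			if (align_idx < pgsize_idx)
				pgsize_idx = align_idx;
		}

		/* candidates <= 2^pgsize_idx, limited to what hw supports */
		pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;
		pgsize = 1UL << demo_fls(pgsize);	/* pick the biggest */

		printf("map iova 0x%lx pa 0x%lx pgsize 0x%lx\n",
		       iova, paddr, pgsize);

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	return 0;
}

With these example values the loop emits one 1M mapping followed by one 64K mapping, mirroring how iommu_map() would split the same region.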
	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;
-	size = PAGE_SIZE << gfp_order;
-
-	BUG_ON(!IS_ALIGNED(iova, size));
-
-	return domain->ops->unmap(domain, iova, gfp_order);
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * The virtual address, as well as the size of the mapping, must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+			iova, (unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+				(unsigned long)size);
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t left = size - unmapped;
+
+		unmapped_page = domain->ops->unmap(domain, iova, left);
+		if (!unmapped_page)
+			break;
+
+		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+				(unsigned long)unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
+
+	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
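The flip side of this change is the driver interface: ->map() and ->unmap() are now called with a single, hardware-supported page size per invocation, and drivers advertise the sizes they can handle through pgsize_bitmap. A rough, hypothetical skeleton under those assumptions is shown below; the example_* names and the size constants are invented, and the stub bodies only stand in for real page-table updates.

#include <linux/iommu.h>

/* hypothetical hardware: 4K, 64K, 1M and 16M pages */
#define EXAMPLE_PGSIZES	(0x1000 | 0x10000 | 0x100000 | 0x1000000)

static int example_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot)
{
	/*
	 * 'size' is always one of the sizes in EXAMPLE_PGSIZES; a real
	 * driver would install a PTE/section entry of exactly that size.
	 */
	return 0;
}

static size_t example_unmap(struct iommu_domain *domain, unsigned long iova,
			    size_t size)
{
	/*
	 * A real driver would tear down the page mapped at 'iova' and
	 * return its actual size; returning 0 stops iommu_unmap()'s loop.
	 */
	return size;
}

static struct iommu_ops example_iommu_ops = {
	.map		= example_map,
	.unmap		= example_unmap,
	.pgsize_bitmap	= EXAMPLE_PGSIZES,
};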
+
+int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+		return dev->bus->iommu_ops->device_group(dev, groupid);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(iommu_device_group);
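Finally, a hedged caller-side sketch of the API after both changes: iommu_map()/iommu_unmap() now take a size in bytes rather than a page order, and iommu_device_group() reports the device's group when the bus iommu_ops implement it. The domain/dev arguments and the addresses below are placeholders, not values taken from the patch.

#include <linux/device.h>
#include <linux/iommu.h>

static int example_caller(struct iommu_domain *domain, struct device *dev)
{
	unsigned long iova = 0x100000;
	phys_addr_t paddr = 0x40100000;		/* placeholder addresses */
	size_t size = 0x110000;			/* 1M + 64K, split internally */
	unsigned int groupid;
	int ret;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;	/* partial mappings were already unrolled */

	if (!iommu_device_group(dev, &groupid))
		dev_info(dev, "device belongs to iommu group %u\n", groupid);

	/* iommu_unmap() returns the number of bytes actually unmapped */
	if (iommu_unmap(domain, iova, size) != size)
		return -EIO;

	return 0;
}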