return 0;
}
-static struct page **__iommu_get_pages(void *cpu_addr)
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
struct vm_struct *area;
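
+	/*
+	 * With DMA_ATTR_NO_KERNEL_MAPPING the cookie handed out by the
+	 * allocator is the pages array itself, not a vmalloc() address,
+	 * so return it directly instead of looking up a vm area.
+	 */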
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return cpu_addr;
+
area = find_vm_area(cpu_addr);
if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
return area->pages;
	return NULL;
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	...
	if (*handle == DMA_ERROR_CODE)
goto err_buffer;
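
+	/*
+	 * The caller asked for no kernel mapping: skip __iommu_alloc_remap()
+	 * and return the pages array itself as the opaque cookie.
+	 */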
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return pages;
+
addr = __iommu_alloc_remap(pages, size, gfp, prot,
__builtin_return_address(0));
	if (!addr)
		goto err_mapping;

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
- struct page **pages = __iommu_get_pages(cpu_addr);
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
- struct page **pages = __iommu_get_pages(cpu_addr);
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
size = PAGE_ALIGN(size);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

- unmap_kernel_range((unsigned long)cpu_addr, size);
- vunmap(cpu_addr);
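+	/*
+	 * Buffers allocated with DMA_ATTR_NO_KERNEL_MAPPING never had a
+	 * kernel mapping, so there is nothing to unmap or vunmap.
+	 */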
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+ }
__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size);
}
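
For context, this is how a caller opts in from driver code. The snippet below is an illustrative sketch, not part of the patch: example_alloc(), dev and size are placeholders, and error handling is kept minimal.

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

static int example_alloc(struct device *dev, size_t size)
{
	dma_addr_t dma_handle;
	void *cookie;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/* cookie is an opaque token, not a CPU-usable virtual address */
	cookie = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
	if (!cookie)
		return -ENOMEM;

	/* ... hand dma_handle to the device ... */

	/* the same attrs must be passed back when freeing */
	dma_free_attrs(dev, size, cookie, dma_handle, &attrs);
	return 0;
}

Since no kernel virtual address is ever created, the returned cookie must not be dereferenced by the CPU; it is only meaningful when passed back to dma_free_attrs() (and, with this patch, to the IOMMU mmap path) together with the same attrs.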