 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

-	if (is_coherent || nommu())
+	if (nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       caller, want_vaddr);
+	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+					    caller, want_vaddr);

 	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
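
Read as a whole, the rewritten ladder checks for a CMA area before the coherent shortcut, which is the point of the change: a coherent device with a per-device CMA region now actually allocates from it instead of always taking the simple-buffer path. A condensed view of the post-patch selection, with the intent of each branch spelled out (variable names come from the surrounding __dma_alloc(); treat this as an annotated sketch, not the authoritative function body):

	/* Post-patch allocator selection in __dma_alloc(), annotated. */
	if (nommu())
		/* No MMU: remapping is impossible, plain buffer only. */
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
		/* A CMA area exists and we may sleep: use it for both
		 * coherent and non-coherent devices. */
		addr = __alloc_from_contiguous(dev, size, prot, &page,
					       caller, want_vaddr);
	else if (is_coherent)
		/* Coherent device without CMA: linear mapping suffices. */
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		/* Atomic context: carve from the preallocated pool. */
		addr = __alloc_from_pool(size, &page);
	else
		/* Non-coherent, sleepable, no CMA: allocate then remap. */
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
					    caller, want_vaddr);
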
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;

 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;

-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
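
The dropped __get_dma_pgprot() call is the other half of the fix: that helper downgrades the kernel mapping to write-combining or uncached, which is what non-coherent mappings need but a coherent device does not, so passing PAGE_KERNEL straight through keeps the normal cacheable mapping. As a reminder of what is being skipped, the helper in this file amounts to roughly the following (paraphrased, so check it against the tree this applies to):

	/* Approximate body of __get_dma_pgprot() in this file: pick a
	 * write-combining or DMA-coherent (uncached) protection. Only
	 * the non-coherent paths still want this downgrade. */
	static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs,
						pgprot_t prot)
	{
		return dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
				pgprot_writecombine(prot) :
				pgprot_dmacoherent(prot);
	}
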
 	size = PAGE_ALIGN(size);

-	if (is_coherent || nommu()) {
+	if (nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (__free_from_pool(cpu_addr, size)) {
+	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr)
+		if (want_vaddr && !is_coherent)
 			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
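
The free path mirrors the new allocation ladder, and the two added !is_coherent guards encode the same invariant from the other side: a coherent allocation can never have come from the atomic pool and never carries a remapped kernel vaddr, so both teardown steps must be skipped for it. Annotated, with the final CMA branch left as a stub since the quoted hunk ends there:

	/* Post-patch free path, mirroring the allocation ladder above. */
	if (nommu()) {
		/* Nothing was ever remapped; just release the pages. */
		__dma_free_buffer(page, size);
	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
		/* Only non-coherent buffers can live in the atomic pool. */
		return;
	} else if (!dev_get_cma_area(dev)) {
		/* Coherent buffers have no extra kernel mapping to undo. */
		if (want_vaddr && !is_coherent)
			__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/* CMA-backed buffer; release path elided in this hunk. */
	}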