From: Joonsoo Kim
Date: Thu, 26 Jun 2014 00:42:32 +0000 (+1000)
Subject: DMA, CMA: support alignment constraint on CMA region
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=b6c350b89e54763ac351cb1a5cd7bf6bc96ea3d8;p=karo-tx-linux.git

DMA, CMA: support alignment constraint on CMA region

PPC KVM's CMA area management needs an alignment constraint on its CMA
region, so support one here in order to prepare for generalizing the
CMA area management functionality.

Additionally, add comments explaining why an alignment constraint is
needed on a CMA region.

Signed-off-by: Joonsoo Kim
Acked-by: Michal Nazarewicz
Reviewed-by: Aneesh Kumar K.V
Cc: Alexander Graf
Cc: Aneesh Kumar K.V
Cc: Gleb Natapov
Acked-by: Marek Szyprowski
Tested-by: Marek Szyprowski
Cc: Minchan Kim
Cc: Paolo Bonzini
Cc: Zhang Yanfei
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Signed-off-by: Andrew Morton
---

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 9021762227a7..5f62c284072c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -32,6 +32,7 @@
 #include <linux/swap.h>
 #include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
+#include <linux/log2.h>
 
 struct cma {
 	unsigned long	base_pfn;
@@ -215,17 +216,16 @@ core_initcall(cma_init_reserved_areas);
 static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 				phys_addr_t base, phys_addr_t limit,
+				phys_addr_t alignment,
 				struct cma **res_cma, bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
-	phys_addr_t alignment;
 	int ret = 0;
 
-	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
-		 (unsigned long)size, (unsigned long)base,
-		 (unsigned long)limit);
+	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
+		__func__, (unsigned long)size, (unsigned long)base,
+		(unsigned long)limit, (unsigned long)alignment);
 
-	/* Sanity checks */
 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
 		return -ENOSPC;
 	}
@@ -234,8 +234,17 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 	if (!size)
 		return -EINVAL;
 
-	/* Sanitise input arguments */
-	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	if (alignment && !is_power_of_2(alignment))
+		return -EINVAL;
+
+	/*
+	 * Sanitise input arguments.
+	 * Pages at both ends of the CMA area could be merged into adjacent
+	 * unmovable-migratetype pages by the buddy allocator; in that case
+	 * we could not get contiguous memory, which is not what we want.
+	 */
+	alignment = max(alignment,
+		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
@@ -299,7 +308,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
 	int ret;
 
-	ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
+	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
+						res_cma, fixed);
 	if (ret)
 		return ret;
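
For illustration, a minimal user-space sketch of the sanitisation logic the
patch adds. PAGE_SIZE, MAX_ORDER and pageblock_order are kernel macros whose
values depend on the architecture and configuration; the values below are
assumptions for a common x86_64 build, and the max()/ALIGN() macros are
stand-ins for the kernel helpers, not the kernel's definitions:

/*
 * Sketch only: assumed 4 KiB pages, default buddy order 11, and
 * pageblock_order 9 (2 MiB pageblocks), as on a typical x86_64 config.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

#define PAGE_SIZE       4096ULL /* assumed page size */
#define MAX_ORDER       11      /* assumed buddy allocator max order */
#define pageblock_order 9       /* assumed pageblock order */

#define max(a, b)       ((a) > (b) ? (a) : (b))
#define ALIGN(x, a)     (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	phys_addr_t alignment = 0;      /* caller passed no constraint */
	phys_addr_t base  = 0x10000000; /* requested base address */
	phys_addr_t size  = 0x01234000; /* requested region size */
	phys_addr_t limit = 0x40000000; /* highest allowed address */

	/* Non-zero caller alignment must be a power of 2, as in the patch. */
	if (alignment && (alignment & (alignment - 1))) {
		fprintf(stderr, "invalid alignment\n");
		return 1;
	}

	/* Clamp to the buddy/pageblock minimum, as the patch does. */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));

	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	printf("alignment=%#llx base=%#llx size=%#llx limit=%#llx\n",
	       (unsigned long long)alignment, (unsigned long long)base,
	       (unsigned long long)size, (unsigned long long)limit);
	return 0;
}

With these assumed values the enforced minimum alignment is
PAGE_SIZE << (MAX_ORDER - 1) = 4 MiB, the size of the largest buddy block, so
the reserved region's edges always fall on max-order block boundaries and the
buddy allocator cannot merge pages at either end with adjacent
unmovable-migratetype pages outside the region.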