#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
+#include <linux/log2.h>
struct cma {
unsigned long base_pfn;
static int __init __dma_contiguous_reserve_area(phys_addr_t size,
phys_addr_t base, phys_addr_t limit,
+ phys_addr_t alignment,
struct cma **res_cma, bool fixed)
{
struct cma *cma = &cma_areas[cma_area_count];
- phys_addr_t alignment;
int ret = 0;
- pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
- (unsigned long)size, (unsigned long)base,
- (unsigned long)limit);
+ pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
+ __func__, (unsigned long)size, (unsigned long)base,
+ (unsigned long)limit, (unsigned long)alignment);
- /* Sanity checks */
if (cma_area_count == ARRAY_SIZE(cma_areas)) {
pr_err("Not enough slots for CMA reserved regions!\n");
return -ENOSPC;
if (!size)
return -EINVAL;
- /* Sanitise input arguments */
- alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+ if (alignment && !is_power_of_2(alignment))
+ return -EINVAL;
+
+ /*
+ * Sanitise input arguments.
+ * Pages at both ends of the CMA area could be merged into adjacent
+ * unmovable-migratetype pages by the page allocator's buddy algorithm.
+ * In that case, you couldn't get contiguous memory, which is not what
+ * we want.
+ */
+ alignment = max(alignment,
+ (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
base = ALIGN(base, alignment);
size = ALIGN(size, alignment);
limit &= ~(alignment - 1);
{
int ret;
- ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
+ ret = __dma_contiguous_reserve_area(size, base, limit, 0,
+ res_cma, fixed);
if (ret)
return ret;