From 0425d0a8eddbe8065a5ca705340646b7ad3d34a6 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 7 Nov 2008 00:07:15 +0000
Subject: [PATCH] ARM: xsc3: fix xsc3_l2_inv_range

commit c7cf72dcadbe39c2077b32460f86c9f8167be3be upstream

When 'start' and 'end' are less than a cacheline apart and 'start' is
unaligned we are done after cleaning and invalidating the first
cacheline.  So check for (start < end) which will not walk off into
invalid address ranges when (start > end).

This issue was caught by drivers/dma/dmatest.

2.6.27 is susceptible.

Cc:
Cc: Haavard Skinnemoen
Cc: Lothar Waßmann
Cc: Lennert Buytenhek
Cc: Eric Miao
Signed-off-by: Dan Williams
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/mm/cache-xsc3l2.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 158bd96763d3..99ec0302adf3 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -97,7 +97,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Clean and invalidate partial last cache line.
 	 */
-	if (end & (CACHE_LINE_SIZE - 1)) {
+	if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
 		xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
 		xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
 		end &= ~(CACHE_LINE_SIZE - 1);
@@ -106,7 +106,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
-	while (start != end) {
+	while (start < end) {
 		xsc3_l2_inv_pa(start);
 		start += CACHE_LINE_SIZE;
 	}
-- 
2.39.2
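
A note on the failure mode the commit message describes, with a minimal
user-space sketch of the address walk. This is not kernel code: inv_pa()
and clean_pa() are stand-ins that only print, the addresses and the
32-byte CACHE_LINE_SIZE are assumed for illustration, and the
partial-first-line step is reproduced from the surrounding function
rather than from the hunk above. It shows how, when 'start' and 'end'
land in the same cache line and 'start' is unaligned, rounding 'start'
up pushes it past 'end', so the old 'start != end' loop walks off into
addresses that were never requested, while 'start < end' stops at once.

#include <stdio.h>

#define CACHE_LINE_SIZE 32UL	/* assumed XScale3 L2 line size */

/* Stand-ins for the cache-maintenance helpers: just log the address. */
static void inv_pa(unsigned long pa)   { printf("  inv   %#lx\n", pa); }
static void clean_pa(unsigned long pa) { printf("  clean %#lx\n", pa); }

/*
 * Sketch of the xsc3_l2_inv_range() address walk; 'fixed' selects the
 * (start < end) checks introduced by the patch instead of the old ones.
 */
static void inv_range(unsigned long start, unsigned long end, int fixed)
{
	int guard = 8;	/* cap iterations so the broken case cannot run away */

	/* Partial first line: clean+invalidate it, round 'start' up. */
	if (start & (CACHE_LINE_SIZE - 1)) {
		clean_pa(start & ~(CACHE_LINE_SIZE - 1));
		inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/* Partial last line: with the fix, skipped once 'start' has passed 'end'. */
	if ((!fixed || start < end) && (end & (CACHE_LINE_SIZE - 1))) {
		clean_pa(end & ~(CACHE_LINE_SIZE - 1));
		inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/* Full lines in between. */
	while ((fixed ? start < end : start != end) && guard--) {
		inv_pa(start);
		start += CACHE_LINE_SIZE;
	}
	if (guard < 0)
		printf("  ...walked past 'end', would keep going\n");
}

int main(void)
{
	/* 'start' and 'end' less than a cache line apart, 'start' unaligned */
	printf("old condition:\n");
	inv_range(0x1021, 0x1025, 0);
	printf("fixed condition:\n");
	inv_range(0x1021, 0x1025, 1);
	return 0;
}

With the old condition the walk cleans/invalidates line 0x1020, rounds
'start' up to 0x1040, then keeps invalidating 0x1040, 0x1060, ... past
'end'; with the fix it stops after the first line, which already covers
the whole requested range.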