ARM: 7768/1: prevent risks of out-of-bound access in ASID allocator
author    Marc Zyngier <Marc.Zyngier@arm.com>    Fri, 21 Jun 2013 11:06:55 +0000 (12:06 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Mon, 22 Jul 2013 01:21:34 +0000 (18:21 -0700)
commit b8e4a4740fa2b17c0a447b3ab783b3dc10702e27 upstream.

On a CPU that never ran anything, both the active and reserved ASID
fields are set to zero. In this case the ASID_TO_IDX() macro will
return -1, which is not a very useful value to index a bitmap.

Instead of trying to offset the ASID so that ASID #1 is actually
bit 0 in the asid_map bitmap, just always ignore bit 0 and start
the search from bit 1. This makes the code a bit more readable
and removes the risk of out-of-bound access.
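
To see the problem concretely, here is a minimal user-space sketch (not the
kernel code itself) of the old and new index calculations. ASID_BITS and
ASID_MASK follow the definitions in arch/arm/mm/context.c; everything else
is illustrative only:

  /* Minimal sketch of the indexing change; not the kernel code. */
  #include <stdint.h>
  #include <stdio.h>

  #define ASID_BITS          8
  #define ASID_MASK          (~0ULL << ASID_BITS)

  /* Old scheme: ASID #1 maps to bit 0, so ASID #0 maps to index -1. */
  #define ASID_TO_IDX(asid)  (((asid) & ~ASID_MASK) - 1)

  /* New scheme: the ASID value is the bit index; bit 0 is simply reserved. */
  #define ASID_TO_BIT(asid)  ((asid) & ~ASID_MASK)

  int main(void)
  {
          uint64_t asid = 0;      /* a CPU that never ran anything */

          printf("old index: %lld (out of bounds)\n",
                 (long long)ASID_TO_IDX(asid));
          printf("new index: %llu (bit 0, never handed out)\n",
                 (unsigned long long)ASID_TO_BIT(asid));
          return 0;
  }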

Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/mm/context.c

index 8e12fcbb2c633b84085c3e0553f67fe8e212d19d..83e09058f96f7db3fca51bbc2906778e8d31cb99 100644
  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION     (1ULL << ASID_BITS)
-#define NUM_USER_ASIDS         (ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid)      ((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx)       ((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS         ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
@@ -137,7 +134,7 @@ static void flush_context(unsigned int cpu)
                         */
                        if (asid == 0)
                                asid = per_cpu(reserved_asids, i);
-                       __set_bit(ASID_TO_IDX(asid), asid_map);
+                       __set_bit(asid & ~ASID_MASK, asid_map);
                }
                per_cpu(reserved_asids, i) = asid;
        }
@@ -176,17 +173,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                /*
                 * Allocate a free ASID. If we can't find one, take a
                 * note of the currently active ASIDs and mark the TLBs
-                * as requiring flushes.
+                * as requiring flushes. We always count from ASID #1,
+                * as we reserve ASID #0 to switch via TTBR0 and indicate
+                * rollover events.
                 */
-               asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+               asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
                if (asid == NUM_USER_ASIDS) {
                        generation = atomic64_add_return(ASID_FIRST_VERSION,
                                                         &asid_generation);
                        flush_context(cpu);
-                       asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                       asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
                }
                __set_bit(asid, asid_map);
-               asid = generation | IDX_TO_ASID(asid);
+               asid |= generation;
                cpumask_clear(mm_cpumask(mm));
        }