ARM: Fix ioremap() of address zero
author     Russell King <rmk+kernel@arm.linux.org.uk>
           Sat, 25 Aug 2012 08:03:15 +0000 (09:03 +0100)
committer  Russell King <rmk+kernel@arm.linux.org.uk>
           Sat, 25 Aug 2012 08:11:40 +0000 (09:11 +0100)
Murali Nalajala reports a regression that ioremapping address zero
results in an oops dump:

Unable to handle kernel paging request at virtual address fa200000
pgd = d4f80000
[fa200000] *pgd=00000000
Internal error: Oops: 5 [#1] PREEMPT SMP ARM
Modules linked in:
CPU: 0    Tainted: G        W (3.4.0-g3b5f728-00009-g638207a #13)
PC is at msm_pm_config_rst_vector_before_pc+0x8/0x30
LR is at msm_pm_boot_config_before_pc+0x18/0x20
pc : [<c0078f84>]    lr : [<c007903c>]    psr: a0000093
sp : c0837ef0  ip : cfe00000  fp : 0000000d
r10: da7efc17  r9 : 225c4278  r8 : 00000006
r7 : 0003c000  r6 : c085c824  r5 : 00000001  r4 : fa101000
r3 : fa200000  r2 : c095080c  r1 : 002250fc  r0 : 00000000
Flags: NzCv  IRQs off  FIQs on  Mode SVC_32  ISA ARM Segment kernel
Control: 10c5387d  Table: 25180059  DAC: 00000015
[<c0078f84>] (msm_pm_config_rst_vector_before_pc+0x8/0x30) from [<c007903c>] (msm_pm_boot_config_before_pc+0x18/0x20)
[<c007903c>] (msm_pm_boot_config_before_pc+0x18/0x20) from [<c007a55c>] (msm_pm_power_collapse+0x410/0xb04)
[<c007a55c>] (msm_pm_power_collapse+0x410/0xb04) from [<c007b17c>] (arch_idle+0x294/0x3e0)
[<c007b17c>] (arch_idle+0x294/0x3e0) from [<c000eed8>] (default_idle+0x18/0x2c)
[<c000eed8>] (default_idle+0x18/0x2c) from [<c000f254>] (cpu_idle+0x90/0xe4)
[<c000f254>] (cpu_idle+0x90/0xe4) from [<c057231c>] (rest_init+0x88/0xa0)
[<c057231c>] (rest_init+0x88/0xa0) from [<c07ff890>] (start_kernel+0x3a8/0x40c)
Code: c0704256 e12fff1e e59f2020 e5923000 (e5930000)

This is caused by the 'reserved' vmlist entries we insert (see commit
19b52abe3c5d7, "ARM: 7438/1: fill possible PMD empty section gaps"),
which get matched for physical address zero.
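
For reference, the static-mapping reuse path in __arm_ioremap_caller()
walks the early vmlist roughly as follows (a simplified sketch of the
3.x-era logic, not the verbatim source; paddr, size and mtype are the
ioremap parameters):

	struct vm_struct *vm;

	for (vm = vmlist; vm; vm = vm->next) {
		/* only consider permanent static mappings */
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		/* the memory type must match the one requested */
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		/* does [paddr, paddr + size) fit inside this mapping? */
		if (vm->phys_addr > paddr ||
		    paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;
		/* reuse the existing mapping rather than create a new one */
		return (void __iomem *)((unsigned long)vm->addr +
					(paddr - vm->phys_addr));
	}

The gap entries inserted by pmd_empty_section_gap() are allocated with
early_alloc_aligned(), which zeroes them, so their phys_addr is 0 while
their flags still carry VM_ARM_STATIC_MAPPING. An ioremap() of physical
address zero therefore matches such an entry and hands back a pointer
into a section that was never actually mapped, producing the fault
above. Once the entries are tagged VM_ARM_EMPTY_MAPPING instead, this
loop skips them and ioremap() of address zero falls through to the
normal remapping path.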

Resolve this by marking these reserved entries with a different flag.

Cc: <stable@vger.kernel.org>
Tested-by: Murali Nalajala <mnalajal@codeaurora.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/mm/mm.h
arch/arm/mm/mmu.c

diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160618ef0ede79d07b4f6a0873c30df2a1d0..a8ee92da3544926138f68116fbddbc2c55b64dbf 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING  0x40000000
 
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING   0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)               ((mt) << 20)
 #define VM_ARM_MTYPE_MASK      (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84af1c2a0347a6fe462dd2e3306db3e..eab94bc6f8059373550dbb971c11c22010c647c1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
        vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
        vm->addr = (void *)addr;
        vm->size = SECTION_SIZE;
-       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
        vm->caller = pmd_empty_section_gap;
        vm_area_add_early(vm);
 }
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
 
        /* we're still single threaded hence no lock needed here */
        for (vm = vmlist; vm; vm = vm->next) {
-               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+               if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
                        continue;
                addr = (unsigned long)vm->addr;
                if (addr < next)
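
For background, commit 19b52abe3c5d7 added these placeholder entries
because, on classic two-level ARM page tables, a Linux PMD spans two
1 MiB hardware sections that are populated as a pair. When a static
mapping begins or ends on an odd section boundary, the unused half of
its PMD is blocked off with a placeholder so nothing else maps it. A
minimal sketch of the end-of-mapping check, assuming vm is the entry
just examined (SECTION_SIZE and PMD_MASK are the kernel's own
definitions):

	/* SECTION_SIZE is 1 MiB; a PMD covers 2 * SECTION_SIZE here */
	unsigned long end = (unsigned long)vm->addr + vm->size;

	/* mapping ends halfway through its PMD: reserve the empty half */
	if ((end & ~PMD_MASK) == SECTION_SIZE)
		pmd_empty_section_gap(end);

The matching start-of-mapping case reserves the section below vm->addr
in the same way.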