Patch from Catalin Marinas
If the low interrupt latency mode is enabled for the CPU (from ARMv6
onwards), the ldm/stm instructions are no longer atomic. An ldm instruction
restoring the sp and pc registers can be interrupted immediately after sp
was updated but before the pc. If this happens, the CPU restores the base
register to the value before the ldm instruction but if the base register
is not sp, the interrupt routine will corrupt the stack and the restarted
ldm instruction will load garbage.
Note that future ARM cores might always run in the low interrupt latency
mode.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
ldmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\
ldmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\
- ldmea fp, {fp, sp, pc}"
+ ldmfd sp, {fp, sp, pc}"
: "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
: "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
stmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\
stmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\
- ldmea fp, {fp, sp, pc}"
+ ldmfd sp, {fp, sp, pc}"
: "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
: "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc}
stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc}
- .macro load_regs,flags
- LOADREGS(\flags,fp,{r1, r4 - r8, fp, sp, pc})
+ .macro load_regs
+ ldmfd sp, {r1, r4 - r8, fp, sp, pc}
.endm
.macro load1b, reg1
.endm
.macro load1b, reg1
sum .req r3
.Lzero: mov r0, sum
sum .req r3
.Lzero: mov r0, sum
/*
* Align an unaligned destination pointer. We know that
/*
* Align an unaligned destination pointer. We know that
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
stmfd sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc}
stmfd sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc}
- .macro load_regs,flags
- ldm\flags fp, {r1, r2, r4-r8, fp, sp, pc}
+ .macro load_regs
+ ldmfd sp, {r1, r2, r4-r8, fp, sp, pc}
.endm
.macro load1b, reg1
.endm
.macro load1b, reg1
6002: teq r2, r1
strneb r0, [r1], #1
bne 6002b
6002: teq r2, r1
strneb r0, [r1], #1
bne 6002b