#define SPFIX(code...)
#endif
--- .macro svc_entry, stack_hole=0
+++ .macro svc_entry, stack_hole=0, trace=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
@
stmia r7, {r2 - r6}
+++ .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
+++ .endif
.endm
.align 5
UNWIND(.fnend )
ENDPROC(__pabt_svc)
+++ .align 5
+++__fiq_svc:
+++ svc_entry trace=0
+++ mov r0, sp @ struct pt_regs *regs
+++ bl handle_fiq_as_nmi
+++ svc_exit_via_fiq
+++ UNWIND(.fnend )
+++ENDPROC(__fiq_svc)
+++
.align 5
.LCcralign:
.word cr_alignment
.LCfp:
.word fp_enter
+++/*
+++ * Abort mode handlers
+++ */
+++
+++@
+++@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
+++@ and reuses the same macros. However, in abort mode we must also
+++@ save/restore lr_abt and spsr_abt to make nested aborts safe.
+++@
+++ .align 5
+++__fiq_abt:
+++ svc_entry trace=0
+++
+++ ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
+++ THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
+++ THUMB( msr cpsr_c, r0 )
+++ mov r1, lr @ Save lr_abt
+++ mrs r2, spsr @ Save spsr_abt, abort is now safe
+++ ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
+++ THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
+++ THUMB( msr cpsr_c, r0 )
+++ stmfd sp!, {r1 - r2}
+++
+++ add r0, sp, #8 @ struct pt_regs *regs
+++ bl handle_fiq_as_nmi
+++
+++ ldmfd sp!, {r1 - r2}
+++ ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
+++ THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
+++ THUMB( msr cpsr_c, r0 )
+++ mov lr, r1 @ Restore lr_abt, abort is unsafe
+++ msr spsr_cxsf, r2 @ Restore spsr_abt
+++ ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
+++ THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
+++ THUMB( msr cpsr_c, r0 )
+++
+++ svc_exit_via_fiq
+++ UNWIND(.fnend )
+++ENDPROC(__fiq_abt)
+++
/*
* User mode handlers
*
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
--- .macro usr_entry
+++ .macro usr_entry, trace=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
+++ ATRAP( mrc p15, 0, r7, c1, c0, 0)
+++ ATRAP( ldr r8, .LCcralign)
+++
ldmia r0, {r3 - r5}
add r0, sp, #S_PC @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
str r3, [sp] @ save the "real" r0 copied
@ from the exception stack
+++ ATRAP( ldr r8, [r8, #0])
+++
@
@ We are now ready to fill in the remaining blanks on the stack:
@
ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
--- @
@ Enable the alignment trap while in kernel mode
--- @
--- alignment_trap r0, .LCcralign
+++ ATRAP( teq r8, r7)
+++ ATRAP( mcrne p15, 0, r8, c1, c0, 0)
@
@ Clear FP to mark the first stack frame
@
zero_fp
+++ .if \trace
#ifdef CONFIG_IRQSOFF_TRACER
bl trace_hardirqs_off
#endif
ct_user_exit save = 0
+++ .endif
.endm
.macro kuser_cmpxchg_check
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
+++ .align 5
+++__fiq_usr:
+++ usr_entry trace=0
+++ kuser_cmpxchg_check
+++ mov r0, sp @ struct pt_regs *regs
+++ bl handle_fiq_as_nmi
+++ get_thread_info tsk
+++ restore_user_regs fast = 0, offset = 0
+++ UNWIND(.fnend )
+++ENDPROC(__fiq_usr)
+++
/*
* Register switch for ARMv3 and ARMv4 processors
* r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
b vector_addrexcptn
/*=============================================================================
--- * Undefined FIQs
+++ * FIQ "NMI" handler
*-----------------------------------------------------------------------------
--- * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
--- * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
--- * Basically to switch modes, we *HAVE* to clobber one register... brain
--- * damage alert! I don't think that we can execute any code in here in any
--- * other mode than FIQ... Ok you can switch to another mode, but you can't
--- * get out of that mode without clobbering one register.
+++ * Handle a FIQ using the SVC stack, allowing FIQs to act like NMIs on
+++ * x86 systems.
*/
---vector_fiq:
--- subs pc, lr, #4
+++ vector_stub fiq, FIQ_MODE, 4
+++
+++ .long __fiq_usr @ 0 (USR_26 / USR_32)
+++ .long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
+++ .long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
+++ .long __fiq_svc @ 3 (SVC_26 / SVC_32)
+++ .long __fiq_svc @ 4
+++ .long __fiq_svc @ 5
+++ .long __fiq_svc @ 6
+++ .long __fiq_abt @ 7
+++ .long __fiq_svc @ 8
+++ .long __fiq_svc @ 9
+++ .long __fiq_svc @ a
+++ .long __fiq_svc @ b
+++ .long __fiq_svc @ c
+++ .long __fiq_svc @ d
+++ .long __fiq_svc @ e
+++ .long __fiq_svc @ f
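For reference, the vector stub dispatches on the low four bits of the
interrupted mode, so the table above is indexed as in this hedged C
restatement (fiq_table_index is an illustrative name, not a kernel symbol):

	/*
	 * Illustration only: vector_stub masks the saved CPSR down to its
	 * 4-bit mode field and uses it as a word index into the table.
	 */
	static inline unsigned int fiq_table_index(unsigned long spsr)
	{
		return spsr & 0x0f;	/* 0 = USR, 1 = FIQ, 7 = ABT, 0xf = SYS */
	}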
.globl vector_fiq_offset
.equ vector_fiq_offset, vector_fiq
#endif
.endm
--- .macro alignment_trap, rtemp, label
#ifdef CONFIG_ALIGNMENT_TRAP
--- ldr \rtemp, \label
--- ldr \rtemp, [\rtemp]
--- mcr p15, 0, \rtemp, c1, c0
+++#define ATRAP(x...) x
+++#else
+++#define ATRAP(x...)
+++#endif
+++
+++ .macro alignment_trap, rtmp1, rtmp2, label
+++#ifdef CONFIG_ALIGNMENT_TRAP
+++ mrc p15, 0, \rtmp2, c1, c0, 0
+++ ldr \rtmp1, \label
+++ ldr \rtmp1, [\rtmp1]
+++ teq \rtmp1, \rtmp2
+++ mcrne p15, 0, \rtmp1, c1, c0, 0
#endif
.endm
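In C terms the reworked macro performs a read/compare/conditional-write of
the control register instead of an unconditional write; a rough sketch,
where read_sctlr() and write_sctlr() are hypothetical stand-ins for the
mrc/mcr cp15 accesses:

	/* Rough C equivalent of the alignment_trap macro above. */
	static inline void sync_alignment_trap(void)
	{
		unsigned long cur = read_sctlr();	/* mrc p15, 0, \rtmp2, c1, c0, 0 */
		unsigned long want = cr_alignment;	/* two ldrs via the literal pool */

		if (want != cur)			/* teq ... mcrne */
			write_sctlr(want);
	}

This turns the common case (trap already enabled) into a cheap compare
rather than a cp15 write on every exception return to user mode.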
#endif
.endif
msr spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_V6)
- ldr r0, [sp]
- strex r1, r2, [sp] @ clear the exclusive monitor
- ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
-#elif defined(CONFIG_CPU_32v6K)
- clrex @ clear the exclusive monitor
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
-#else
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
+ sub r0, sp, #4 @ uninhabited address
+ strex r1, r2, [r0] @ clear the exclusive monitor
#endif
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ .endm
+
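The same workaround recurs in the hunks below. The idea, sketched here as
GCC inline assembly (clear_exclusive_monitor is an illustrative name), is
that any strex clears the local exclusive monitor, so a dummy
store-exclusive to scratch memory substitutes for clrex:

	/* Erratum #830321 workaround pattern, illustration only. */
	static inline void clear_exclusive_monitor(void)
	{
		unsigned long tmp, scratch;

		asm volatile("strex %0, %1, [%2]"
			     : "=&r" (tmp)
			     : "r" (0UL), "r" (&scratch)
			     : "memory");
	}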
+++ @
+++ @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
+++ @
+++ @ This macro acts in a similar manner to svc_exit but switches to FIQ
+++ @ mode to restore the final part of the register state.
+++ @
+++ @ We cannot use the normal svc_exit procedure because that would
+++ @ clobber spsr_svc (FIQ could be delivered during the first few
+++ @ instructions of vector_swi meaning its contents have not been
+++ @ saved anywhere).
+++ @
+++ @ Note that, unlike svc_exit, this macro also does not allow a caller
+++ @ supplied rpsr. This is because FIQ exceptions are not re-entrant
+++ @ and the handlers cannot call into the scheduler (meaning the value
+++ @ on the stack remains correct).
+++ @
+++ .macro svc_exit_via_fiq
+++ mov r0, sp
+++ ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
+++ @ clobber state restored below)
+++ msr cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+++ add r8, r0, #S_PC
+++ ldr r9, [r0, #S_PSR]
+++ msr spsr_cxsf, r9
+++ ldr r0, [r0, #S_R0]
+++ ldmia r8, {pc}^
+++ .endm
+++
.macro restore_user_regs, fast = 0, offset = 0
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
strex r1, r2, [sp] @ clear the exclusive monitor
-#elif defined(CONFIG_CPU_32v6K)
- clrex @ clear the exclusive monitor
#endif
.if \fast
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
.endif
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
- clrex @ clear the exclusive monitor
+
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
+ strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
+
stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
ldmia sp, {r0 - r12}
mov sp, lr
rfeia sp!
.endm
+++ @
+++ @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
+++ @
+++ @ For full details see non-Thumb implementation above.
+++ @
+++ .macro svc_exit_via_fiq
+++ add r0, sp, #S_R2
+++ ldr lr, [sp, #S_LR]
+++ ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+++ @ clobber state restored below)
+++ ldmia r0, {r2 - r12}
+++ mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+++ msr cpsr_c, r1
+++ sub r0, #S_R2
+++ add r8, r0, #S_PC
+++ ldmia r0, {r0 - r1}
+++ rfeia r8
+++ .endm
+++
#ifdef CONFIG_CPU_V7M
/*
* Note we don't need to do clrex here as clearing the local monitor is
.endm
#else /* ifdef CONFIG_CPU_V7M */
.macro restore_user_regs, fast = 0, offset = 0
- clrex @ clear the exclusive monitor
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC] @ get pc
add sp, sp, #\offset + S_SP
msr spsr_cxsf, r1 @ save in spsr_svc
+
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
+ strex r1, r2, [sp] @ clear the exclusive monitor
+
.if \fast
ldmdb sp, {r1 - r12} @ get calling r1 - r12
.else
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
cpumask_copy(d->affinity, affinity);
return ret;
raw_spin_unlock(&desc->lock);
if (affinity_broken && printk_ratelimit())
--- pr_warning("IRQ%u no longer affine to CPU%u\n", i,
--- smp_processor_id());
+++ pr_warn("IRQ%u no longer affine to CPU%u\n",
+++ i, smp_processor_id());
}
local_irq_restore(flags);
static void cpu_pmu_enable_percpu_irq(void *data)
{
- - struct arm_pmu *cpu_pmu = data;
- - struct platform_device *pmu_device = cpu_pmu->plat_device;
- - int irq = platform_get_irq(pmu_device, 0);
+ + int irq = *(int *)data;
enable_percpu_irq(irq, IRQ_TYPE_NONE);
- - cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
}
static void cpu_pmu_disable_percpu_irq(void *data)
{
- - struct arm_pmu *cpu_pmu = data;
- - struct platform_device *pmu_device = cpu_pmu->plat_device;
- - int irq = platform_get_irq(pmu_device, 0);
+ + int irq = *(int *)data;
- - cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
disable_percpu_irq(irq);
}
irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
- - on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+ + on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
free_percpu_irq(irq, &percpu_pmu);
} else {
for (i = 0; i < irqs; ++i) {
irq);
return err;
}
- - on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+ + on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
} else {
for (i = 0; i < irqs; ++i) {
err = 0;
* continue. Otherwise, continue without this interrupt.
*/
if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
--- pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
--- irq, i);
+++ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+++ irq, i);
continue;
}
void show_regs(struct pt_regs * regs)
{
--- printk("\n");
__show_regs(regs);
dump_stack();
}
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
memset(&thread->fpstate, 0, sizeof(union fp_state));
+ + flush_tls();
+ +
thread_notify(THREAD_NOTIFY_FLUSH, thread);
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
--- return is_gate_vma(vma) ? "[vectors]" :
--- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
--- "[sigpage]" : NULL;
+++ return is_gate_vma(vma) ? "[vectors]" : NULL;
+++ }
+++
+++ /* If possible, provide a placement hint at a random offset from the
+++ * stack for the signal page.
+++ */
+++ static unsigned long sigpage_addr(const struct mm_struct *mm,
+++ unsigned int npages)
+++ {
+++ unsigned long offset;
+++ unsigned long first;
+++ unsigned long last;
+++ unsigned long addr;
+++ unsigned int slots;
+++
+++ first = PAGE_ALIGN(mm->start_stack);
+++
+++ last = TASK_SIZE - (npages << PAGE_SHIFT);
+++
+++ /* No room after stack? */
+++ if (first > last)
+++ return 0;
+++
+++ /* Just enough room? */
+++ if (first == last)
+++ return first;
+++
+++ slots = ((last - first) >> PAGE_SHIFT) + 1;
+++
+++ offset = get_random_int() % slots;
+++
+++ addr = first + (offset << PAGE_SHIFT);
+++
+++ return addr;
}
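As a worked example, the hint calculation can be replicated in user space
with hypothetical values for start_stack and TASK_SIZE (rand() standing in
for get_random_int()):

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define TASK_SIZE	0xbf000000UL	/* assumed 3G/1G split value */

	int main(void)
	{
		unsigned long first = PAGE_ALIGN(0xbef00000UL);	/* mm->start_stack */
		unsigned long last = TASK_SIZE - (1UL << PAGE_SHIFT);	/* npages = 1 */
		unsigned long slots = ((last - first) >> PAGE_SHIFT) + 1;
		unsigned long addr = first + ((rand() % slots) << PAGE_SHIFT);

		printf("%lu candidate slots, hint %#lx\n", slots, addr);
		return 0;
	}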
static struct page *signal_page;
extern struct page *get_signal_page(void);
+++ static const struct vm_special_mapping sigpage_mapping = {
+++ .name = "[sigpage]",
+++ .pages = &signal_page,
+++ };
+++
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
+++ struct vm_area_struct *vma;
unsigned long addr;
--- int ret;
+++ unsigned long hint;
+++ int ret = 0;
if (!signal_page)
signal_page = get_signal_page();
return -ENOMEM;
down_write(&mm->mmap_sem);
--- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+++ hint = sigpage_addr(mm, 1);
+++ addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(addr)) {
ret = addr;
goto up_fail;
}
--- ret = install_special_mapping(mm, addr, PAGE_SIZE,
+++ vma = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
--- &signal_page);
+++ &sigpage_mapping);
+++
+++ if (IS_ERR(vma)) {
+++ ret = PTR_ERR(vma);
+++ goto up_fail;
+++ }
--- if (ret == 0)
--- mm->context.sigpage = addr;
+++ mm->context.sigpage = addr;
up_fail:
up_write(&mm->mmap_sem);
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
+++#include <linux/irq.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}
---asmlinkage void do_unexp_fiq (struct pt_regs *regs)
+++/*
+++ * Handle FIQ similarly to NMI on x86 systems.
+++ *
+++ * The runtime environment for NMIs is extremely restrictive
+++ * (NMIs can pre-empt critical sections, meaning almost all locking is
+++ * forbidden), so this default FIQ handling must only be used in
+++ * circumstances where non-maskability improves robustness, such as
+++ * watchdog or debug logic.
+++ *
+++ * This handler is not appropriate for general purpose use in drivers
+++ * or platform code and can be overridden using set_fiq_handler.
+++ */
+++asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
--- printk("Hmm. Unexpected FIQ received, but trying to continue\n");
--- printk("You may have a hardware problem...\n");
+++ struct pt_regs *old_regs = set_irq_regs(regs);
+++
+++ nmi_enter();
+++
+++ /* nop. FIQ handlers for special arch/arm features can be added here. */
+++
+++ nmi_exit();
+++
+++ set_irq_regs(old_regs);
}
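Since the comment above names set_fiq_handler as the override mechanism, a
hedged sketch of that path follows; my_fiq_start/my_fiq_end are assumed
hand-written assembly labels, not symbols introduced by this patch:

	#include <linux/errno.h>
	#include <asm/fiq.h>

	static struct fiq_handler my_fiq_owner = {
		.name	= "my-watchdog-fiq",
	};

	extern unsigned char my_fiq_start[], my_fiq_end[];	/* assumed asm stub */

	static int install_my_fiq(void)
	{
		if (claim_fiq(&my_fiq_owner))
			return -EBUSY;
		set_fiq_handler(my_fiq_start, my_fiq_end - my_fiq_start);
		return 0;
	}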
/*
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
- - struct thread_info *thread = current_thread_info();
siginfo_t info;
if ((no >> 16) != (__ARM_NR_BASE>> 16))
return regs->ARM_r0;
case NR(set_tls):
- - thread->tp_value[0] = regs->ARM_r0;
- - if (tls_emu)
- - return 0;
- - if (has_tls_reg) {
- - asm ("mcr p15, 0, %0, c13, c0, 3"
- - : : "r" (regs->ARM_r0));
- - } else {
- - /*
- - * User space must never try to access this directly.
- - * Expect your app to break eventually if you do so.
- - * The user helper at 0xffff0fe0 must be used instead.
- - * (see entry-armv.S for details)
- - */
- - *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
- - }
+ + set_tls(regs->ARM_r0);
return 0;
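The open-coded sequence removed above survives as a helper; a sketch
consistent with the deleted lines (the real definition lives in
<asm/tls.h>):

	static inline void set_tls(unsigned long val)
	{
		current_thread_info()->tp_value[0] = val;

		if (tls_emu)
			return;			/* kernel emulates the TLS register */
		if (has_tls_reg)
			asm("mcr p15, 0, %0, c13, c0, 3"	/* TPIDRURO */
			    : : "r" (val));
		else
			*((unsigned int *)0xffff0ff0) = val;	/* kuser helper word */
	}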
#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
- - if (!efi_enabled(EFI_MEMMAP))
- -     early_init_fdt_scan_reserved_mem();
+ + early_init_fdt_scan_reserved_mem();
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))
void free_initrd_mem(unsigned long start, unsigned long end)
{
--- if (!keep_initrd)
+++ if (!keep_initrd) {
+++ if (start == initrd_start)
+++ start = round_down(start, PAGE_SIZE);
+++ if (end == initrd_end)
+++ end = round_up(end, PAGE_SIZE);
+++
free_reserved_area((void *)start, (void *)end, 0, "initrd");
+++ }
}
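A standalone illustration of the new rounding, with hypothetical,
non-page-aligned initrd bounds:

	#include <stdio.h>

	#define PAGE_SIZE		4096UL
	#define ROUND_DOWN(x, a)	((x) & ~((a) - 1))
	#define ROUND_UP(x, a)		ROUND_DOWN((x) + (a) - 1, (a))

	int main(void)
	{
		unsigned long start = 0xc0a00200UL, end = 0xc0b7fe00UL;

		printf("freed: [%#lx, %#lx)\n",
		       ROUND_DOWN(start, PAGE_SIZE), ROUND_UP(end, PAGE_SIZE));
		return 0;
	}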
static int __init keepinitrd_setup(char *__unused)