config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
- depends on AEABI && EXPERIMENTAL
+ depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
default y
help
This option preserves the old syscall interface along with the
#ifdef CONFIG_SMP_ON_UP
+ __INIT
__fixup_smp:
and r3, r9, #0x000f0000 @ architecture version
teq r3, #0x000f0000 @ CPU ID supported?
sub r3, r0, r3
add r4, r4, r3
add r5, r5, r3
-2: cmp r4, r5
- movhs pc, lr
- ldmia r4!, {r0, r6}
- ARM( str r6, [r0, r3] )
- THUMB( add r0, r0, r3 )
-#ifdef __ARMEB__
- THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
-#endif
- THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
- THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
- THUMB( strh r6, [r0] )
- b 2b
+ b __do_fixup_smp_on_up
ENDPROC(__fixup_smp)
.align
ALT_SMP(.long 1)
ALT_UP(.long 0)
.popsection
+#endif
+ .text
+__do_fixup_smp_on_up:
+ cmp r4, r5
+ movhs pc, lr
+ ldmia r4!, {r0, r6}
+ ARM( str r6, [r0, r3] )
+ THUMB( add r0, r0, r3 )
+#ifdef __ARMEB__
+ THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
+ THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
+ THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
+ THUMB( strh r6, [r0] )
+ b __do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+ stmfd sp!, {r4 - r6, lr}
+ mov r4, r0
+ add r5, r0, r1
+ mov r3, #0
+ bl __do_fixup_smp_on_up
+ ldmfd sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
#include "head-common.S"
u32 didr;
/* Do we implement the extended CPUID interface? */
- if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
- pr_warning("CPUID feature registers not supported. "
- "Assuming v6 debug is present.\n");
+ if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+ "CPUID feature registers not supported. "
+ "Assuming v6 debug is present.\n"))
return ARM_DEBUG_ARCH_V6;
- }
ARM_DBG_READ(c0, 0, didr);
return (didr >> 16) & 0xf;
return debug_arch;
}
+static int debug_arch_supported(void)
+{
+ u8 arch = get_debug_arch();
+ return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
/* Determine number of BRP register available. */
static int get_num_brp_resources(void)
{
int hw_breakpoint_slots(int type)
{
+ if (!debug_arch_supported())
+ return 0;
+
/*
* We can be called early, so don't rely on
* our static variables being initialised.
/*
* v7 debug contains save and restore registers so that debug state
- * can be maintained across low-power modes without leaving
- * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
- * we can write to the debug registers out of reset, so we must
- * unlock the OS Lock Access Register to avoid taking undefined
- * instruction exceptions later on.
+ * can be maintained across low-power modes without leaving the debug
+ * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+ * the debug registers out of reset, so we must unlock the OS Lock
+ * Access Register to avoid taking undefined instruction exceptions
+ * later on.
*/
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
/*
debug_arch = get_debug_arch();
- if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+ if (!debug_arch_supported()) {
pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
return 0;
}
pr_info("%d breakpoint(s) reserved for watchpoint "
"single-step.\n", core_num_reserved_brps);
+ /*
+ * Reset the breakpoint resources. We assume that a halting
+ * debugger will leave the world in a nice state for us.
+ */
+ on_each_cpu(reset_ctrl_regs, NULL, 1);
+
ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {
+ max_watchpoint_len = 4;
pr_warning("halting debug mode enabled. Assuming maximum "
- "watchpoint size of 4 bytes.");
+ "watchpoint size of %u bytes.", max_watchpoint_len);
} else {
- /*
- * Reset the breakpoint resources. We assume that a halting
- * debugger will leave the world in a nice state for us.
- */
- smp_call_function(reset_ctrl_regs, NULL, 1);
- reset_ctrl_regs(NULL);
-
/* Work out the maximum supported watchpoint length. */
max_watchpoint_len = get_max_wp_len();
pr_info("maximum watchpoint size is %u bytes.\n",
#include <asm/pgtable.h>
#include <asm/sections.h>
+#include <asm/smp_plat.h>
#include <asm/unwind.h>
#ifdef CONFIG_XIP_KERNEL
const Elf_Shdr *txt_sec;
};
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+ const Elf_Shdr *sechdrs, const char *name)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+
+ return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
+ const Elf_Shdr * __maybe_unused s = NULL;
#ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+ const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
struct mod_unwind_map maps[ARM_SEC_MAX];
int i;
maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size);
#endif
+ s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+ if (s && !is_smp())
+ fixup_smp((void *)s->sh_addr, s->sh_size);
return 0;
}
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
- if (tail >= buftail.fp)
+ if (tail + 1 >= buftail.fp)
return NULL;
return buftail.fp - 1;
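
The "tail + 1" bound above is what actually enforces the strict-progress rule
stated in the comment: the walker consumes one whole record at tail, follows
the saved frame pointer, and must land strictly above the record it just read
or it can revisit the same frame forever. A small sketch of the invariant, with
an illustrative record layout (the bound is expressed in units of the whole
record either way):

struct sketch_frame_tail {
	struct sketch_frame_tail *fp;	/* saved frame pointer */
	unsigned long lr;		/* saved return address */
};

static struct sketch_frame_tail *
sketch_next_frame(struct sketch_frame_tail *tail, struct sketch_frame_tail rec)
{
	/* rec is a copy of the record fetched from user memory at 'tail'. */
	if (tail + 1 >= rec.fp)		/* no strict upward progress */
		return NULL;
	return rec.fp - 1;		/* mirrors 'return buftail.fp - 1' above */
}

With the old 'tail >= buftail.fp' test, fp == tail + 1 was accepted and
buftail.fp - 1 handed back the frame that had just been visited, so the walk
could spin. The oprofile hunk at the end of this series applies the same bound.
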
struct platform_device collie_locomo_device = {
.name = "locomo",
.id = 0,
+ .dev = {
+ .platform_data = &locomo_info,
+ },
.num_resources = ARRAY_SIZE(locomo_resources),
.resource = locomo_resources,
};
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
depends on CPU_V6 || CPU_V7
- default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
+ default y if SMP
help
Say Y here if your ARMv6 processor supports the 'K' extension.
This enables the kernel to use some instructions not present
# ARMv7
config CPU_V7
bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
- select CPU_32v6K if !ARCH_OMAP2
+ select CPU_32v6K
select CPU_32v7
select CPU_ABRT_EV7
select CPU_PABRT_V7
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
- depends on CPU_V7 && !CPU_V6
+ depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
select HAVE_PROC_CPU if PROC_FS
default y if SMP
help
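
For context on the SWP_EMULATE entry above: the option traps the deprecated
SWP/SWPB instructions in the undefined-instruction handler and carries the
exchange out with exclusive loads and stores. A minimal sketch of that core
exchange, assuming an ARMv6K/v7 assembler and leaving out the operand decoding,
user-address checking and fault handling the real emulation needs; the function
name is illustrative, not the kernel's swp_emulate.c API.

static inline unsigned int sketch_emulate_swp(unsigned int *addr,
					      unsigned int newval)
{
	unsigned int oldval, tmp;

	asm volatile(
	"1:	ldrex	%0, [%3]\n"	/* oldval = *addr            */
	"	strex	%1, %2, [%3]\n"	/* try *addr = newval        */
	"	teq	%1, #0\n"	/* store lost the exclusive? */
	"	bne	1b"		/* retry until it succeeds   */
	: "=&r" (oldval), "=&r" (tmp)
	: "r" (newval), "r" (addr)
	: "cc", "memory");

	return oldval;			/* SWP yields the old value */
}

The new !CPU_USE_DOMAINS dependency fits this picture: with domain-based user
access the kernel would need unprivileged (T-variant) accesses, and LDREX/STREX
have no such variants, so the emulation presumably cannot be used there.
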
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
- if (tail >= buftail[0].fp)
+ if (tail + 1 >= buftail[0].fp)
return NULL;
return buftail[0].fp-1;