From dc21af99fadcfa0ae65b52fd0895f85824f0c288 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 4 Jan 2011 19:09:43 +0000
Subject: [PATCH] ARM: P2V: introduce phys_to_virt/virt_to_phys runtime
 patching

This idea came from Nicolas; Eric Miao produced an initial version,
which was then rewritten into this.

Patch the physical to virtual translations at runtime.  As we modify
the code, this makes it incompatible with XIP kernels, but it allows
us to select the translation at runtime with minimal loss of
performance.

As many translations are of the form:

	physical = virtual + (PHYS_OFFSET - PAGE_OFFSET)
	virtual  = physical - (PHYS_OFFSET - PAGE_OFFSET)

we generate an 'add' instruction for __virt_to_phys(), and a 'sub'
instruction for __phys_to_virt().  At run time we calculate
(PHYS_OFFSET - PAGE_OFFSET) by comparing the address we are running at
prior to MMU initialization with the address we should be running at
once the MMU has been initialized, and place this constant into the
above add/sub instructions.

Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real
PHYS_OFFSET, as PAGE_OFFSET is a build-time constant, and save this for
the C-mode PHYS_OFFSET variable definition to use.

At present we are unable to support Realview with Sparsemem enabled,
as it uses a complex mapping function, or MSM, as it requires a
constant which will not fit in the immediate field of our add/sub
instructions.

Add a module version magic string for this feature to prevent
incompatible modules from being loaded.

Tested-by: Tony Lindgren
Reviewed-by: Nicolas Pitre
Tested-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/Kconfig              | 13 +++++++
 arch/arm/include/asm/memory.h | 55 +++++++++++++++++++++-------
 arch/arm/include/asm/module.h | 15 ++++++--
 arch/arm/kernel/armksyms.c    |  4 +++
 arch/arm/kernel/head.S        | 68 +++++++++++++++++++++++++++++++++++
 arch/arm/kernel/module.c      | 23 +++++++++++-
 arch/arm/kernel/vmlinux.lds.S |  4 +++
 7 files changed, 167 insertions(+), 15 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5cff165b7eb..4147f76e798 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -191,6 +191,19 @@ config VECTORS_BASE
 	help
 	  The base address of exception vectors.
 
+config ARM_PATCH_PHYS_VIRT
+	bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on !XIP_KERNEL && !THUMB2_KERNEL && MMU
+	depends on !ARCH_MSM
+	depends on !ARCH_REALVIEW || !SPARSEMEM
+	help
+	  Patch phys-to-virt translation functions at runtime according to
+	  the position of the kernel in system memory.
+
+	  This can only be used with non-XIP, non-Thumb2, MMU kernels where
+	  the base of physical memory is at a 16MB boundary.
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 2efec578a62..7197879e1cb 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -24,8 +24,6 @@
  */
 #define UL(x)		_AC(x, UL)
 
-#define PHYS_OFFSET	PLAT_PHYS_OFFSET
-
 #ifdef CONFIG_MMU
 
 /*
@@ -134,16 +132,6 @@
 #define DTCM_OFFSET	UL(0xfffe8000)
 #endif
 
-/*
- * Physical vs virtual RAM address space conversion.  These are
- * private definitions which should NOT be used outside memory.h
- * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
- */
-#ifndef __virt_to_phys
-#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-
 /*
  * Convert a physical address to a Page Frame Number and back
  */
@@ -158,6 +146,49 @@
 
 #ifndef __ASSEMBLY__
 
+/*
+ * Physical vs virtual RAM address space conversion.  These are
+ * private definitions which should NOT be used outside memory.h
+ * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+extern unsigned long __pv_phys_offset;
+#define PHYS_OFFSET	__pv_phys_offset
+
+#define __pv_stub(from,to,instr)			\
+	__asm__("@ __pv_stub\n"				\
+	"1:	" instr "	%0, %1, %2\n"		\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "=r" (to)					\
+	: "r" (from), "I" (0x81000000))
+
+static inline unsigned long __virt_to_phys(unsigned long x)
+{
+	unsigned long t;
+	__pv_stub(x, t, "add");
+	return t;
+}
+
+static inline unsigned long __phys_to_virt(unsigned long x)
+{
+	unsigned long t;
+	__pv_stub(x, t, "sub");
+	return t;
+}
+#else
+#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#endif
+
 /*
  * The DMA mask corresponding to the maximum bus address allocatable
  * using GFP_DMA.  The default here places no restriction on DMA
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 12c8e680cbf..d072c21332e 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -25,8 +25,19 @@ struct mod_arch_specific {
 };
 
 /*
- * Include the ARM architecture version.
+ * Add the ARM architecture version to the version magic string
  */
-#define MODULE_ARCH_VERMAGIC "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+	MODULE_ARCH_VERMAGIC_ARMVSN \
+	MODULE_ARCH_VERMAGIC_P2V
 
 #endif /* _ASM_ARM_MODULE_H */
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index e5e1e538767..9615423c37d 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -170,3 +170,7 @@ EXPORT_SYMBOL(mcount);
 #endif
 EXPORT_SYMBOL(__gnu_mcount_nc);
 #endif
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+EXPORT_SYMBOL(__pv_phys_offset);
+#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 03a588b6e15..1db8ead2e33 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -97,6 +97,9 @@ ENTRY(stext)
 	bl	__vet_atags
 #ifdef CONFIG_SMP_ON_UP
 	bl	__fixup_smp
+#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+	bl	__fixup_pv_table
 #endif
 	bl	__create_page_tables
 
@@ -438,4 +441,69 @@ smp_on_up:
 
 #endif
 
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+/* __fixup_pv_table - patch the stub instructions with the delta between
+ * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
+ * can be expressed by an immediate shifter operand.  The stub instruction
+ * has a form of '(add|sub) rd, rn, #imm'.
+ */
+	__HEAD
+__fixup_pv_table:
+	adr	r0, 1f
+	ldmia	r0, {r3-r5, r7}
+	sub	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
+	add	r4, r4, r3	@ adjust table start address
+	add	r5, r5, r3	@ adjust table end address
+	str	r8, [r7, r3]!	@ save computed PHYS_OFFSET to __pv_phys_offset
+	mov	r6, r3, lsr #24	@ constant for add/sub instructions
+	teq	r3, r6, lsl #24	@ must be 16MiB aligned
+	bne	__error
+	str	r6, [r7, #4]	@ save to __pv_offset
+	b	__fixup_a_pv_table
+ENDPROC(__fixup_pv_table)
+
+	.align
+1:	.long	.
+	.long	__pv_table_begin
+	.long	__pv_table_end
+2:	.long	__pv_phys_offset
+
+	.text
+__fixup_a_pv_table:
+	b	3f
+2:	ldr	ip, [r7, r3]
+	bic	ip, ip, #0x000000ff
+	orr	ip, ip, r6
+	str	ip, [r7, r3]
+3:	cmp	r4, r5
+	ldrcc	r7, [r4], #4	@ use branch for delay slot
+	bcc	2b
+	mov	pc, lr
+ENDPROC(__fixup_a_pv_table)
+
+ENTRY(fixup_pv_table)
+	stmfd	sp!, {r4 - r7, lr}
+	ldr	r2, 2f			@ get address of __pv_phys_offset
+	mov	r3, #0			@ no offset
+	mov	r4, r0			@ r0 = table start
+	add	r5, r0, r1		@ r1 = table size
+	ldr	r6, [r2, #4]		@ get __pv_offset
+	bl	__fixup_a_pv_table
+	ldmfd	sp!, {r4 - r7, pc}
+ENDPROC(fixup_pv_table)
+
+	.align
+2:	.long	__pv_phys_offset
+
+	.data
+	.globl	__pv_phys_offset
+	.type	__pv_phys_offset, %object
+__pv_phys_offset:
+	.long	0
+	.size	__pv_phys_offset, . - __pv_phys_offset
+__pv_offset:
+	.long	0
+#endif
+
 #include "head-common.S"
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2cfe8161b47..c5679f6d9f6 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -268,12 +268,28 @@ struct mod_unwind_map {
 	const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_pv_table(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
+	const Elf_Shdr *s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
 	struct mod_unwind_map maps[ARM_SEC_MAX];
 	int i;
 
@@ -314,6 +330,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 			 maps[i].unw_sec->sh_size,
 			 maps[i].txt_sec->sh_addr,
 			 maps[i].txt_sec->sh_size);
+#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+	s = find_mod_section(hdr, sechdrs, ".pv_table");
+	if (s)
+		fixup_pv_table((void *)s->sh_addr, s->sh_size);
 #endif
 	return 0;
 }
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3f203..45b5651777e 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -57,6 +57,10 @@ SECTIONS
 		__smpalt_end = .;
 #endif
 
+		__pv_table_begin = .;
+		*(.pv_table)
+		__pv_table_end = .;
+
 		INIT_SETUP(16)
 
 		INIT_CALLS
-- 
2.39.5
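
As an aside for readers following the fixup code above: the C sketch below is an
editorial illustration, not part of the patch.  It models the arithmetic that
__fixup_a_pv_table performs on each stub recorded in the .pv_table section.  The
stub is emitted as 'add/sub rd, rn, #0x81000000', i.e. the 8-bit immediate 0x81
rotated into bits 31..24 of the operand; the fixup keeps everything except the
low byte of the instruction word ('bic ip, ip, #0x000000ff') and writes
(PHYS_OFFSET - PAGE_OFFSET) >> 24 into it ('orr ip, ip, r6'), which is why the
delta must be a 16MiB multiple.  The helper name patch_stub() and the example
PAGE_OFFSET/PHYS_OFFSET values are assumptions made only for this sketch.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative model of the fixup applied to one stub instruction:
 * keep the opcode/register/rotate fields, replace the 8-bit immediate
 * with the top byte of the phys-virt delta.  (Hypothetical helper,
 * not kernel code.)
 */
static uint32_t patch_stub(uint32_t insn, uint32_t delta)
{
	uint32_t imm = delta >> 24;		/* mov r6, r3, lsr #24 */

	assert((imm << 24) == delta);		/* teq r3, r6, lsl #24: 16MiB aligned */
	return (insn & ~0xffu) | imm;		/* bic ip, ip, #0xff; orr ip, ip, r6 */
}

int main(void)
{
	/* Example layout, assumed for illustration only. */
	uint32_t page_offset = 0xc0000000;	/* kernel virtual base */
	uint32_t phys_offset = 0x20000000;	/* where RAM starts on this board */
	uint32_t delta = phys_offset - page_offset;	/* 0x60000000 */

	/* 0xe2800481 encodes 'add r0, r0, #0x81000000' - an unpatched stub. */
	uint32_t patched = patch_stub(0xe2800481, delta);

	printf("delta   = 0x%08" PRIx32 "\n", delta);
	printf("patched = 0x%08" PRIx32 " ('add r0, r0, #0x%08" PRIx32 "')\n",
	       patched, (patched & 0xffu) << 24);
	printf("virt 0x%08" PRIx32 " -> phys 0x%08" PRIx32 "\n",
	       page_offset + 0x1000, page_offset + 0x1000 + delta);
	return 0;
}

Compiled with a host C compiler this prints delta = 0x60000000, a patched word
of 0xe2800460 ('add r0, r0, #0x60000000'), and the translation
0xc0001000 -> 0x20001000.  Module stubs are handled the same way later by
fixup_pv_table(), which simply reloads the saved __pv_offset instead of
recomputing the delta.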