git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branch 'linus' into efi/core, to refresh the branch and to pick up recent fixes
author: Ingo Molnar <mingo@kernel.org>
Wed, 3 Feb 2016 10:30:36 +0000 (11:30 +0100)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 3 Feb 2016 10:30:36 +0000 (11:30 +0100)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
13 files changed:
Documentation/efi-stub.txt
Documentation/x86/x86_64/mm.txt
arch/x86/include/asm/efi.h
arch/x86/kernel/vmlinux.lds.S
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi-bgrt.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/efi/quirks.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c

index 7747024d3bb70023fbff500cd3fc44546b31511b..e157469882614ae96ddced101a5fc77315d49043 100644 (file)
@@ -10,12 +10,12 @@ arch/x86/boot/header.S and arch/x86/boot/compressed/eboot.c,
 respectively. For ARM the EFI stub is implemented in
 arch/arm/boot/compressed/efi-header.S and
 arch/arm/boot/compressed/efi-stub.c. EFI stub code that is shared
-between architectures is in drivers/firmware/efi/efi-stub-helper.c.
+between architectures is in drivers/firmware/efi/libstub.
 
 For arm64, there is no compressed kernel support, so the Image itself
 masquerades as a PE/COFF image and the EFI stub is linked into the
 kernel. The arm64 EFI stub lives in arch/arm64/kernel/efi-entry.S
-and arch/arm64/kernel/efi-stub.c.
+and drivers/firmware/efi/libstub/arm64-stub.c.
 
 By using the EFI boot stub it's possible to boot a Linux kernel
 without the use of a conventional EFI boot loader, such as grub or
index 05712ac83e3826bfe7802e01932bde593128b129..c518dce7da4d62da22b192cbc3595ebf62ae3037 100644 (file)
@@ -16,6 +16,8 @@ ffffec0000000000 - fffffc0000000000 (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
+ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
+... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
 ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
@@ -32,11 +34,9 @@ reference.
 Current X86-64 implementations only support 40 bits of address space,
 but we support up to 46 bits. This expands into MBZ space in the page tables.
 
-->trampoline_pgd:
-
-We map EFI runtime services in the aforementioned PGD in the virtual
-range of 64Gb (arbitrarily set, can be raised if needed)
-
-0xffffffef00000000 - 0xffffffff00000000
+We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
+memory window (this size is arbitrary, it can be raised later if needed).
+The mappings are not part of any other kernel PGD and are only available
+during EFI runtime calls.
 
 -Andi Kleen, Jul 2004
index 0010c78c4998cf0702299ea2f8a9229e09bb6438..8fd9e637629a0ab22d49eb0ef51ef4feb9aa3987 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <asm/fpu/api.h>
 #include <asm/pgtable.h>
+#include <asm/tlb.h>
 
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
@@ -64,6 +65,17 @@ extern u64 asmlinkage efi_call(void *fp, ...);
 
 #define efi_call_phys(f, args...)              efi_call((f), args)
 
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+       u64     r15;
+       u64     prev_cr3;
+       pgd_t   *efi_pgt;
+       bool    use_pgd;
+       u64     phys_stack;
+} __packed;
+
 #define efi_call_virt(f, ...)                                          \
 ({                                                                     \
        efi_status_t __s;                                               \
@@ -71,7 +83,20 @@ extern u64 asmlinkage efi_call(void *fp, ...);
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
+                                                                       \
+       if (efi_scratch.use_pgd) {                                      \
+               efi_scratch.prev_cr3 = read_cr3();                      \
+               write_cr3((unsigned long)efi_scratch.efi_pgt);          \
+               __flush_tlb_all();                                      \
+       }                                                               \
+                                                                       \
        __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);    \
+                                                                       \
+       if (efi_scratch.use_pgd) {                                      \
+               write_cr3(efi_scratch.prev_cr3);                        \
+               __flush_tlb_all();                                      \
+       }                                                               \
+                                                                       \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
        __s;                                                            \
@@ -111,6 +136,7 @@ extern void __init efi_memory_uc(u64 addr, unsigned long size);
 extern void __init efi_map_region(efi_memory_desc_t *md);
 extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
 extern void efi_sync_low_kernel_mappings(void);
+extern int __init efi_alloc_page_tables(void);
 extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
 extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
 extern void __init old_map_region(efi_memory_desc_t *md);
index 74e4bf11f562e0354c227518421e2375ec16fafa..4f1994257a18a2fa92706ab27ad77b8ccec7871a 100644 (file)
@@ -325,6 +325,7 @@ SECTIONS
                __brk_limit = .;
        }
 
+       . = ALIGN(PAGE_SIZE);
        _end = .;
 
         STABS_DEBUG
index 2440814b00699e33809ce061d6f86a48437f7a10..632d34d2023779ecb8ed2224ac83cce566175492 100644 (file)
@@ -910,15 +910,10 @@ static void populate_pte(struct cpa_data *cpa,
        pte = pte_offset_kernel(pmd, start);
 
        while (num_pages-- && start < end) {
-
-               /* deal with the NX bit */
-               if (!(pgprot_val(pgprot) & _PAGE_NX))
-                       cpa->pfn &= ~_PAGE_NX;
-
-               set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+               set_pte(pte, pfn_pte(cpa->pfn, pgprot));
 
                start    += PAGE_SIZE;
-               cpa->pfn += PAGE_SIZE;
+               cpa->pfn++;
                pte++;
        }
 }
@@ -974,11 +969,11 @@ static int populate_pmd(struct cpa_data *cpa,
 
                pmd = pmd_offset(pud, start);
 
-               set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+               set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                                   massage_pgprot(pmd_pgprot)));
 
                start     += PMD_SIZE;
-               cpa->pfn  += PMD_SIZE;
+               cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
                cur_pages += PMD_SIZE >> PAGE_SHIFT;
        }
 
@@ -1047,11 +1042,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
         * Map everything starting from the Gb boundary, possibly with 1G pages
         */
        while (end - start >= PUD_SIZE) {
-               set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+               set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                                   massage_pgprot(pud_pgprot)));
 
                start     += PUD_SIZE;
-               cpa->pfn  += PUD_SIZE;
+               cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
                cur_pages += PUD_SIZE >> PAGE_SHIFT;
                pud++;
        }
index ea48449b2e63d1428de814d654ba58087dee436a..b0970661870a7efbd823ed35a47519422721e9a4 100644 (file)
@@ -10,6 +10,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
@@ -28,8 +31,7 @@ struct bmp_header {
 void __init efi_bgrt_init(void)
 {
        acpi_status status;
-       void __iomem *image;
-       bool ioremapped = false;
+       void *image;
        struct bmp_header bmp_header;
 
        if (acpi_disabled)
@@ -70,20 +72,14 @@ void __init efi_bgrt_init(void)
                return;
        }
 
-       image = efi_lookup_mapped_addr(bgrt_tab->image_address);
+       image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
        if (!image) {
-               image = early_ioremap(bgrt_tab->image_address,
-                                      sizeof(bmp_header));
-               ioremapped = true;
-               if (!image) {
-                       pr_err("Ignoring BGRT: failed to map image header memory\n");
-                       return;
-               }
+               pr_err("Ignoring BGRT: failed to map image header memory\n");
+               return;
        }
 
-       memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
-       if (ioremapped)
-               early_iounmap(image, sizeof(bmp_header));
+       memcpy(&bmp_header, image, sizeof(bmp_header));
+       memunmap(image);
        bgrt_image_size = bmp_header.size;
 
        bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
@@ -93,18 +89,14 @@ void __init efi_bgrt_init(void)
                return;
        }
 
-       if (ioremapped) {
-               image = early_ioremap(bgrt_tab->image_address,
-                                      bmp_header.size);
-               if (!image) {
-                       pr_err("Ignoring BGRT: failed to map image memory\n");
-                       kfree(bgrt_image);
-                       bgrt_image = NULL;
-                       return;
-               }
+       image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
+       if (!image) {
+               pr_err("Ignoring BGRT: failed to map image memory\n");
+               kfree(bgrt_image);
+               bgrt_image = NULL;
+               return;
        }
 
-       memcpy_fromio(bgrt_image, image, bgrt_image_size);
-       if (ioremapped)
-               early_iounmap(image, bmp_header.size);
+       memcpy(bgrt_image, image, bgrt_image_size);
+       memunmap(image);
 }
index ad285404ea7f58ac74e998d5658a8c72a9be019d..bdd9477f937c9af59cea61aa8f5f38a2c8baafc8 100644 (file)
@@ -815,6 +815,7 @@ static void __init kexec_enter_virtual_mode(void)
 {
 #ifdef CONFIG_KEXEC_CORE
        efi_memory_desc_t *md;
+       unsigned int num_pages;
        void *p;
 
        efi.systab = NULL;
@@ -829,6 +830,12 @@ static void __init kexec_enter_virtual_mode(void)
                return;
        }
 
+       if (efi_alloc_page_tables()) {
+               pr_err("Failed to allocate EFI page tables\n");
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return;
+       }
+
        /*
        * Map efi regions which were passed via setup_data. The virt_addr is a
        * fixed addr which was used in first kernel of a kexec boot.
@@ -843,6 +850,14 @@ static void __init kexec_enter_virtual_mode(void)
 
        BUG_ON(!efi.systab);
 
+       num_pages = ALIGN(memmap.nr_map * memmap.desc_size, PAGE_SIZE);
+       num_pages >>= PAGE_SHIFT;
+
+       if (efi_setup_page_tables(memmap.phys_map, num_pages)) {
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return;
+       }
+
        efi_sync_low_kernel_mappings();
 
        /*
@@ -869,7 +884,7 @@ static void __init kexec_enter_virtual_mode(void)
  * This function will switch the EFI runtime services to virtual mode.
  * Essentially, we look through the EFI memmap and map every region that
  * has the runtime attribute bit set in its memory descriptor into the
- * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ * efi_pgd page table.
  *
  * The old method which used to update that memory descriptor with the
  * virtual address obtained from ioremap() is still supported when the
@@ -879,8 +894,8 @@ static void __init kexec_enter_virtual_mode(void)
  *
  * The new method does a pagetable switch in a preemption-safe manner
  * so that we're in a different address space when calling a runtime
- * function. For function arguments passing we do copy the PGDs of the
- * kernel page table into ->trampoline_pgd prior to each call.
+ * function. For function arguments passing we do copy the PUDs of the
+ * kernel page table into efi_pgd prior to each call.
  *
  * Specially for kexec boot, efi runtime maps in previous kernel should
  * be passed in via setup_data. In that case runtime ranges will be mapped
@@ -895,6 +910,12 @@ static void __init __efi_enter_virtual_mode(void)
 
        efi.systab = NULL;
 
+       if (efi_alloc_page_tables()) {
+               pr_err("Failed to allocate EFI page tables\n");
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return;
+       }
+
        efi_merge_regions();
        new_memmap = efi_map_regions(&count, &pg_shift);
        if (!new_memmap) {
@@ -954,28 +975,11 @@ static void __init __efi_enter_virtual_mode(void)
        efi_runtime_mkexec();
 
        /*
-        * We mapped the descriptor array into the EFI pagetable above but we're
-        * not unmapping it here. Here's why:
-        *
-        * We're copying select PGDs from the kernel page table to the EFI page
-        * table and when we do so and make changes to those PGDs like unmapping
-        * stuff from them, those changes appear in the kernel page table and we
-        * go boom.
-        *
-        * From setup_real_mode():
-        *
-        * ...
-        * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
-        *
-        * In this particular case, our allocation is in PGD 0 of the EFI page
-        * table but we've copied that PGD from PGD[272] of the EFI page table:
-        *
-        *      pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
-        *
-        * where the direct memory mapping in kernel space is.
-        *
-        * new_memmap's VA comes from that direct mapping and thus clearing it,
-        * it would get cleared in the kernel page table too.
+        * We mapped the descriptor array into the EFI pagetable above
+        * but we're not unmapping it here because if we're running in
+        * EFI mixed mode we need all of memory to be accessible when
+        * we pass parameters to the EFI runtime services in the
+        * thunking code.
         *
         * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
         */
index ed5b67338294f1325fffe5f7d9fce637731d5917..58d669bc8250bb98caee8b37714a123050456efb 100644 (file)
  * say 0 - 3G.
  */
 
+int __init efi_alloc_page_tables(void)
+{
+       return 0;
+}
+
 void efi_sync_low_kernel_mappings(void) {}
 void __init efi_dump_pagetable(void) {}
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
index a0ac0f9c307f661c8b3ed08c4ca6d23507772e36..b492521503fe3db968bdfc0f7ced68039dea98e5 100644 (file)
@@ -15,6 +15,8 @@
  *
  */
 
+#define pr_fmt(fmt) "efi: " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -40,6 +42,7 @@
 #include <asm/fixmap.h>
 #include <asm/realmode.h>
 #include <asm/time.h>
+#include <asm/pgalloc.h>
 
 /*
  * We allocate runtime services regions bottom-up, starting from -4G, i.e.
  */
 static u64 efi_va = EFI_VA_START;
 
-/*
- * Scratch space used for switching the pagetable in the EFI stub
- */
-struct efi_scratch {
-       u64 r15;
-       u64 prev_cr3;
-       pgd_t *efi_pgt;
-       bool use_pgd;
-       u64 phys_stack;
-} __packed;
+struct efi_scratch efi_scratch;
 
 static void __init early_code_mapping_set_exec(int executable)
 {
@@ -83,8 +77,11 @@ pgd_t * __init efi_call_phys_prolog(void)
        int pgd;
        int n_pgds;
 
-       if (!efi_enabled(EFI_OLD_MEMMAP))
-               return NULL;
+       if (!efi_enabled(EFI_OLD_MEMMAP)) {
+               save_pgd = (pgd_t *)read_cr3();
+               write_cr3((unsigned long)efi_scratch.efi_pgt);
+               goto out;
+       }
 
        early_code_mapping_set_exec(1);
 
@@ -96,6 +93,7 @@ pgd_t * __init efi_call_phys_prolog(void)
                vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
        }
+out:
        __flush_tlb_all();
 
        return save_pgd;
@@ -109,8 +107,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        int pgd_idx;
        int nr_pgds;
 
-       if (!save_pgd)
+       if (!efi_enabled(EFI_OLD_MEMMAP)) {
+               write_cr3((unsigned long)save_pgd);
+               __flush_tlb_all();
                return;
+       }
 
        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
@@ -123,27 +124,98 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        early_code_mapping_set_exec(0);
 }
 
+static pgd_t *efi_pgd;
+
+/*
+ * We need our own copy of the higher levels of the page tables
+ * because we want to avoid inserting EFI region mappings (EFI_VA_END
+ * to EFI_VA_START) into the standard kernel page tables. Everything
+ * else can be shared, see efi_sync_low_kernel_mappings().
+ */
+int __init efi_alloc_page_tables(void)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       gfp_t gfp_mask;
+
+       if (efi_enabled(EFI_OLD_MEMMAP))
+               return 0;
+
+       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+       efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+       if (!efi_pgd)
+               return -ENOMEM;
+
+       pgd = efi_pgd + pgd_index(EFI_VA_END);
+
+       pud = pud_alloc_one(NULL, 0);
+       if (!pud) {
+               free_page((unsigned long)efi_pgd);
+               return -ENOMEM;
+       }
+
+       pgd_populate(NULL, pgd, pud);
+
+       return 0;
+}
+
 /*
  * Add low kernel mappings for passing arguments to EFI functions.
  */
 void efi_sync_low_kernel_mappings(void)
 {
-       unsigned num_pgds;
-       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+       unsigned num_entries;
+       pgd_t *pgd_k, *pgd_efi;
+       pud_t *pud_k, *pud_efi;
 
        if (efi_enabled(EFI_OLD_MEMMAP))
                return;
 
-       num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+       /*
+        * We can share all PGD entries apart from the one entry that
+        * covers the EFI runtime mapping space.
+        *
+        * Make sure the EFI runtime region mappings are guaranteed to
+        * only span a single PGD entry and that the entry also maps
+        * other important kernel regions.
+        */
+       BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
+       BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
+                       (EFI_VA_END & PGDIR_MASK));
+
+       pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
+       pgd_k = pgd_offset_k(PAGE_OFFSET);
+
+       num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
+       memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
+
+       /*
+        * We share all the PUD entries apart from those that map the
+        * EFI regions. Copy around them.
+        */
+       BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
+       BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
+
+       pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
+       pud_efi = pud_offset(pgd_efi, 0);
 
-       memcpy(pgd + pgd_index(PAGE_OFFSET),
-               init_mm.pgd + pgd_index(PAGE_OFFSET),
-               sizeof(pgd_t) * num_pgds);
+       pgd_k = pgd_offset_k(EFI_VA_END);
+       pud_k = pud_offset(pgd_k, 0);
+
+       num_entries = pud_index(EFI_VA_END);
+       memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
+
+       pud_efi = pud_offset(pgd_efi, EFI_VA_START);
+       pud_k = pud_offset(pgd_k, EFI_VA_START);
+
+       num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
+       memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
 }
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-       unsigned long text;
+       unsigned long pfn, text;
+       efi_memory_desc_t *md;
        struct page *page;
        unsigned npages;
        pgd_t *pgd;
@@ -151,8 +223,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;
 
-       efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
-       pgd = __va(efi_scratch.efi_pgt);
+       efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
+       pgd = efi_pgd;
 
        /*
         * It can happen that the physical address of new_memmap lands in memory
@@ -160,7 +232,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * and ident-map those pages containing the map before calling
         * phys_efi_set_virtual_address_map().
         */
-       if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+       pfn = pa_memmap >> PAGE_SHIFT;
+       if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
                pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                return 1;
        }
@@ -176,6 +249,25 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        if (!IS_ENABLED(CONFIG_EFI_MIXED))
                return 0;
 
+       /*
+        * Map all of RAM so that we can access arguments in the 1:1
+        * mapping when making EFI runtime calls.
+        */
+       for_each_efi_memory_desc(&memmap, md) {
+               if (md->type != EFI_CONVENTIONAL_MEMORY &&
+                   md->type != EFI_LOADER_DATA &&
+                   md->type != EFI_LOADER_CODE)
+                       continue;
+
+               pfn = md->phys_addr >> PAGE_SHIFT;
+               npages = md->num_pages;
+
+               if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, npages, 0)) {
+                       pr_err("Failed to map 1:1 memory\n");
+                       return 1;
+               }
+       }
+
        page = alloc_page(GFP_KERNEL|__GFP_DMA32);
        if (!page)
                panic("Unable to allocate EFI runtime stack < 4GB\n");
@@ -185,8 +277,9 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
        npages = (_end - _text) >> PAGE_SHIFT;
        text = __pa(_text);
+       pfn = text >> PAGE_SHIFT;
 
-       if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+       if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
                pr_err("Failed to map kernel text 1:1\n");
                return 1;
        }
@@ -196,20 +289,20 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
 void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-
-       kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
+       kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
 }
 
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
-       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-       unsigned long pf = 0;
+       unsigned long flags = 0;
+       unsigned long pfn;
+       pgd_t *pgd = efi_pgd;
 
        if (!(md->attribute & EFI_MEMORY_WB))
-               pf |= _PAGE_PCD;
+               flags |= _PAGE_PCD;
 
-       if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+       pfn = md->phys_addr >> PAGE_SHIFT;
+       if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
                pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, va);
 }
@@ -312,9 +405,7 @@ void __init efi_runtime_mkexec(void)
 void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
-       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-
-       ptdump_walk_pgd_level(NULL, pgd);
+       ptdump_walk_pgd_level(NULL, efi_pgd);
 #endif
 }
 
index 86d0f9e08dd95eb1023d5ac7ec4fb006aafb72c9..32020cb8bb08ce0e12ca0fbf6428cf81344e9054 100644 (file)
        mov %rsi, %cr0;                 \
        mov (%rsp), %rsp
 
-       /* stolen from gcc */
-       .macro FLUSH_TLB_ALL
-       movq %r15, efi_scratch(%rip)
-       movq %r14, efi_scratch+8(%rip)
-       movq %cr4, %r15
-       movq %r15, %r14
-       andb $0x7f, %r14b
-       movq %r14, %cr4
-       movq %r15, %cr4
-       movq efi_scratch+8(%rip), %r14
-       movq efi_scratch(%rip), %r15
-       .endm
-
-       .macro SWITCH_PGT
-       cmpb $0, efi_scratch+24(%rip)
-       je 1f
-       movq %r15, efi_scratch(%rip)            # r15
-       # save previous CR3
-       movq %cr3, %r15
-       movq %r15, efi_scratch+8(%rip)          # prev_cr3
-       movq efi_scratch+16(%rip), %r15         # EFI pgt
-       movq %r15, %cr3
-       1:
-       .endm
-
-       .macro RESTORE_PGT
-       cmpb $0, efi_scratch+24(%rip)
-       je 2f
-       movq efi_scratch+8(%rip), %r15
-       movq %r15, %cr3
-       movq efi_scratch(%rip), %r15
-       FLUSH_TLB_ALL
-       2:
-       .endm
-
 ENTRY(efi_call)
        SAVE_XMM
        mov (%rsp), %rax
@@ -83,16 +48,8 @@ ENTRY(efi_call)
        mov %r8, %r9
        mov %rcx, %r8
        mov %rsi, %rcx
-       SWITCH_PGT
        call *%rdi
-       RESTORE_PGT
        addq $48, %rsp
        RESTORE_XMM
        ret
 ENDPROC(efi_call)
-
-       .data
-ENTRY(efi_scratch)
-       .fill 3,8,0
-       .byte 0
-       .quad 0
index 2d66db8f80f992d3b0609373502031aacf91f161..453504662a3316608a05647b84bafe3e8ed366ef 100644 (file)
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "efi: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
@@ -267,7 +269,7 @@ void __init efi_apply_memmap_quirks(void)
         * services.
         */
        if (!efi_runtime_supported()) {
-               pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+               pr_info("Setup done, disabling due to 32/64-bit mismatch\n");
                efi_unmap_memmap();
        }
 
index 2cd37dad67a63645b15d9f2f496630a1e820ce19..9b815c8136870c9c68ad1ded4824f5c4593321f1 100644 (file)
@@ -326,38 +326,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
        return end;
 }
 
-/*
- * We can't ioremap data in EFI boot services RAM, because we've already mapped
- * it as RAM.  So, look it up in the existing EFI memory map instead.  Only
- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
- */
-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
-{
-       struct efi_memory_map *map;
-       void *p;
-       map = efi.memmap;
-       if (!map)
-               return NULL;
-       if (WARN_ON(!map->map))
-               return NULL;
-       for (p = map->map; p < map->map_end; p += map->desc_size) {
-               efi_memory_desc_t *md = p;
-               u64 size = md->num_pages << EFI_PAGE_SHIFT;
-               u64 end = md->phys_addr + size;
-               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-                   md->type != EFI_BOOT_SERVICES_CODE &&
-                   md->type != EFI_BOOT_SERVICES_DATA)
-                       continue;
-               if (!md->virt_addr)
-                       continue;
-               if (phys_addr >= md->phys_addr && phys_addr < end) {
-                       phys_addr += md->virt_addr - md->phys_addr;
-                       return (__force void __iomem *)(unsigned long)phys_addr;
-               }
-       }
-       return NULL;
-}
-
 static __initdata efi_config_table_type_t common_tables[] = {
        {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
        {ACPI_TABLE_GUID, "ACPI", &efi.acpi},
index 22c5285f77050f27d2d96a22b5ade945bae54316..75feb3f5829bab96067054dc7063400a723569f6 100644 (file)
@@ -167,14 +167,11 @@ static struct kset *esrt_kset;
 static int esre_create_sysfs_entry(void *esre, int entry_num)
 {
        struct esre_entry *entry;
-       char name[20];
 
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
-       sprintf(name, "entry%d", entry_num);
-
        entry->kobj.kset = esrt_kset;
 
        if (esrt->fw_resource_version == 1) {
@@ -182,7 +179,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
 
                entry->esre.esre1 = esre;
                rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
-                                         "%s", name);
+                                         "entry%d", entry_num);
                if (rc) {
                        kfree(entry);
                        return rc;