4 #include <asm/fpu/api.h>
5 #include <asm/pgtable.h>
9 * We map the EFI regions needed for runtime services non-contiguously,
10 * with preserved alignment on virtual addresses starting from -4G down
11 * for a total max space of 64G. This way, we provide for stable runtime
12 * services addresses across kernels so that a kexec'd kernel can still
15 * This is the main reason why we're doing stable VA mappings for RT
18 * This flag is used in conjunction with a chicken bit called
19 * "efi=old_map" which can be used as a fallback to the old runtime
20 * services mapping method in case there's some b0rkage with a
21 * particular EFI implementation (haha, it is hard to hold up the
24 #define EFI_OLD_MEMMAP EFI_ARCH_1
26 #define EFI32_LOADER_SIGNATURE "EL32"
27 #define EFI64_LOADER_SIGNATURE "EL64"
32 extern unsigned long asmlinkage efi_call_phys(void *, ...);
35 * Wrap all the virtual calls in a way that forces the parameters on the stack.
38 /* Use this macro if your virtual returns a non-void value */
39 #define efi_call_virt(f, args...) \
43 __s = ((efi_##f##_t __attribute__((regparm(0)))*) \
44 efi.systab->runtime->f)(args); \
49 /* Use this macro if your virtual call does not return any value */
50 #define __efi_call_virt(f, args...) \
53 ((efi_##f##_t __attribute__((regparm(0)))*) \
54 efi.systab->runtime->f)(args); \
58 #define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
60 #else /* !CONFIG_X86_32 */
/* 64-bit loader signature advertised in boot_params (cf. "EL32" above). */
62 #define EFI_LOADER_SIGNATURE	"EL64"
/* Assembly trampoline that performs the actual firmware call (SysV -> MS ABI). */
64 extern u64 asmlinkage efi_call(void *fp, ...);
/* On x86-64 a physical-mode call goes through the same efi_call() trampoline. */
66 #define efi_call_phys(f, args...)		efi_call((f), args)
69 * Scratch space used for switching the pagetable in the EFI stub
/*
 * NOTE(review): interior lines of this macro are missing from this chunk.
 * Visible steps: sync the EFI region mappings into the current page tables,
 * enter an FPU-usable section, optionally switch CR3 to the dedicated EFI
 * page table (efi_scratch.use_pgd), invoke the runtime service via the
 * efi_call() trampoline, then restore the saved CR3 and leave the FPU
 * section. Any locking/flush lines in between are not visible here —
 * confirm against the full file before relying on exact ordering.
 */
79 #define efi_call_virt(f, ...)						\
83 	efi_sync_low_kernel_mappings();					\
85 	__kernel_fpu_begin();						\
87 	if (efi_scratch.use_pgd) {					\
88 		efi_scratch.prev_cr3 = read_cr3();			\
89 		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
93 	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
95 	if (efi_scratch.use_pgd) {					\
96 		write_cr3(efi_scratch.prev_cr3);			\
100 	__kernel_fpu_end();						\
106 * All X86_64 virt calls return non-void values. Thus, use non-void call for
107 * virt calls that would be void on X86_32.
109 #define __efi_call_virt(f, args...) efi_call_virt(f, args)
/*
 * x86-64 uses a real mapping helper (contrast with the X86_32 branch, where
 * efi_ioremap is just ioremap_cache). Maps @size bytes at physical @addr for
 * a region of EFI memory @type with attributes @attribute.
 */
111 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
112 					u32 type, u64 attribute);
116 * CONFIG_KASAN may redefine memset to __memset. __memset function is present
117 * only in kernel binary. Since the EFI stub linked into a separate binary it
118 * doesn't have __memset(). So we should use standard memset from
119 * arch/x86/boot/compressed/string.c. The same applies to memcpy and memmove.
126 #endif /* CONFIG_X86_32 */
/* Scratch state (CR3 save slot, EFI pgt) used by the efi_call_virt() macros. */
128 extern struct efi_scratch efi_scratch;
/* Toggle NX on a runtime-services region descriptor. */
129 extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
/* Reserve the firmware-provided memory map early via memblock. */
130 extern int __init efi_memblock_x86_reserve_range(void);
/* Bracket a physical-mode firmware call: swap in/out the boot page tables. */
131 extern pgd_t * __init efi_call_phys_prolog(void);
132 extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
133 extern void __init efi_print_memmap(void);
134 extern void __init efi_unmap_memmap(void);
/* Remap a range uncached (used for runtime regions needing UC attributes). */
135 extern void __init efi_memory_uc(u64 addr, unsigned long size);
/* Establish the virtual mapping for one EFI memory descriptor. */
136 extern void __init efi_map_region(efi_memory_desc_t *md);
/* Same, but at the fixed (kexec-stable) virtual address. */
137 extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
138 extern void efi_sync_low_kernel_mappings(void);
/* Build/teardown of the dedicated EFI page tables used by efi_call_virt(). */
139 extern int __init efi_alloc_page_tables(void);
140 extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
141 extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
/* Legacy 1:1-style mapping path selected by the efi=old_map chicken bit. */
142 extern void __init old_map_region(efi_memory_desc_t *md);
143 extern void __init runtime_code_page_mkexec(void);
144 extern void __init efi_runtime_mkexec(void);
145 extern void __init efi_dump_pagetable(void);
146 extern void __init efi_apply_memmap_quirks(void);
/* Re-use config tables handed over from a previous (kexec) kernel. */
147 extern int __init efi_reuse_config(u64 tables, int nr_tables);
148 extern void efi_delete_dummy_variable(void);
150 struct efi_setup_data {
158 extern u64 efi_setup;
162 static inline bool efi_is_native(void)
164 return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
167 static inline bool efi_runtime_supported(void)
172 if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
/* earlyprintk=efi console backend. */
178 extern struct console early_efi_console;
/* Consume the kexec setup_data blob at @phys_addr (@data_len bytes). */
179 extern void parse_efi_setup(u64 phys_addr, u32 data_len);
181 #ifdef CONFIG_EFI_MIXED
/* Mixed mode: 64-bit kernel on 32-bit firmware — calls are thunked. */
182 extern void efi_thunk_runtime_setup(void);
/*
 * Hand the virtual address map to 32-bit firmware through the thunk;
 * @phys_set_virtual_address_map is the physical address of the service.
 */
183 extern efi_status_t efi_thunk_set_virtual_address_map(
184 	void *phys_set_virtual_address_map,
185 	unsigned long memory_map_size,
186 	unsigned long descriptor_size,
187 	u32 descriptor_version,
188 	efi_memory_desc_t *virtual_map);
190 static inline void efi_thunk_runtime_setup(void) {}
191 static inline efi_status_t efi_thunk_set_virtual_address_map(
192 void *phys_set_virtual_address_map,
193 unsigned long memory_map_size,
194 unsigned long descriptor_size,
195 u32 descriptor_version,
196 efi_memory_desc_t *virtual_map)
200 #endif /* CONFIG_EFI_MIXED */
203 /* arch specific definitions used by the stub code */
215 u64 exit_boot_services;
217 efi_status_t (*call)(unsigned long, ...);
/* Accessor for the boot-time EFI config (bitness, system table, call fn). */
221 __pure const struct efi_config *__efi_early(void);
/*
 * Invoke a boot service @f through the config's call pointer.
 * NOTE(review): the expansion carries a trailing semicolon, so call sites
 * that add their own ';' produce an (accepted) empty statement — kept as-is.
 */
223 #define efi_call_early(f, ...)						\
224 	__efi_early()->call(__efi_early()->f, __VA_ARGS__);
/* Quirk hook: some platforms must reboot through EFI runtime services. */
226 extern bool efi_reboot_required(void);
229 static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
230 static inline bool efi_reboot_required(void)
234 #endif /* CONFIG_EFI */
236 #endif /* _ASM_X86_EFI_H */