/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>

#include <asm/setup.h>
#include <asm/e820/api.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/pgalloc.h>

/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;
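
/*
 * Toggle the executable permission on the EFI boot/runtime services
 * code regions. This only matters when the CPU enforces NX; without
 * NX support every mapping is executable anyway.
 */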
static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}
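
/*
 * Prepare page tables for calling the firmware with physical
 * addresses: either switch to the dedicated EFI page table, or, for
 * efi=old_map, temporarily install a 1:1 mapping of low memory in the
 * kernel page tables. Whatever is saved here is undone again by
 * efi_call_phys_epilog().
 */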
pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
	pgd_t *save_pgd, *pgd_k, *pgd_efi;
	p4d_t *p4d, *p4d_k, *p4d_efi;
	pud_t *pud;
	int pgd;
	int n_pgds, i, j;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		save_pgd = (pgd_t *)read_cr3();
		write_cr3((unsigned long)efi_scratch.efi_pgt);
		goto out;
	}

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);

	/*
	 * Build 1:1 identity mapping for efi=old_map usage. Note that
	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
	 * it is PUD_SIZE aligned with KASLR enabled. So for a given physical
	 * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
	 * PUD entry of __va(X) into the PUD entry of X to build the 1:1
	 * mapping. This means we can only reuse the PMD tables of the direct
	 * mapping here.
	 */
	for (pgd = 0; pgd < n_pgds; pgd++) {
		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
		pgd_efi = pgd_offset_k(addr_pgd);
		save_pgd[pgd] = *pgd_efi;

		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
		if (!p4d) {
			pr_err("Failed to allocate p4d table!\n");
			goto out;
		}

		for (i = 0; i < PTRS_PER_P4D; i++) {
			addr_p4d = addr_pgd + i * P4D_SIZE;
			p4d_efi = p4d + p4d_index(addr_p4d);

			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
			if (!pud) {
				pr_err("Failed to allocate pud table!\n");
				goto out;
			}

			for (j = 0; j < PTRS_PER_PUD; j++) {
				addr_pud = addr_p4d + j * PUD_SIZE;
				if (addr_pud > (max_pfn << PAGE_SHIFT))
					break;

				vaddr = (unsigned long)__va(addr_pud);
				pgd_k = pgd_offset_k(vaddr);
				p4d_k = p4d_offset(pgd_k, vaddr);
				pud[j] = *pud_offset(p4d_k, vaddr);
			}
		}
	}
out:
	__flush_tlb_all();

	return save_pgd;
}

void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx, i, nr_pgds;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
		if (!(pgd_val(*pgd) & _PAGE_PRESENT))
			continue;

		for (i = 0; i < PTRS_PER_P4D; i++) {
			p4d = p4d_offset(pgd,
					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
			if (!(p4d_val(*p4d) & _PAGE_PRESENT))
				continue;

			pud = (pud_t *)p4d_page_vaddr(*p4d);
			pud_free(&init_mm, pud);
		}

		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		p4d_free(&init_mm, p4d);
	}

	kfree(save_pgd);
	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}

static pgd_t *efi_pgd;

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
	if (!efi_pgd)
		return -ENOMEM;

	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud) {
		if (CONFIG_PGTABLE_LEVELS > 4)
			free_page((unsigned long)pgd_page_vaddr(*pgd));
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	/*
	 * We can share all PGD entries apart from the one entry that
	 * covers the EFI runtime mapping space.
	 *
	 * Make sure the EFI runtime region mappings are guaranteed to
	 * only span a single PGD entry and that the entry also maps
	 * other important kernel regions.
	 */
	BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
			(EFI_VA_END & PGDIR_MASK));

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/*
	 * As with PGDs, we share all P4D entries apart from the one entry
	 * that covers the EFI runtime mapping space.
	 */
	BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	bool bad_size;

	if (!va)
		return 0;

	if (virt_addr_valid(va))
		return virt_to_phys(va);

	/*
	 * A fully aligned variable on the stack is guaranteed not to
	 * cross a page boundary. Try to catch strings on the stack by
	 * checking that 'size' is a power of two.
	 */
	bad_size = size > PAGE_SIZE || !is_power_of_2(size);
	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);

	return slow_virt_to_phys(va);
}

#define virt_to_phys_or_null(addr) \
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
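
/*
 * Populate efi_pgd with everything the firmware touches before and
 * during SetVirtualAddressMap(): the new memory map itself, a 1:1
 * mapping of the first physical page, and (in mixed mode) the kernel
 * text plus a stack below 4GB.
 */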
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text;
	struct page *page;
	unsigned npages;
	pgd_t *pgd;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
	pgd = efi_pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX | _PAGE_RW)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	efi_scratch.use_pgd = true;

	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical page,
	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during early
	 * boot with such firmware. We are free to hand this page to the BIOS,
	 * as trim_bios_range() will reserve the first page and isolate it away
	 * from memory allocators anyway.
	 */
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page)
		panic("Unable to allocate EFI runtime stack < 4GB\n");

	efi_scratch.phys_stack = virt_to_phys(page_address(page));
	efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, _PAGE_RW)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	return 0;
}
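
/*
 * Map a single EFI memory descriptor at the requested virtual address
 * in efi_pgd, marking it uncached unless the region supports
 * write-back caching.
 */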
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}
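
/*
 * Pick and install the virtual address for a runtime region: mixed
 * mode keeps the 1:1 address, otherwise the region is carved out of
 * the top-down EFI VA space while preserving its offset within a 2M
 * page so that large pages can still be used.
 */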
void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

/*
 * The kexec kernel will use efi_map_region_fixed() to map EFI runtime memory
 * ranges. md->virt_addr is the original virtual address which had already been
 * mapped in the first (kexec) kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}
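
/*
 * Remap an EFI region for kernel use: true MMIO regions go through
 * ioremap(), while RAM-backed regions are added to the direct mapping
 * and returned via __va(), optionally marked uncached.
 */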
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}
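
/*
 * Record where the EFI setup_data payload lives; the actual data
 * follows immediately after the setup_data header.
 */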
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}

static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;
	int err1, err2;

	/* Update the 1:1 mapping */
	pfn = md->phys_addr >> PAGE_SHIFT;
	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
	if (err1) {
		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
	if (err2) {
		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	return err1 || err2;
}

static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
	unsigned long pf = 0;

	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;

	return efi_update_mappings(md, pf);
}
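
/*
 * Tighten permissions on the EFI runtime regions after
 * SetVirtualAddressMap(), preferring the EFI Memory Attributes Table
 * over the EFI_PROPERTIES_TABLE-derived NX information.
 */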
void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;

	if (efi_enabled(EFI_OLD_MEMMAP)) {
		if (__supported_pte_mask & _PAGE_NX)
			runtime_code_page_mkexec();
		return;
	}

	/*
	 * Use the EFI Memory Attribute Table for mapping permissions if it
	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
	 */
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}

	/*
	 * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
	 * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
	 * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
	 * published by the firmware. Even if we find a buggy implementation
	 * of EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
	 * EFI_PROPERTIES_TABLE, for the same reason.
	 */
	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		efi_update_mappings(md, pf);
	}
}
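
/* Dump the EFI page table when CONFIG_EFI_PGT_DUMP is enabled (debugging aid). */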
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, efi_pgd);
#endif
}

#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);

#define runtime_service32(func)						 \
({									 \
	u32 table = (u32)(unsigned long)efi.systab;			 \
	u32 *rt, *___f;							 \
									 \
	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
	*___f;								 \
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables. This function must be called before runtime_service32().
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(f, ...)						\
({									\
	efi_status_t __s;						\
	unsigned long __flags;						\
	u32 __func;							\
									\
	local_irq_save(__flags);					\
	arch_efi_call_virt_setup();					\
									\
	__func = runtime_service32(f);					\
	__s = efi64_thunk(__func, __VA_ARGS__);				\
									\
	arch_efi_call_virt_teardown();					\
	local_irq_restore(__flags);					\
									\
	__s;								\
})
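
/*
 * Unlike the other thunks below, this runs before the runtime services
 * have been switched to virtual mode, so it installs the EFI page
 * table by hand (saving and restoring CR3) rather than relying on
 * arch_efi_call_virt_setup().
 */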
efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	u32 func;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_scratch.prev_cr3 = read_cr3();
	write_cr3((unsigned long)efi_scratch.efi_pgt);
	__flush_tlb_all();

	func = (u32)(unsigned long)phys_set_virtual_address_map;
	status = efi64_thunk(func, memory_map_size, descriptor_size,
			     descriptor_version, virtual_map);

	write_cr3(efi_scratch.prev_cr3);
	__flush_tlb_all();
	local_irq_restore(flags);

	return status;
}
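
/*
 * The time services below serialize against the CMOS RTC via rtc_lock
 * and pass the firmware physical addresses of their arguments, since
 * 32-bit firmware cannot dereference 64-bit pointers.
 */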
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;
	u32 phys_tm, phys_tc;

	spin_lock(&rtc_lock);
	phys_tm = virt_to_phys_or_null(tm);
	phys_tc = virt_to_phys_or_null(tc);
	status = efi_thunk(get_time, phys_tm, phys_tc);
	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);
	phys_tm = virt_to_phys_or_null(tm);
	status = efi_thunk(set_time, phys_tm);
	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_enabled, phys_pending, phys_tm;

	spin_lock(&rtc_lock);
	phys_enabled = virt_to_phys_or_null(enabled);
	phys_pending = virt_to_phys_or_null(pending);
	phys_tm = virt_to_phys_or_null(tm);
	status = efi_thunk(get_wakeup_time, phys_enabled,
			   phys_pending, phys_tm);
	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);
	phys_tm = virt_to_phys_or_null(tm);
	status = efi_thunk(set_wakeup_time, enabled, phys_tm);
	spin_unlock(&rtc_lock);

	return status;
}

static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}
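
/*
 * Variable services: every pointer argument, including the UCS-2
 * variable name, has to be handed to the 32-bit firmware as a 32-bit
 * physical address.
 */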
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	status = efi_thunk(get_variable, phys_name, phys_vendor,
			   phys_attr, phys_data_size, phys_data);

	return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	/* If data_size is > sizeof(u32) we've got problems */
	status = efi_thunk(set_variable, phys_name, phys_vendor,
			   attr, data_size, phys_data);

	return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	status = efi_thunk(get_next_variable, phys_name_size,
			   phys_name, phys_vendor);

	return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	efi_status_t status;
	u32 phys_count;

	phys_count = virt_to_phys_or_null(count);
	status = efi_thunk(get_next_high_mono_count, phys_count);

	return status;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
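
/*
 * Install the 32-bit thunk wrappers as the kernel's EFI runtime
 * service entry points; used when a 64-bit kernel boots on 32-bit
 * firmware.
 */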
void efi_thunk_runtime_setup(void)
{
	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}

#endif /* CONFIG_EFI_MIXED */