From a8147dba75b188bff87d4ad072db84a0b70d716d Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 24 Dec 2019 16:10:06 +0100 Subject: efi/x86: Rename efi_is_native() to efi_is_mixed() The ARM architecture does not permit combining 32-bit and 64-bit code at the same privilege level, and so EFI mixed mode is strictly a x86 concept. In preparation of turning the 32/64 bit distinction in shared stub code to a native vs mixed one, refactor x86's current use of the helper function efi_is_native() into efi_is_mixed(). Signed-off-by: Ard Biesheuvel Cc: Arvind Sankar Cc: Borislav Petkov Cc: James Morse Cc: Matt Fleming Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20191224151025.32482-7-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 8 ++++---- arch/x86/platform/efi/efi_64.c | 4 ++-- arch/x86/platform/efi/quirks.c | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 38d44f36d5ed..e188b7ce0796 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -828,7 +828,7 @@ static bool should_map_region(efi_memory_desc_t *md) * Map all of RAM so that we can access arguments in the 1:1 * mapping when making EFI runtime calls. */ - if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_is_native()) { + if (efi_is_mixed()) { if (md->type == EFI_CONVENTIONAL_MEMORY || md->type == EFI_LOADER_DATA || md->type == EFI_LOADER_CODE) @@ -903,7 +903,7 @@ static void __init kexec_enter_virtual_mode(void) * kexec kernel because in the initial boot something else might * have been mapped at these virtual addresses. */ - if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) { + if (efi_is_mixed() || efi_enabled(EFI_OLD_MEMMAP)) { efi_memmap_unmap(); clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); return; @@ -1040,7 +1040,7 @@ static void __init __efi_enter_virtual_mode(void) efi_sync_low_kernel_mappings(); - if (efi_is_native()) { + if (!efi_is_mixed()) { status = phys_efi_set_virtual_address_map( efi.memmap.desc_size * count, efi.memmap.desc_size, @@ -1071,7 +1071,7 @@ static void __init __efi_enter_virtual_mode(void) */ efi.runtime_version = efi_systab.hdr.revision; - if (efi_is_native()) + if (!efi_is_mixed()) efi_native_runtime_setup(); else efi_thunk_runtime_setup(); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 08ce8177c3af..885e50a707a6 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -388,7 +388,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) * text and allocate a new stack because we can't rely on the * stack pointer being < 4GB. */ - if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native()) + if (!efi_is_mixed()) return 0; page = alloc_page(GFP_KERNEL|__GFP_DMA32); @@ -449,7 +449,7 @@ void __init efi_map_region(efi_memory_desc_t *md) * booting in EFI mixed mode, because even though we may be * running a 64-bit kernel, the firmware may only be 32-bit. */ - if (!efi_is_native () && IS_ENABLED(CONFIG_EFI_MIXED)) { + if (efi_is_mixed()) { md->virt_addr = md->phys_addr; return; } diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index f8f0220b6a66..eb421cb35108 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -395,7 +395,7 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md) * EFI runtime calls, hence don't unmap EFI boot services code/data * regions. 
*/ - if (!efi_is_native()) + if (efi_is_mixed()) return; if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages)) -- cgit From 960a8d01834eabc4549928c60f8ce0300ad08519 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 24 Dec 2019 16:10:11 +0100 Subject: efi/libstub: Use stricter typing for firmware function pointers We will soon remove another level of pointer casting, so let's make sure all type handling involving firmware calls at boot time is correct. Signed-off-by: Ard Biesheuvel Cc: Arvind Sankar Cc: Borislav Petkov Cc: James Morse Cc: Matt Fleming Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20191224151025.32482-12-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index e188b7ce0796..d96953d9d4e7 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -385,7 +385,7 @@ static int __init efi_systab_init(void *phys) tmp |= systab64->con_in; efi_systab.con_out_handle = systab64->con_out_handle; tmp |= systab64->con_out_handle; - efi_systab.con_out = systab64->con_out; + efi_systab.con_out = (void *)(unsigned long)systab64->con_out; tmp |= systab64->con_out; efi_systab.stderr_handle = systab64->stderr_handle; tmp |= systab64->stderr_handle; @@ -427,7 +427,7 @@ static int __init efi_systab_init(void *phys) efi_systab.con_in_handle = systab32->con_in_handle; efi_systab.con_in = systab32->con_in; efi_systab.con_out_handle = systab32->con_out_handle; - efi_systab.con_out = systab32->con_out; + efi_systab.con_out = (void *)(unsigned long)systab32->con_out; efi_systab.stderr_handle = systab32->stderr_handle; efi_systab.stderr = systab32->stderr; efi_systab.runtime = (void *)(unsigned long)systab32->runtime; -- cgit From afc4cc71cf78a8d691023da8ebcc31c3394a1674 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 24 Dec 2019 16:10:13 +0100 Subject: efi/libstub/x86: Avoid thunking for native firmware calls We use special wrapper routines to invoke firmware services in the native case as well as the mixed mode case. For mixed mode, the need is obvious, but for the native cases, we can simply rely on the compiler to generate the indirect call, given that GCC now has support for the MS calling convention (and has had it for quite some time now). Note that on i386, the decompressor and the EFI stub are not built with -mregparm=3 like the rest of the i386 kernel, so we can safely allow the compiler to emit the indirect calls here as well. So drop all the wrappers and indirection, and switch to either native calls, or direct calls into the thunk routine for mixed mode. 
Signed-off-by: Ard Biesheuvel Cc: Arvind Sankar Cc: Borislav Petkov Cc: James Morse Cc: Matt Fleming Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20191224151025.32482-14-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi_64.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 885e50a707a6..03c2ed3c645c 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -635,8 +635,6 @@ void efi_switch_mm(struct mm_struct *mm) } #ifdef CONFIG_EFI_MIXED -extern efi_status_t efi64_thunk(u32, ...); - static DEFINE_SPINLOCK(efi_runtime_lock); #define runtime_service32(func) \ -- cgit From ffc2760bcf2dba0dbef74013ed73eea8310cc52c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:37 +0100 Subject: efi/x86: Map the entire EFI vendor string before copying it Fix a couple of issues with the way we map and copy the vendor string: - we map only 2 bytes, which usually works since you get at least a page, but if the vendor string happens to cross a page boundary, a crash will result - only call early_memunmap() if early_memremap() succeeded, or we will call it with a NULL address which it doesn't like, - while at it, switch to early_memremap_ro(), and array indexing rather than pointer dereferencing to read the CHAR16 characters. Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Fixes: 5b83683f32b1 ("x86: EFI runtime service support") Link: https://lkml.kernel.org/r/20200103113953.9571-5-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index d96953d9d4e7..3ce32c31bb61 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -541,7 +541,6 @@ void __init efi_init(void) efi_char16_t *c16; char vendor[100] = "unknown"; int i = 0; - void *tmp; #ifdef CONFIG_X86_32 if (boot_params.efi_info.efi_systab_hi || @@ -566,14 +565,16 @@ void __init efi_init(void) /* * Show what we know for posterity */ - c16 = tmp = early_memremap(efi.systab->fw_vendor, 2); + c16 = early_memremap_ro(efi.systab->fw_vendor, + sizeof(vendor) * sizeof(efi_char16_t)); if (c16) { - for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i) - vendor[i] = *c16++; + for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i) + vendor[i] = c16[i]; vendor[i] = '\0'; - } else + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } else { pr_err("Could not map the firmware vendor!\n"); - early_memunmap(tmp, 2); + } pr_info("EFI v%u.%.02u by %s\n", efi.systab->hdr.revision >> 16, -- cgit From 98dd0e3a0ceebd594942777e4aca9791177e3f2b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:39 +0100 Subject: efi/x86: Split off some old memmap handling into separate routines In a subsequent patch, we will fold the prolog/epilog routines that are part of the support code to call SetVirtualAddressMap() with a 1:1 mapping into the callers. However, the 64-bit version mostly consists of ugly mapping code that is only used when efi=old_map is in effect, which is extremely rare. So let's move this code out of the way so it does not clutter the common code. 
Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-7-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi_64.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 03c2ed3c645c..a72bbabbc595 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -72,7 +72,9 @@ static void __init early_code_mapping_set_exec(int executable) } } -pgd_t * __init efi_call_phys_prolog(void) +void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd); + +pgd_t * __init efi_old_memmap_phys_prolog(void) { unsigned long vaddr, addr_pgd, addr_p4d, addr_pud; pgd_t *save_pgd, *pgd_k, *pgd_efi; @@ -82,11 +84,6 @@ pgd_t * __init efi_call_phys_prolog(void) int pgd; int n_pgds, i, j; - if (!efi_enabled(EFI_OLD_MEMMAP)) { - efi_switch_mm(&efi_mm); - return efi_mm.pgd; - } - early_code_mapping_set_exec(1); n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); @@ -143,11 +140,11 @@ pgd_t * __init efi_call_phys_prolog(void) __flush_tlb_all(); return save_pgd; out: - efi_call_phys_epilog(save_pgd); + efi_old_memmap_phys_epilog(save_pgd); return NULL; } -void __init efi_call_phys_epilog(pgd_t *save_pgd) +void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd) { /* * After the lock is released, the original page table is restored. @@ -158,11 +155,6 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) p4d_t *p4d; pud_t *pud; - if (!efi_enabled(EFI_OLD_MEMMAP)) { - efi_switch_mm(efi_scratch.prev_mm); - return; - } - nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) { @@ -193,6 +185,23 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) early_code_mapping_set_exec(0); } +pgd_t * __init efi_call_phys_prolog(void) +{ + if (efi_enabled(EFI_OLD_MEMMAP)) + return efi_old_memmap_phys_prolog(); + + efi_switch_mm(&efi_mm); + return efi_mm.pgd; +} + +void __init efi_call_phys_epilog(pgd_t *save_pgd) +{ + if (efi_enabled(EFI_OLD_MEMMAP)) + efi_old_memmap_phys_epilog(save_pgd); + else + efi_switch_mm(efi_scratch.prev_mm); +} + EXPORT_SYMBOL_GPL(efi_mm); /* -- cgit From 6982947045734480b8b57521e8068073fe36bd14 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:40 +0100 Subject: efi/x86: Split SetVirtualAddresMap() wrappers into 32 and 64 bit versions Split the phys_efi_set_virtual_address_map() routine into 32 and 64 bit versions, so we can simplify them individually in subsequent patches. There is very little overlap between the logic anyway, and this has already been factored out in prolog/epilog routines which are completely different between 32 bit and 64 bit. So let's take it one step further, and get rid of the overlap completely. 
Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-8-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 30 ++-------------------- arch/x86/platform/efi/efi_32.c | 21 ++++++++++++---- arch/x86/platform/efi/efi_64.c | 56 +++++++++++++++++++++++++++--------------- 3 files changed, 54 insertions(+), 53 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 3ce32c31bb61..50f8123e658a 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -54,7 +54,7 @@ #include #include -static struct efi efi_phys __initdata; +struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; static efi_config_table_type_t arch_tables[] __initdata = { @@ -97,32 +97,6 @@ static int __init setup_add_efi_memmap(char *arg) } early_param("add_efi_memmap", setup_add_efi_memmap); -static efi_status_t __init phys_efi_set_virtual_address_map( - unsigned long memory_map_size, - unsigned long descriptor_size, - u32 descriptor_version, - efi_memory_desc_t *virtual_map) -{ - efi_status_t status; - unsigned long flags; - pgd_t *save_pgd; - - save_pgd = efi_call_phys_prolog(); - if (!save_pgd) - return EFI_ABORTED; - - /* Disable interrupts around EFI calls: */ - local_irq_save(flags); - status = efi_call_phys(efi_phys.set_virtual_address_map, - memory_map_size, descriptor_size, - descriptor_version, virtual_map); - local_irq_restore(flags); - - efi_call_phys_epilog(save_pgd); - - return status; -} - void __init efi_find_mirror(void) { efi_memory_desc_t *md; @@ -1042,7 +1016,7 @@ static void __init __efi_enter_virtual_mode(void) efi_sync_low_kernel_mappings(); if (!efi_is_mixed()) { - status = phys_efi_set_virtual_address_map( + status = efi_set_virtual_address_map( efi.memmap.desc_size * count, efi.memmap.desc_size, efi.memmap.desc_version, diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 9959657127f4..185950ade0e9 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -66,9 +66,16 @@ void __init efi_map_region(efi_memory_desc_t *md) void __init efi_map_region_fixed(efi_memory_desc_t *md) {} void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} -pgd_t * __init efi_call_phys_prolog(void) +extern struct efi efi_phys; + +efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, + unsigned long descriptor_size, + u32 descriptor_version, + efi_memory_desc_t *virtual_map) { struct desc_ptr gdt_descr; + efi_status_t status; + unsigned long flags; pgd_t *save_pgd; /* Current pgd is swapper_pg_dir, we'll restore it later: */ @@ -80,14 +87,18 @@ pgd_t * __init efi_call_phys_prolog(void) gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); - return save_pgd; -} + /* Disable interrupts around EFI calls: */ + local_irq_save(flags); + status = efi_call_phys(efi_phys.set_virtual_address_map, + memory_map_size, descriptor_size, + descriptor_version, virtual_map); + local_irq_restore(flags); -void __init efi_call_phys_epilog(pgd_t *save_pgd) -{ load_fixmap_gdt(0); load_cr3(save_pgd); __flush_tlb_all(); + + return status; } void __init efi_runtime_update_mappings(void) diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index a72bbabbc595..a7f11d1ff7c4 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -72,9 +72,9 @@ static void 
__init early_code_mapping_set_exec(int executable) } } -void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd); +static void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd); -pgd_t * __init efi_old_memmap_phys_prolog(void) +static pgd_t * __init efi_old_memmap_phys_prolog(void) { unsigned long vaddr, addr_pgd, addr_p4d, addr_pud; pgd_t *save_pgd, *pgd_k, *pgd_efi; @@ -144,7 +144,7 @@ out: return NULL; } -void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd) +static void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd) { /* * After the lock is released, the original page table is restored. @@ -185,23 +185,6 @@ void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd) early_code_mapping_set_exec(0); } -pgd_t * __init efi_call_phys_prolog(void) -{ - if (efi_enabled(EFI_OLD_MEMMAP)) - return efi_old_memmap_phys_prolog(); - - efi_switch_mm(&efi_mm); - return efi_mm.pgd; -} - -void __init efi_call_phys_epilog(pgd_t *save_pgd) -{ - if (efi_enabled(EFI_OLD_MEMMAP)) - efi_old_memmap_phys_epilog(save_pgd); - else - efi_switch_mm(efi_scratch.prev_mm); -} - EXPORT_SYMBOL_GPL(efi_mm); /* @@ -1018,3 +1001,36 @@ void efi_thunk_runtime_setup(void) efi.query_capsule_caps = efi_thunk_query_capsule_caps; } #endif /* CONFIG_EFI_MIXED */ + +efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, + unsigned long descriptor_size, + u32 descriptor_version, + efi_memory_desc_t *virtual_map) +{ + efi_status_t status; + unsigned long flags; + pgd_t *save_pgd = NULL; + + if (efi_enabled(EFI_OLD_MEMMAP)) { + save_pgd = efi_old_memmap_phys_prolog(); + if (!save_pgd) + return EFI_ABORTED; + } else { + efi_switch_mm(&efi_mm); + } + + /* Disable interrupts around EFI calls: */ + local_irq_save(flags); + status = efi_call(efi.systab->runtime->set_virtual_address_map, + memory_map_size, descriptor_size, + descriptor_version, virtual_map); + local_irq_restore(flags); + + + if (save_pgd) + efi_old_memmap_phys_epilog(save_pgd); + else + efi_switch_mm(efi_scratch.prev_mm); + + return status; +} -- cgit From a46d674068b69b3897fc0d659e25f74b7ab52647 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:41 +0100 Subject: efi/x86: Simplify i386 efi_call_phys() firmware call wrapper The variadic efi_call_phys() wrapper that exists on i386 was originally created to call into any EFI firmware runtime service, but in practice, we only use it once, to call SetVirtualAddressMap() during early boot. The flexibility provided by the variadic nature also makes it type unsafe, and makes the assembler code more complicated than needed, since it has to deal with an unknown number of arguments living on the stack. So clean this up, by renaming the helper to efi_call_svam(), and dropping the unneeded complexity. Let's also drop the reference to the efi_phys struct and grab the address from the EFI system table directly. 
Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-9-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi_32.c | 5 +- arch/x86/platform/efi/efi_stub_32.S | 109 ++++++------------------------------ 2 files changed, 20 insertions(+), 94 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 185950ade0e9..71dddd1620f9 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -66,7 +66,8 @@ void __init efi_map_region(efi_memory_desc_t *md) void __init efi_map_region_fixed(efi_memory_desc_t *md) {} void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} -extern struct efi efi_phys; +efi_status_t efi_call_svam(efi_set_virtual_address_map_t *__efiapi *, + u32, u32, u32, void *); efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, unsigned long descriptor_size, @@ -89,7 +90,7 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, /* Disable interrupts around EFI calls: */ local_irq_save(flags); - status = efi_call_phys(efi_phys.set_virtual_address_map, + status = efi_call_svam(&efi.systab->runtime->set_virtual_address_map, memory_map_size, descriptor_size, descriptor_version, virtual_map); local_irq_restore(flags); diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index eed8b5b441f8..75c46e7a809f 100644 --- a/arch/x86/platform/efi/efi_stub_32.S +++ b/arch/x86/platform/efi/efi_stub_32.S @@ -7,118 +7,43 @@ */ #include +#include #include -/* - * efi_call_phys(void *, ...) is a function with variable parameters. - * All the callers of this function assure that all the parameters are 4-bytes. - */ - -/* - * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save. - * So we'd better save all of them at the beginning of this function and restore - * at the end no matter how many we use, because we can not assure EFI runtime - * service functions will comply with gcc calling convention, too. - */ + __INIT +SYM_FUNC_START(efi_call_svam) + push 8(%esp) + push 8(%esp) + push %ecx + push %edx -.text -SYM_FUNC_START(efi_call_phys) /* - * 0. The function can only be called in Linux kernel. So CS has been - * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found - * the values of these registers are the same. And, the corresponding - * GDT entries are identical. So I will do nothing about segment reg - * and GDT, but change GDT base register in prolog and epilog. - */ - - /* - * 1. Now I am running with EIP = + PAGE_OFFSET. - * But to make it smoothly switch from virtual mode to flat mode. - * The mapping of lower virtual memory has been created in prolog and - * epilog. + * Switch to the flat mapped alias of this routine, by jumping to the + * address of label '1' after subtracting PAGE_OFFSET from it. */ movl $1f, %edx subl $__PAGE_OFFSET, %edx jmp *%edx 1: - /* - * 2. Now on the top of stack is the return - * address in the caller of efi_call_phys(), then parameter 1, - * parameter 2, ..., param n. To make things easy, we save the return - * address of efi_call_phys in a global variable. - */ - popl %edx - movl %edx, saved_return_addr - /* get the function pointer into ECX*/ - popl %ecx - movl %ecx, efi_rt_function_ptr - movl $2f, %edx - subl $__PAGE_OFFSET, %edx - pushl %edx - - /* - * 3. Clear PG bit in %CR0. 
- */ + /* disable paging */ movl %cr0, %edx andl $0x7fffffff, %edx movl %edx, %cr0 - jmp 1f -1: - /* - * 4. Adjust stack pointer. - */ + /* convert the stack pointer to a flat mapped address */ subl $__PAGE_OFFSET, %esp - /* - * 5. Call the physical function. - */ - jmp *%ecx + /* call the EFI routine */ + call *(%eax) -2: - /* - * 6. After EFI runtime service returns, control will return to - * following instruction. We'd better readjust stack pointer first. - */ - addl $__PAGE_OFFSET, %esp + /* convert ESP back to a kernel VA, and pop the outgoing args */ + addl $__PAGE_OFFSET + 16, %esp - /* - * 7. Restore PG bit - */ + /* re-enable paging */ movl %cr0, %edx orl $0x80000000, %edx movl %edx, %cr0 - jmp 1f -1: - /* - * 8. Now restore the virtual mode from flat mode by - * adding EIP with PAGE_OFFSET. - */ - movl $1f, %edx - jmp *%edx -1: - - /* - * 9. Balance the stack. And because EAX contain the return value, - * we'd better not clobber it. - */ - leal efi_rt_function_ptr, %edx - movl (%edx), %ecx - pushl %ecx - /* - * 10. Push the saved return address onto the stack and return. - */ - leal saved_return_addr, %edx - movl (%edx), %ecx - pushl %ecx ret -SYM_FUNC_END(efi_call_phys) -.previous - -.data -saved_return_addr: - .long 0 -efi_rt_function_ptr: - .long 0 +SYM_FUNC_END(efi_call_svam) -- cgit From e5f930fe8dafd2055220c95958926af16ee20713 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:42 +0100 Subject: efi/x86: Simplify 64-bit EFI firmware call wrapper The efi_call() wrapper used to invoke EFI runtime services serves a number of purposes: - realign the stack to 16 bytes - preserve FP and CR0 register state - translate from SysV to MS calling convention. Preserving CR0.TS is no longer necessary in Linux, and preserving the FP register state is also redundant in most cases, since efi_call() is almost always used from within the scope of a pair of kernel_fpu_begin()/ kernel_fpu_end() calls, with the exception of the early call to SetVirtualAddressMap() and the SGI UV support code. So let's add a pair of kernel_fpu_begin()/_end() calls there as well, and remove the unnecessary code from the assembly implementation of efi_call(), and only keep the pieces that deal with the stack alignment and the ABI translation. 
Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-10-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/Makefile | 1 - arch/x86/platform/efi/efi_64.c | 3 +++ arch/x86/platform/efi/efi_stub_64.S | 39 ++++--------------------------------- 3 files changed, 7 insertions(+), 36 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile index fe29f3f5d384..7ec3a8b31f8b 100644 --- a/arch/x86/platform/efi/Makefile +++ b/arch/x86/platform/efi/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y -OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index a7f11d1ff7c4..03565dad0c4b 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -1019,6 +1019,8 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, efi_switch_mm(&efi_mm); } + kernel_fpu_begin(); + /* Disable interrupts around EFI calls: */ local_irq_save(flags); status = efi_call(efi.systab->runtime->set_virtual_address_map, @@ -1026,6 +1028,7 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, descriptor_version, virtual_map); local_irq_restore(flags); + kernel_fpu_end(); if (save_pgd) efi_old_memmap_phys_epilog(save_pgd); diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index b1d2313fe3bf..e7e1020f4ccb 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -8,41 +8,12 @@ */ #include -#include -#include -#include -#include - -#define SAVE_XMM \ - mov %rsp, %rax; \ - subq $0x70, %rsp; \ - and $~0xf, %rsp; \ - mov %rax, (%rsp); \ - mov %cr0, %rax; \ - clts; \ - mov %rax, 0x8(%rsp); \ - movaps %xmm0, 0x60(%rsp); \ - movaps %xmm1, 0x50(%rsp); \ - movaps %xmm2, 0x40(%rsp); \ - movaps %xmm3, 0x30(%rsp); \ - movaps %xmm4, 0x20(%rsp); \ - movaps %xmm5, 0x10(%rsp) - -#define RESTORE_XMM \ - movaps 0x60(%rsp), %xmm0; \ - movaps 0x50(%rsp), %xmm1; \ - movaps 0x40(%rsp), %xmm2; \ - movaps 0x30(%rsp), %xmm3; \ - movaps 0x20(%rsp), %xmm4; \ - movaps 0x10(%rsp), %xmm5; \ - mov 0x8(%rsp), %rsi; \ - mov %rsi, %cr0; \ - mov (%rsp), %rsp +#include SYM_FUNC_START(efi_call) pushq %rbp movq %rsp, %rbp - SAVE_XMM + and $~0xf, %rsp mov 16(%rbp), %rax subq $48, %rsp mov %r9, 32(%rsp) @@ -50,9 +21,7 @@ SYM_FUNC_START(efi_call) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx - call *%rdi - addq $48, %rsp - RESTORE_XMM - popq %rbp + CALL_NOSPEC %rdi + leave ret SYM_FUNC_END(efi_call) -- cgit From ea5e1919b44f09fce72d919fbb87f9611fc700a6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:43 +0100 Subject: efi/x86: Simplify mixed mode call wrapper Calling 32-bit EFI runtime services from a 64-bit OS involves switching back to the flat mapping with a stack carved out of memory that is 32-bit addressable. There is no need to actually execute the 64-bit part of this routine from the flat mapping as well, as long as the entry and return address fit in 32 bits. 
There is also no need to preserve part of the calling context in global variables: we can simply push the old stack pointer value to the new stack, and keep the return address from the code32 section in EBX. While at it, move the conditional check whether to invoke the mixed mode version of SetVirtualAddressMap() into the 64-bit implementation of the wrapper routine. Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-11-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 19 ++---- arch/x86/platform/efi/efi_64.c | 73 +++++++++++++-------- arch/x86/platform/efi/efi_thunk_64.S | 121 ++++++----------------------------- 3 files changed, 71 insertions(+), 142 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 50f8123e658a..e4d3afac7be3 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -1015,21 +1015,10 @@ static void __init __efi_enter_virtual_mode(void) efi_sync_low_kernel_mappings(); - if (!efi_is_mixed()) { - status = efi_set_virtual_address_map( - efi.memmap.desc_size * count, - efi.memmap.desc_size, - efi.memmap.desc_version, - (efi_memory_desc_t *)pa); - } else { - status = efi_thunk_set_virtual_address_map( - efi_phys.set_virtual_address_map, - efi.memmap.desc_size * count, - efi.memmap.desc_size, - efi.memmap.desc_version, - (efi_memory_desc_t *)pa); - } - + status = efi_set_virtual_address_map(efi.memmap.desc_size * count, + efi.memmap.desc_size, + efi.memmap.desc_version, + (efi_memory_desc_t *)pa); if (status != EFI_SUCCESS) { pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n", status); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 03565dad0c4b..910e9ec03b09 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -626,61 +626,74 @@ void efi_switch_mm(struct mm_struct *mm) switch_mm(efi_scratch.prev_mm, mm, NULL); } -#ifdef CONFIG_EFI_MIXED static DEFINE_SPINLOCK(efi_runtime_lock); -#define runtime_service32(func) \ -({ \ - u32 table = (u32)(unsigned long)efi.systab; \ - u32 *rt, *___f; \ - \ - rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \ - ___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \ - *___f; \ +/* + * DS and ES contain user values. We need to save them. + * The 32-bit EFI code needs a valid DS, ES, and SS. There's no + * need to save the old SS: __KERNEL_DS is always acceptable. + */ +#define __efi_thunk(func, ...) \ +({ \ + efi_runtime_services_32_t *__rt; \ + unsigned short __ds, __es; \ + efi_status_t ____s; \ + \ + __rt = (void *)(unsigned long)efi.systab->mixed_mode.runtime; \ + \ + savesegment(ds, __ds); \ + savesegment(es, __es); \ + \ + loadsegment(ss, __KERNEL_DS); \ + loadsegment(ds, __KERNEL_DS); \ + loadsegment(es, __KERNEL_DS); \ + \ + ____s = efi64_thunk(__rt->func, __VA_ARGS__); \ + \ + loadsegment(ds, __ds); \ + loadsegment(es, __es); \ + \ + ____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32; \ + ____s; \ }) /* * Switch to the EFI page tables early so that we can access the 1:1 * runtime services mappings which are not mapped in any other page - * tables. This function must be called before runtime_service32(). + * tables. * * Also, disable interrupts because the IDT points to 64-bit handlers, * which aren't going to function correctly when we switch to 32-bit. 
*/ -#define efi_thunk(f, ...) \ +#define efi_thunk(func...) \ ({ \ efi_status_t __s; \ - u32 __func; \ \ arch_efi_call_virt_setup(); \ \ - __func = runtime_service32(f); \ - __s = efi64_thunk(__func, __VA_ARGS__); \ + __s = __efi_thunk(func); \ \ arch_efi_call_virt_teardown(); \ \ __s; \ }) -efi_status_t efi_thunk_set_virtual_address_map( - void *phys_set_virtual_address_map, - unsigned long memory_map_size, - unsigned long descriptor_size, - u32 descriptor_version, - efi_memory_desc_t *virtual_map) +static efi_status_t __init +efi_thunk_set_virtual_address_map(unsigned long memory_map_size, + unsigned long descriptor_size, + u32 descriptor_version, + efi_memory_desc_t *virtual_map) { efi_status_t status; unsigned long flags; - u32 func; efi_sync_low_kernel_mappings(); local_irq_save(flags); efi_switch_mm(&efi_mm); - func = (u32)(unsigned long)phys_set_virtual_address_map; - status = efi64_thunk(func, memory_map_size, descriptor_size, - descriptor_version, virtual_map); + status = __efi_thunk(set_virtual_address_map, memory_map_size, + descriptor_size, descriptor_version, virtual_map); efi_switch_mm(efi_scratch.prev_mm); local_irq_restore(flags); @@ -983,8 +996,11 @@ efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules, return EFI_UNSUPPORTED; } -void efi_thunk_runtime_setup(void) +void __init efi_thunk_runtime_setup(void) { + if (!IS_ENABLED(CONFIG_EFI_MIXED)) + return; + efi.get_time = efi_thunk_get_time; efi.set_time = efi_thunk_set_time; efi.get_wakeup_time = efi_thunk_get_wakeup_time; @@ -1000,7 +1016,6 @@ void efi_thunk_runtime_setup(void) efi.update_capsule = efi_thunk_update_capsule; efi.query_capsule_caps = efi_thunk_query_capsule_caps; } -#endif /* CONFIG_EFI_MIXED */ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, unsigned long descriptor_size, @@ -1011,6 +1026,12 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, unsigned long flags; pgd_t *save_pgd = NULL; + if (efi_is_mixed()) + return efi_thunk_set_virtual_address_map(memory_map_size, + descriptor_size, + descriptor_version, + virtual_map); + if (efi_enabled(EFI_OLD_MEMMAP)) { save_pgd = efi_old_memmap_phys_prolog(); if (!save_pgd) diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S index 3189f1394701..162b35729633 100644 --- a/arch/x86/platform/efi/efi_thunk_64.S +++ b/arch/x86/platform/efi/efi_thunk_64.S @@ -25,15 +25,16 @@ .text .code64 -SYM_FUNC_START(efi64_thunk) +SYM_CODE_START(efi64_thunk) push %rbp push %rbx /* * Switch to 1:1 mapped 32-bit stack pointer. */ - movq %rsp, efi_saved_sp(%rip) + movq %rsp, %rax movq efi_scratch(%rip), %rsp + push %rax /* * Calculate the physical address of the kernel text. @@ -41,113 +42,31 @@ SYM_FUNC_START(efi64_thunk) movq $__START_KERNEL_map, %rax subq phys_base(%rip), %rax - /* - * Push some physical addresses onto the stack. This is easier - * to do now in a code64 section while the assembler can address - * 64-bit values. Note that all the addresses on the stack are - * 32-bit. - */ - subq $16, %rsp - leaq efi_exit32(%rip), %rbx - subq %rax, %rbx - movl %ebx, 8(%rsp) - - leaq __efi64_thunk(%rip), %rbx + leaq 1f(%rip), %rbp + leaq 2f(%rip), %rbx + subq %rax, %rbp subq %rax, %rbx - call *%rbx - - movq efi_saved_sp(%rip), %rsp - pop %rbx - pop %rbp - retq -SYM_FUNC_END(efi64_thunk) -/* - * We run this function from the 1:1 mapping. - * - * This function must be invoked with a 1:1 mapped stack. 
- */ -SYM_FUNC_START_LOCAL(__efi64_thunk) - movl %ds, %eax - push %rax - movl %es, %eax - push %rax - movl %ss, %eax - push %rax - - subq $32, %rsp - movl %esi, 0x0(%rsp) - movl %edx, 0x4(%rsp) - movl %ecx, 0x8(%rsp) - movq %r8, %rsi - movl %esi, 0xc(%rsp) - movq %r9, %rsi - movl %esi, 0x10(%rsp) - - leaq 1f(%rip), %rbx - movq %rbx, func_rt_ptr(%rip) + subq $28, %rsp + movl %ebx, 0x0(%rsp) /* return address */ + movl %esi, 0x4(%rsp) + movl %edx, 0x8(%rsp) + movl %ecx, 0xc(%rsp) + movl %r8d, 0x10(%rsp) + movl %r9d, 0x14(%rsp) /* Switch to 32-bit descriptor */ pushq $__KERNEL32_CS - leaq efi_enter32(%rip), %rax - pushq %rax + pushq %rdi /* EFI runtime service address */ lretq -1: addq $32, %rsp - +1: movq 24(%rsp), %rsp pop %rbx - movl %ebx, %ss - pop %rbx - movl %ebx, %es - pop %rbx - movl %ebx, %ds - - /* - * Convert 32-bit status code into 64-bit. - */ - test %rax, %rax - jz 1f - movl %eax, %ecx - andl $0x0fffffff, %ecx - andl $0xf0000000, %eax - shl $32, %rax - or %rcx, %rax -1: - ret -SYM_FUNC_END(__efi64_thunk) - -SYM_FUNC_START_LOCAL(efi_exit32) - movq func_rt_ptr(%rip), %rax - push %rax - mov %rdi, %rax - ret -SYM_FUNC_END(efi_exit32) + pop %rbp + retq .code32 -/* - * EFI service pointer must be in %edi. - * - * The stack should represent the 32-bit calling convention. - */ -SYM_FUNC_START_LOCAL(efi_enter32) - movl $__KERNEL_DS, %eax - movl %eax, %ds - movl %eax, %es - movl %eax, %ss - - call *%edi - - /* We must preserve return value */ - movl %eax, %edi - - movl 72(%esp), %eax - pushl $__KERNEL_CS - pushl %eax - +2: pushl $__KERNEL_CS + pushl %ebp lret -SYM_FUNC_END(efi_enter32) - - .data - .balign 8 -func_rt_ptr: .quad 0 -efi_saved_sp: .quad 0 +SYM_CODE_END(efi64_thunk) -- cgit From 33b85447fa61946b94fea93dd4bc24772af14d54 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:44 +0100 Subject: efi/x86: Drop two near identical versions of efi_runtime_init() The routines efi_runtime_init32() and efi_runtime_init64() are almost indistinguishable, and the only relevant difference is the offset in the runtime struct from where to obtain the physical address of the SetVirtualAddressMap() routine. However, this address is only used once, when installing the virtual address map that the OS will use to invoke EFI runtime services, and at the time of the call, we will necessarily be running with a 1:1 mapping, and so there is no need to do the map/unmap dance here to retrieve the address. In fact, in the preceding changes to these users, we stopped using the address recorded here entirely. So let's just get rid of all this code since it no longer serves a purpose. While at it, tweak the logic so that we handle unsupported and disable EFI runtime services in the same way, and unmap the EFI memory map in both cases. 
Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-12-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 95 +++------------------------------------------ 1 file changed, 5 insertions(+), 90 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index e4d3afac7be3..67cb0fd18777 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -429,87 +429,6 @@ static int __init efi_systab_init(void *phys) return 0; } -static int __init efi_runtime_init32(void) -{ - efi_runtime_services_32_t *runtime; - - runtime = early_memremap((unsigned long)efi.systab->runtime, - sizeof(efi_runtime_services_32_t)); - if (!runtime) { - pr_err("Could not map the runtime service table!\n"); - return -ENOMEM; - } - - /* - * We will only need *early* access to the SetVirtualAddressMap - * EFI runtime service. All other runtime services will be called - * via the virtual mapping. - */ - efi_phys.set_virtual_address_map = - (efi_set_virtual_address_map_t *) - (unsigned long)runtime->set_virtual_address_map; - early_memunmap(runtime, sizeof(efi_runtime_services_32_t)); - - return 0; -} - -static int __init efi_runtime_init64(void) -{ - efi_runtime_services_64_t *runtime; - - runtime = early_memremap((unsigned long)efi.systab->runtime, - sizeof(efi_runtime_services_64_t)); - if (!runtime) { - pr_err("Could not map the runtime service table!\n"); - return -ENOMEM; - } - - /* - * We will only need *early* access to the SetVirtualAddressMap - * EFI runtime service. All other runtime services will be called - * via the virtual mapping. - */ - efi_phys.set_virtual_address_map = - (efi_set_virtual_address_map_t *) - (unsigned long)runtime->set_virtual_address_map; - early_memunmap(runtime, sizeof(efi_runtime_services_64_t)); - - return 0; -} - -static int __init efi_runtime_init(void) -{ - int rv; - - /* - * Check out the runtime services table. We need to map - * the runtime services table so that we can grab the physical - * address of several of the EFI runtime functions, needed to - * set the firmware into virtual mode. - * - * When EFI_PARAVIRT is in force then we could not map runtime - * service memory region because we do not have direct access to it. - * However, runtime services are available through proxy functions - * (e.g. in case of Xen dom0 EFI implementation they call special - * hypercall which executes relevant EFI functions) and that is why - * they are always enabled. 
- */ - - if (!efi_enabled(EFI_PARAVIRT)) { - if (efi_enabled(EFI_64BIT)) - rv = efi_runtime_init64(); - else - rv = efi_runtime_init32(); - - if (rv) - return rv; - } - - set_bit(EFI_RUNTIME_SERVICES, &efi.flags); - - return 0; -} - void __init efi_init(void) { efi_char16_t *c16; @@ -567,13 +486,13 @@ void __init efi_init(void) if (!efi_runtime_supported()) pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); - else { - if (efi_runtime_disabled() || efi_runtime_init()) { - efi_memmap_unmap(); - return; - } + + if (!efi_runtime_supported() || efi_runtime_disabled()) { + efi_memmap_unmap(); + return; } + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); efi_clean_memmap(); if (efi_enabled(EFI_DBG)) @@ -934,8 +853,6 @@ static void __init kexec_enter_virtual_mode(void) efi_native_runtime_setup(); - efi.set_virtual_address_map = NULL; - if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) runtime_code_page_mkexec(); #endif @@ -1040,8 +957,6 @@ static void __init __efi_enter_virtual_mode(void) else efi_thunk_runtime_setup(); - efi.set_virtual_address_map = NULL; - /* * Apply more restrictive page table mapping attributes now that * SVAM() has been called and the firmware has performed all -- cgit From 5b279a262f5413889529802b82e153e87ee6bef8 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:45 +0100 Subject: efi/x86: Clean up efi_systab_init() routine for legibility Clean up the efi_systab_init() routine which maps the EFI system table and copies the relevant pieces of data out of it. The current routine is very difficult to read, so let's clean that up. Also, switch to a R/O mapping of the system table since that is all we need. Finally, use a plain u64 variable to record the physical address of the system table instead of pointlessly stashing it in a struct efi that is never used for anything else. Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-13-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 166 ++++++++++++++++++++++---------------------- 1 file changed, 82 insertions(+), 84 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 67cb0fd18777..53ed0b123641 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -54,8 +54,8 @@ #include #include -struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; +static u64 efi_systab_phys __initdata; static efi_config_table_type_t arch_tables[] __initdata = { #ifdef CONFIG_X86_UV @@ -327,89 +327,90 @@ void __init efi_print_memmap(void) } } -static int __init efi_systab_init(void *phys) +static int __init efi_systab_init(u64 phys) { + int size = efi_enabled(EFI_64BIT) ? 
sizeof(efi_system_table_64_t) + : sizeof(efi_system_table_32_t); + bool over4g = false; + void *p; + + p = early_memremap_ro(phys, size); + if (p == NULL) { + pr_err("Couldn't map the system table!\n"); + return -ENOMEM; + } + if (efi_enabled(EFI_64BIT)) { - efi_system_table_64_t *systab64; - struct efi_setup_data *data = NULL; - u64 tmp = 0; + const efi_system_table_64_t *systab64 = p; + + efi_systab.hdr = systab64->hdr; + efi_systab.fw_vendor = systab64->fw_vendor; + efi_systab.fw_revision = systab64->fw_revision; + efi_systab.con_in_handle = systab64->con_in_handle; + efi_systab.con_in = systab64->con_in; + efi_systab.con_out_handle = systab64->con_out_handle; + efi_systab.con_out = (void *)(unsigned long)systab64->con_out; + efi_systab.stderr_handle = systab64->stderr_handle; + efi_systab.stderr = systab64->stderr; + efi_systab.runtime = (void *)(unsigned long)systab64->runtime; + efi_systab.boottime = (void *)(unsigned long)systab64->boottime; + efi_systab.nr_tables = systab64->nr_tables; + efi_systab.tables = systab64->tables; + + over4g = systab64->con_in_handle > U32_MAX || + systab64->con_in > U32_MAX || + systab64->con_out_handle > U32_MAX || + systab64->con_out > U32_MAX || + systab64->stderr_handle > U32_MAX || + systab64->stderr > U32_MAX || + systab64->boottime > U32_MAX; if (efi_setup) { - data = early_memremap(efi_setup, sizeof(*data)); - if (!data) + struct efi_setup_data *data; + + data = early_memremap_ro(efi_setup, sizeof(*data)); + if (!data) { + early_memunmap(p, size); return -ENOMEM; - } - systab64 = early_memremap((unsigned long)phys, - sizeof(*systab64)); - if (systab64 == NULL) { - pr_err("Couldn't map the system table!\n"); - if (data) - early_memunmap(data, sizeof(*data)); - return -ENOMEM; - } + } + + efi_systab.fw_vendor = (unsigned long)data->fw_vendor; + efi_systab.runtime = (void *)(unsigned long)data->runtime; + efi_systab.tables = (unsigned long)data->tables; + + over4g |= data->fw_vendor > U32_MAX || + data->runtime > U32_MAX || + data->tables > U32_MAX; - efi_systab.hdr = systab64->hdr; - efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor : - systab64->fw_vendor; - tmp |= data ? data->fw_vendor : systab64->fw_vendor; - efi_systab.fw_revision = systab64->fw_revision; - efi_systab.con_in_handle = systab64->con_in_handle; - tmp |= systab64->con_in_handle; - efi_systab.con_in = systab64->con_in; - tmp |= systab64->con_in; - efi_systab.con_out_handle = systab64->con_out_handle; - tmp |= systab64->con_out_handle; - efi_systab.con_out = (void *)(unsigned long)systab64->con_out; - tmp |= systab64->con_out; - efi_systab.stderr_handle = systab64->stderr_handle; - tmp |= systab64->stderr_handle; - efi_systab.stderr = systab64->stderr; - tmp |= systab64->stderr; - efi_systab.runtime = data ? - (void *)(unsigned long)data->runtime : - (void *)(unsigned long)systab64->runtime; - tmp |= data ? data->runtime : systab64->runtime; - efi_systab.boottime = (void *)(unsigned long)systab64->boottime; - tmp |= systab64->boottime; - efi_systab.nr_tables = systab64->nr_tables; - efi_systab.tables = data ? (unsigned long)data->tables : - systab64->tables; - tmp |= data ? 
data->tables : systab64->tables; - - early_memunmap(systab64, sizeof(*systab64)); - if (data) early_memunmap(data, sizeof(*data)); -#ifdef CONFIG_X86_32 - if (tmp >> 32) { - pr_err("EFI data located above 4GB, disabling EFI.\n"); - return -EINVAL; + } else { + over4g |= systab64->fw_vendor > U32_MAX || + systab64->runtime > U32_MAX || + systab64->tables > U32_MAX; } -#endif } else { - efi_system_table_32_t *systab32; - - systab32 = early_memremap((unsigned long)phys, - sizeof(*systab32)); - if (systab32 == NULL) { - pr_err("Couldn't map the system table!\n"); - return -ENOMEM; - } + const efi_system_table_32_t *systab32 = p; + + efi_systab.hdr = systab32->hdr; + efi_systab.fw_vendor = systab32->fw_vendor; + efi_systab.fw_revision = systab32->fw_revision; + efi_systab.con_in_handle = systab32->con_in_handle; + efi_systab.con_in = systab32->con_in; + efi_systab.con_out_handle = systab32->con_out_handle; + efi_systab.con_out = (void *)(unsigned long)systab32->con_out; + efi_systab.stderr_handle = systab32->stderr_handle; + efi_systab.stderr = systab32->stderr; + efi_systab.runtime = (void *)(unsigned long)systab32->runtime; + efi_systab.boottime = (void *)(unsigned long)systab32->boottime; + efi_systab.nr_tables = systab32->nr_tables; + efi_systab.tables = systab32->tables; + } - efi_systab.hdr = systab32->hdr; - efi_systab.fw_vendor = systab32->fw_vendor; - efi_systab.fw_revision = systab32->fw_revision; - efi_systab.con_in_handle = systab32->con_in_handle; - efi_systab.con_in = systab32->con_in; - efi_systab.con_out_handle = systab32->con_out_handle; - efi_systab.con_out = (void *)(unsigned long)systab32->con_out; - efi_systab.stderr_handle = systab32->stderr_handle; - efi_systab.stderr = systab32->stderr; - efi_systab.runtime = (void *)(unsigned long)systab32->runtime; - efi_systab.boottime = (void *)(unsigned long)systab32->boottime; - efi_systab.nr_tables = systab32->nr_tables; - efi_systab.tables = systab32->tables; + early_memunmap(p, size); - early_memunmap(systab32, sizeof(*systab32)); + if (IS_ENABLED(CONFIG_X86_32) && over4g) { + pr_err("EFI data located above 4GB, disabling EFI.\n"); + return -EINVAL; } efi.systab = &efi_systab; @@ -435,20 +436,17 @@ void __init efi_init(void) char vendor[100] = "unknown"; int i = 0; -#ifdef CONFIG_X86_32 - if (boot_params.efi_info.efi_systab_hi || - boot_params.efi_info.efi_memmap_hi) { + if (IS_ENABLED(CONFIG_X86_32) && + (boot_params.efi_info.efi_systab_hi || + boot_params.efi_info.efi_memmap_hi)) { pr_info("Table located above 4GB, disabling EFI.\n"); return; } - efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; -#else - efi_phys.systab = (efi_system_table_t *) - (boot_params.efi_info.efi_systab | - ((__u64)boot_params.efi_info.efi_systab_hi<<32)); -#endif - if (efi_systab_init(efi_phys.systab)) + efi_systab_phys = boot_params.efi_info.efi_systab | + ((__u64)boot_params.efi_info.efi_systab_hi << 32); + + if (efi_systab_init(efi_systab_phys)) return; efi.config_table = (unsigned long)efi.systab->tables; @@ -601,7 +599,7 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md) size = md->num_pages << EFI_PAGE_SHIFT; end = md->phys_addr + size; - systab = (u64)(unsigned long)efi_phys.systab; + systab = efi_systab_phys; if (md->phys_addr <= systab && systab < end) { systab += md->virt_addr - md->phys_addr; efi.systab = (efi_system_table_t *)(unsigned long)systab; -- cgit From e2d68a955e49d61fd0384f23e92058dc9b79be5e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:46 +0100 Subject: efi/x86: 
Don't panic or BUG() on non-critical error conditions The logic in __efi_enter_virtual_mode() does a number of steps in sequence, all of which may fail in one way or the other. In most cases, we simply print an error and disable EFI runtime services support, but in some cases, we BUG() or panic() and bring down the system when encountering conditions that we could easily handle in the same way. While at it, replace a pointless page-to-virt-phys conversion with one that goes straight from struct page to physical. Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-14-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 28 ++++++++++++++-------------- arch/x86/platform/efi/efi_64.c | 9 +++++---- 2 files changed, 19 insertions(+), 18 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 53ed0b123641..4f539bfdc051 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -889,16 +889,14 @@ static void __init __efi_enter_virtual_mode(void) if (efi_alloc_page_tables()) { pr_err("Failed to allocate EFI page tables\n"); - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return; + goto err; } efi_merge_regions(); new_memmap = efi_map_regions(&count, &pg_shift); if (!new_memmap) { pr_err("Error reallocating memory, EFI runtime non-functional!\n"); - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return; + goto err; } pa = __pa(new_memmap); @@ -912,8 +910,7 @@ static void __init __efi_enter_virtual_mode(void) if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) { pr_err("Failed to remap late EFI memory map\n"); - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return; + goto err; } if (efi_enabled(EFI_DBG)) { @@ -921,12 +918,11 @@ static void __init __efi_enter_virtual_mode(void) efi_print_memmap(); } - BUG_ON(!efi.systab); + if (WARN_ON(!efi.systab)) + goto err; - if (efi_setup_page_tables(pa, 1 << pg_shift)) { - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); - return; - } + if (efi_setup_page_tables(pa, 1 << pg_shift)) + goto err; efi_sync_low_kernel_mappings(); @@ -935,9 +931,9 @@ static void __init __efi_enter_virtual_mode(void) efi.memmap.desc_version, (efi_memory_desc_t *)pa); if (status != EFI_SUCCESS) { - pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n", - status); - panic("EFI call to SetVirtualAddressMap() failed!"); + pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n", + status); + goto err; } efi_free_boot_services(); @@ -964,6 +960,10 @@ static void __init __efi_enter_virtual_mode(void) /* clean DUMMY object */ efi_delete_dummy_variable(); + return; + +err: + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); } void __init efi_enter_virtual_mode(void) diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 910e9ec03b09..c13fa2150976 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -384,11 +384,12 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) return 0; page = alloc_page(GFP_KERNEL|__GFP_DMA32); - if (!page) - panic("Unable to allocate EFI runtime stack < 4GB\n"); + if (!page) { + pr_err("Unable to allocate EFI runtime stack < 4GB\n"); + return 1; + } - efi_scratch.phys_stack = virt_to_phys(page_address(page)); - efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */ + efi_scratch.phys_stack = page_to_phys(page + 
1); /* stack grows down */ npages = (_etext - _text) >> PAGE_SHIFT; text = __pa(_text); -- cgit From 4684abe375927e745974bfddd85e171a3eb887a5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 3 Jan 2020 12:39:47 +0100 Subject: efi/x86: Remove unreachable code in kexec_enter_virtual_mode() Remove some code that is guaranteed to be unreachable, given that we have already bailed by this time if EFI_OLD_MEMMAP is set. Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arvind Sankar Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-15-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 4f539bfdc051..b931c4bea284 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -850,9 +850,6 @@ static void __init kexec_enter_virtual_mode(void) efi.runtime_version = efi_systab.hdr.revision; efi_native_runtime_setup(); - - if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) - runtime_code_page_mkexec(); #endif } -- cgit From 14b864f4b5c402fe1ca394042beeea6fdf54f8f5 Mon Sep 17 00:00:00 2001 From: Arvind Sankar Date: Fri, 3 Jan 2020 12:39:48 +0100 Subject: efi/x86: Check number of arguments to variadic functions On x86 we need to thunk through assembler stubs to call the EFI services for mixed mode, and for runtime services in 64-bit mode. The assembler stubs have limits on how many arguments it handles. Introduce a few macros to check that we do not try to pass too many arguments to the stubs. Signed-off-by: Arvind Sankar Signed-off-by: Ard Biesheuvel Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Matthew Garrett Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/20200103113953.9571-16-ardb@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi_stub_64.S | 4 ++-- arch/x86/platform/efi/efi_thunk_64.S | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index e7e1020f4ccb..15da118f04f0 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -10,7 +10,7 @@ #include #include -SYM_FUNC_START(efi_call) +SYM_FUNC_START(__efi_call) pushq %rbp movq %rsp, %rbp and $~0xf, %rsp @@ -24,4 +24,4 @@ SYM_FUNC_START(efi_call) CALL_NOSPEC %rdi leave ret -SYM_FUNC_END(efi_call) +SYM_FUNC_END(__efi_call) diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S index 162b35729633..26f0da238c1c 100644 --- a/arch/x86/platform/efi/efi_thunk_64.S +++ b/arch/x86/platform/efi/efi_thunk_64.S @@ -25,7 +25,7 @@ .text .code64 -SYM_CODE_START(efi64_thunk) +SYM_CODE_START(__efi64_thunk) push %rbp push %rbx @@ -69,4 +69,4 @@ SYM_CODE_START(efi64_thunk) 2: pushl $__KERNEL_CS pushl %ebp lret -SYM_CODE_END(efi64_thunk) +SYM_CODE_END(__efi64_thunk) -- cgit From d9e3d2c4f103200d87f2c243a84c1fd3b3bfea8c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 13 Jan 2020 18:22:37 +0100 Subject: efi/x86: Don't map the entire kernel text RW for mixed mode The mixed mode thunking routine requires a part of it to be mapped 1:1, and for this reason, we currently map the entire kernel .text read/write in the EFI page tables, which is bad. 
In fact, the kernel_map_pages_in_pgd() invocation that installs this mapping is entirely redundant, since all of DRAM is already 1:1 mapped read/write in the EFI page tables when we reach this point, which means that .rodata is mapped read-write as well. So let's remap both .text and .rodata read-only in the EFI page tables. Signed-off-by: Ard Biesheuvel Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200113172245.27925-6-ardb@kernel.org --- arch/x86/platform/efi/efi_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index c13fa2150976..6ec58ff60b56 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -391,11 +391,11 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */ - npages = (_etext - _text) >> PAGE_SHIFT; + npages = (__end_rodata_aligned - _text) >> PAGE_SHIFT; text = __pa(_text); pfn = text >> PAGE_SHIFT; - pf = _PAGE_RW | _PAGE_ENC; + pf = _PAGE_ENC; if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) { pr_err("Failed to map kernel text 1:1\n"); return 1; -- cgit From 97bb9cdc32108036170d9d0d208257168f80d9e9 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 13 Jan 2020 18:22:38 +0100 Subject: efi/x86: Avoid RWX mappings for all of DRAM The EFI code creates RWX mappings for all memory regions that are occupied after the stub completes, and in the mixed mode case, it even creates RWX mappings for all of the remaining DRAM as well. Let's try to avoid this, by setting the NX bit for all memory regions except the ones that are marked as EFI runtime services code [which means text+rodata+data in practice, so we cannot mark them read-only right away]. For cases of buggy firmware where boot services code is called during SetVirtualAddressMap(), map those regions with exec permissions as well - they will be unmapped in efi_free_boot_services(). Signed-off-by: Ard Biesheuvel Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200113172245.27925-7-ardb@kernel.org --- arch/x86/platform/efi/efi_64.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6ec58ff60b56..3eb23966e30a 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -365,10 +365,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) * as trim_bios_range() will reserve the first page and isolate it away * from memory allocators anyway. */ - pf = _PAGE_RW; - if (sev_active()) - pf |= _PAGE_ENC; - if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) { pr_err("Failed to create 1:1 mapping for the first page!\n"); return 1; @@ -410,6 +406,22 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va) unsigned long pfn; pgd_t *pgd = efi_mm.pgd; + /* + * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF + * executable images in memory that consist of both R-X and + * RW- sections, so we cannot apply read-only or non-exec + * permissions just yet. However, modern EFI systems provide + * a memory attributes table that describes those sections + * with the appropriate restricted permissions, which are + * applied in efi_runtime_update_mappings() below. 
All other + * regions can be mapped non-executable at this point, with + * the exception of boot services code regions, but those will + * be unmapped again entirely in efi_free_boot_services(). + */ + if (md->type != EFI_BOOT_SERVICES_CODE && + md->type != EFI_RUNTIME_SERVICES_CODE) + flags |= _PAGE_NX; + if (!(md->attribute & EFI_MEMORY_WB)) flags |= _PAGE_PCD; -- cgit From 1f299fad1e312947c974c6a1d8a3a484f27a6111 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 13 Jan 2020 18:22:39 +0100 Subject: efi/x86: Limit EFI old memory map to SGI UV machines We carry a quirk in the x86 EFI code to switch back to an older method of mapping the EFI runtime services memory regions, because it was deemed risky at the time to implement a new method without providing a fallback to the old method in case problems arose. Such problems did arise, but they appear to be limited to SGI UV1 machines, and so these are the only ones for which the fallback gets enabled automatically (via a DMI quirk). The fallback can be enabled manually as well, by passing efi=old_map, but there is very little evidence that suggests that this is something that is being relied upon in the field. Given that UV1 support is not enabled by default by the distros (Ubuntu, Fedora), there is no point in carrying this fallback code all the time if there are no other users. So let's move it into the UV support code, and document that efi=old_map now requires this support code to be enabled. Note that efi=old_map has been used in the past on other SGI UV machines to work around kernel regressions in production, so we keep the option to enable it by hand, but only if the kernel was built with UV support. Signed-off-by: Ard Biesheuvel Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200113172245.27925-8-ardb@kernel.org --- arch/x86/platform/efi/efi.c | 30 +++----- arch/x86/platform/efi/efi_64.c | 166 +++-------------------------------------- arch/x86/platform/efi/quirks.c | 21 ++++-- 3 files changed, 33 insertions(+), 184 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index b931c4bea284..4e46d2d24352 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -497,6 +497,8 @@ void __init efi_init(void) efi_print_memmap(); } +#if defined(CONFIG_X86_32) || defined(CONFIG_X86_UV) + void __init efi_set_executable(efi_memory_desc_t *md, bool executable) { u64 addr, npages; @@ -561,6 +563,8 @@ void __init old_map_region(efi_memory_desc_t *md) (unsigned long long)md->phys_addr); } +#endif + /* Merge contiguous regions of the same type and attribute */ static void __init efi_merge_regions(void) { @@ -659,7 +663,7 @@ static inline void *efi_map_next_entry_reverse(void *entry) */ static void *efi_map_next_entry(void *entry) { - if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { + if (!efi_have_uv1_memmap() && efi_enabled(EFI_64BIT)) { /* * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE * config table feature requires us to map all entries @@ -791,11 +795,11 @@ static void __init kexec_enter_virtual_mode(void) /* * We don't do virtual mode, since we don't do runtime services, on - * non-native EFI. With efi=old_map, we don't do runtime services in + * non-native EFI. With the UV1 memmap, we don't do runtime services in * kexec kernel because in the initial boot something else might * have been mapped at these virtual addresses. 
*/ - if (efi_is_mixed() || efi_enabled(EFI_OLD_MEMMAP)) { + if (efi_is_mixed() || efi_have_uv1_memmap()) { efi_memmap_unmap(); clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); return; @@ -861,9 +865,9 @@ static void __init kexec_enter_virtual_mode(void) * * The old method which used to update that memory descriptor with the * virtual address obtained from ioremap() is still supported when the - * kernel is booted with efi=old_map on its command line. Same old - * method enabled the runtime services to be called without having to - * thunk back into physical mode for every invocation. + * kernel is booted on SG1 UV1 hardware. Same old method enabled the + * runtime services to be called without having to thunk back into + * physical mode for every invocation. * * The new method does a pagetable switch in a preemption-safe manner * so that we're in a different address space when calling a runtime @@ -976,20 +980,6 @@ void __init efi_enter_virtual_mode(void) efi_dump_pagetable(); } -static int __init arch_parse_efi_cmdline(char *str) -{ - if (!str) { - pr_warn("need at least one option\n"); - return -EINVAL; - } - - if (parse_option_str(str, "old_map")) - set_bit(EFI_OLD_MEMMAP, &efi.flags); - - return 0; -} -early_param("efi", arch_parse_efi_cmdline); - bool efi_is_table_address(unsigned long phys_addr) { unsigned int i; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 3eb23966e30a..8d1869ff1033 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -57,134 +57,6 @@ static u64 efi_va = EFI_VA_START; struct efi_scratch efi_scratch; -static void __init early_code_mapping_set_exec(int executable) -{ - efi_memory_desc_t *md; - - if (!(__supported_pte_mask & _PAGE_NX)) - return; - - /* Make EFI service code area executable */ - for_each_efi_memory_desc(md) { - if (md->type == EFI_RUNTIME_SERVICES_CODE || - md->type == EFI_BOOT_SERVICES_CODE) - efi_set_executable(md, executable); - } -} - -static void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd); - -static pgd_t * __init efi_old_memmap_phys_prolog(void) -{ - unsigned long vaddr, addr_pgd, addr_p4d, addr_pud; - pgd_t *save_pgd, *pgd_k, *pgd_efi; - p4d_t *p4d, *p4d_k, *p4d_efi; - pud_t *pud; - - int pgd; - int n_pgds, i, j; - - early_code_mapping_set_exec(1); - - n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); - save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL); - if (!save_pgd) - return NULL; - - /* - * Build 1:1 identity mapping for efi=old_map usage. Note that - * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while - * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical - * address X, the pud_index(X) != pud_index(__va(X)), we can only copy - * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping. - * This means here we can only reuse the PMD tables of the direct mapping. 
- */ - for (pgd = 0; pgd < n_pgds; pgd++) { - addr_pgd = (unsigned long)(pgd * PGDIR_SIZE); - vaddr = (unsigned long)__va(pgd * PGDIR_SIZE); - pgd_efi = pgd_offset_k(addr_pgd); - save_pgd[pgd] = *pgd_efi; - - p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd); - if (!p4d) { - pr_err("Failed to allocate p4d table!\n"); - goto out; - } - - for (i = 0; i < PTRS_PER_P4D; i++) { - addr_p4d = addr_pgd + i * P4D_SIZE; - p4d_efi = p4d + p4d_index(addr_p4d); - - pud = pud_alloc(&init_mm, p4d_efi, addr_p4d); - if (!pud) { - pr_err("Failed to allocate pud table!\n"); - goto out; - } - - for (j = 0; j < PTRS_PER_PUD; j++) { - addr_pud = addr_p4d + j * PUD_SIZE; - - if (addr_pud > (max_pfn << PAGE_SHIFT)) - break; - - vaddr = (unsigned long)__va(addr_pud); - - pgd_k = pgd_offset_k(vaddr); - p4d_k = p4d_offset(pgd_k, vaddr); - pud[j] = *pud_offset(p4d_k, vaddr); - } - } - pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX; - } - - __flush_tlb_all(); - return save_pgd; -out: - efi_old_memmap_phys_epilog(save_pgd); - return NULL; -} - -static void __init efi_old_memmap_phys_epilog(pgd_t *save_pgd) -{ - /* - * After the lock is released, the original page table is restored. - */ - int pgd_idx, i; - int nr_pgds; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - - nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); - - for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) { - pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); - set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); - - if (!pgd_present(*pgd)) - continue; - - for (i = 0; i < PTRS_PER_P4D; i++) { - p4d = p4d_offset(pgd, - pgd_idx * PGDIR_SIZE + i * P4D_SIZE); - - if (!p4d_present(*p4d)) - continue; - - pud = (pud_t *)p4d_page_vaddr(*p4d); - pud_free(&init_mm, pud); - } - - p4d = (p4d_t *)pgd_page_vaddr(*pgd); - p4d_free(&init_mm, p4d); - } - - kfree(save_pgd); - - __flush_tlb_all(); - early_code_mapping_set_exec(0); -} - EXPORT_SYMBOL_GPL(efi_mm); /* @@ -203,7 +75,7 @@ int __init efi_alloc_page_tables(void) pud_t *pud; gfp_t gfp_mask; - if (efi_enabled(EFI_OLD_MEMMAP)) + if (efi_have_uv1_memmap()) return 0; gfp_mask = GFP_KERNEL | __GFP_ZERO; @@ -244,7 +116,7 @@ void efi_sync_low_kernel_mappings(void) pud_t *pud_k, *pud_efi; pgd_t *efi_pgd = efi_mm.pgd; - if (efi_enabled(EFI_OLD_MEMMAP)) + if (efi_have_uv1_memmap()) return; /* @@ -338,7 +210,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) unsigned npages; pgd_t *pgd = efi_mm.pgd; - if (efi_enabled(EFI_OLD_MEMMAP)) + if (efi_have_uv1_memmap()) return 0; /* @@ -439,7 +311,7 @@ void __init efi_map_region(efi_memory_desc_t *md) unsigned long size = md->num_pages << PAGE_SHIFT; u64 pa = md->phys_addr; - if (efi_enabled(EFI_OLD_MEMMAP)) + if (efi_have_uv1_memmap()) return old_map_region(md); /* @@ -496,26 +368,6 @@ void __init efi_map_region_fixed(efi_memory_desc_t *md) __map_region(md, md->virt_addr); } -void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, - u32 type, u64 attribute) -{ - unsigned long last_map_pfn; - - if (type == EFI_MEMORY_MAPPED_IO) - return ioremap(phys_addr, size); - - last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); - if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { - unsigned long top = last_map_pfn << PAGE_SHIFT; - efi_ioremap(top, size - (top - phys_addr), type, attribute); - } - - if (!(attribute & EFI_MEMORY_WB)) - efi_memory_uc((u64)(unsigned long)__va(phys_addr), size); - - return (void __iomem *)__va(phys_addr); -} - void __init parse_efi_setup(u64 phys_addr, u32 data_len) { efi_setup = phys_addr 
+ sizeof(struct setup_data); @@ -564,7 +416,7 @@ void __init efi_runtime_update_mappings(void) { efi_memory_desc_t *md; - if (efi_enabled(EFI_OLD_MEMMAP)) { + if (efi_have_uv1_memmap()) { if (__supported_pte_mask & _PAGE_NX) runtime_code_page_mkexec(); return; @@ -618,7 +470,7 @@ void __init efi_runtime_update_mappings(void) void __init efi_dump_pagetable(void) { #ifdef CONFIG_EFI_PGT_DUMP - if (efi_enabled(EFI_OLD_MEMMAP)) + if (efi_have_uv1_memmap()) ptdump_walk_pgd_level(NULL, swapper_pg_dir); else ptdump_walk_pgd_level(NULL, efi_mm.pgd); @@ -1045,8 +897,8 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, descriptor_version, virtual_map); - if (efi_enabled(EFI_OLD_MEMMAP)) { - save_pgd = efi_old_memmap_phys_prolog(); + if (efi_have_uv1_memmap()) { + save_pgd = efi_uv1_memmap_phys_prolog(); if (!save_pgd) return EFI_ABORTED; } else { @@ -1065,7 +917,7 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, kernel_fpu_end(); if (save_pgd) - efi_old_memmap_phys_epilog(save_pgd); + efi_uv1_memmap_phys_epilog(save_pgd); else efi_switch_mm(efi_scratch.prev_mm); diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index eb421cb35108..fe46ddf6c761 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -384,10 +384,10 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md) /* * To Do: Remove this check after adding functionality to unmap EFI boot - * services code/data regions from direct mapping area because - * "efi=old_map" maps EFI regions in swapper_pg_dir. + * services code/data regions from direct mapping area because the UV1 + * memory map maps EFI regions in swapper_pg_dir. */ - if (efi_enabled(EFI_OLD_MEMMAP)) + if (efi_have_uv1_memmap()) return; /* @@ -558,7 +558,7 @@ out: return ret; } -static const struct dmi_system_id sgi_uv1_dmi[] = { +static const struct dmi_system_id sgi_uv1_dmi[] __initconst = { { NULL, "SGI UV1", { DMI_MATCH(DMI_PRODUCT_NAME, "Stoutland Platform"), DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), @@ -581,8 +581,15 @@ void __init efi_apply_memmap_quirks(void) } /* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */ - if (dmi_check_system(sgi_uv1_dmi)) - set_bit(EFI_OLD_MEMMAP, &efi.flags); + if (dmi_check_system(sgi_uv1_dmi)) { + if (IS_ENABLED(CONFIG_X86_UV)) { + set_bit(EFI_UV1_MEMMAP, &efi.flags); + } else { + pr_warn("EFI runtime disabled, needs CONFIG_X86_UV=y on UV1\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + efi_memmap_unmap(); + } + } } /* @@ -720,7 +727,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr) /* * Make sure that an efi runtime service caused the page fault. * "efi_mm" cannot be used to check if the page fault had occurred - * in the firmware context because efi=old_map doesn't use efi_pgd. + * in the firmware context because the UV1 memmap doesn't use efi_pgd. */ if (efi_rts_work.efi_rts_id == EFI_NONE) return; -- cgit From 1db91035d01aa8bfa2350c00ccb63d629b4041ad Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 13 Jan 2020 18:22:43 +0100 Subject: efi: Add tracking for dynamically allocated memmaps In preparation for fixing efi_memmap_alloc() leaks, add support for recording whether the memmap was dynamically allocated from slab, memblock, or is the original physical memmap provided by the platform. Given this tracking is established in efi_memmap_alloc() and needs to be carried to efi_memmap_install(), use 'struct efi_memory_map_data' to convey the flags. 
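The intent is easier to see in isolation: the allocation path records how the memory map buffer was obtained in the same structure that describes the map, and later consumers decide from that record how, or whether, to release it, instead of guessing. Below is a minimal stand-alone C sketch of that pattern; the structure loosely mirrors the fields visible in the diff that follows, but the flag names and helpers (MEMMAP_FLAG_SLAB, memmap_alloc(), memmap_release()) are invented for illustration and are not the kernel's actual API:

/*
 * Illustrative sketch only: record how a memory map buffer was allocated
 * so that the code installing or replacing it later knows how to free it.
 * Flag names and helpers are invented; they are not the kernel's API.
 */
#include <stdio.h>
#include <stdlib.h>

#define MEMMAP_FLAG_SLAB	(1u << 0)	/* heap-style allocator */
#define MEMMAP_FLAG_MEMBLOCK	(1u << 1)	/* early boot allocator */

struct memmap_data {
	void		*map;		/* stand-in for data->phys_map */
	size_t		desc_size;	/* size of one descriptor */
	size_t		size;		/* desc_size * number of entries */
	unsigned int	flags;		/* records the allocation source */
};

/* Allocate a map and record where it came from, as efi_memmap_alloc() now does. */
static int memmap_alloc(size_t nr_entries, size_t desc_size, struct memmap_data *data)
{
	data->desc_size = desc_size;
	data->size = nr_entries * desc_size;
	data->map = calloc(nr_entries, desc_size);
	if (!data->map)
		return -1;
	data->flags = MEMMAP_FLAG_SLAB;	/* everything is "slab" in this demo */
	return 0;
}

/* The consumer releases the buffer based on the recorded flags, not guesswork. */
static void memmap_release(struct memmap_data *data)
{
	if (data->flags & MEMMAP_FLAG_SLAB)
		free(data->map);
	/* a memblock-backed or firmware-provided map would be left alone here */
	data->map = NULL;
}

int main(void)
{
	struct memmap_data data = { 0 };

	if (memmap_alloc(16, 48, &data) != 0)
		return 1;
	printf("allocated %zu bytes, flags=%#x\n", data.size, data.flags);
	memmap_release(&data);
	return 0;
}

In the kernel the same record travels from efi_memmap_alloc() to efi_memmap_install() via struct efi_memory_map_data, as the diff below shows.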
Some small cleanups result from this reorganization, specifically the removal of local variables for 'phys' and 'size' that are already tracked in @data. Signed-off-by: Dan Williams Signed-off-by: Ard Biesheuvel Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200113172245.27925-12-ardb@kernel.org --- arch/x86/platform/efi/efi.c | 10 ++++++++-- arch/x86/platform/efi/quirks.c | 23 +++++++++-------------- 2 files changed, 17 insertions(+), 16 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 4e46d2d24352..59f7f6d60cf6 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -304,10 +304,16 @@ static void __init efi_clean_memmap(void) } if (n_removal > 0) { - u64 size = efi.memmap.nr_map - n_removal; + struct efi_memory_map_data data = { + .phys_map = efi.memmap.phys_map, + .desc_version = efi.memmap.desc_version, + .desc_size = efi.memmap.desc_size, + .size = data.desc_size * (efi.memmap.nr_map - n_removal), + .flags = 0, + }; pr_warn("Removing %d invalid memory map entries.\n", n_removal); - efi_memmap_install(efi.memmap.phys_map, size); + efi_memmap_install(&data); } } diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index fe46ddf6c761..46807b7606da 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(efi_query_variable_store); */ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) { - phys_addr_t new_phys, new_size; + struct efi_memory_map_data data = { 0 }; struct efi_mem_range mr; efi_memory_desc_t md; int num_entries; @@ -271,24 +271,21 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) num_entries = efi_memmap_split_count(&md, &mr.range); num_entries += efi.memmap.nr_map; - new_size = efi.memmap.desc_size * num_entries; - - new_phys = efi_memmap_alloc(num_entries); - if (!new_phys) { + if (efi_memmap_alloc(num_entries, &data) != 0) { pr_err("Could not allocate boot services memmap\n"); return; } - new = early_memremap(new_phys, new_size); + new = early_memremap(data.phys_map, data.size); if (!new) { pr_err("Failed to map new boot services memmap\n"); return; } efi_memmap_insert(&efi.memmap, new, &mr); - early_memunmap(new, new_size); + early_memunmap(new, data.size); - efi_memmap_install(new_phys, num_entries); + efi_memmap_install(&data); e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); e820__update_table(e820_table); } @@ -407,7 +404,7 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md) void __init efi_free_boot_services(void) { - phys_addr_t new_phys, new_size; + struct efi_memory_map_data data = { 0 }; efi_memory_desc_t *md; int num_entries = 0; void *new, *new_md; @@ -462,14 +459,12 @@ void __init efi_free_boot_services(void) if (!num_entries) return; - new_size = efi.memmap.desc_size * num_entries; - new_phys = efi_memmap_alloc(num_entries); - if (!new_phys) { + if (efi_memmap_alloc(num_entries, &data) != 0) { pr_err("Failed to allocate new EFI memmap\n"); return; } - new = memremap(new_phys, new_size, MEMREMAP_WB); + new = memremap(data.phys_map, data.size, MEMREMAP_WB); if (!new) { pr_err("Failed to map new EFI memmap\n"); return; @@ -493,7 +488,7 @@ void __init efi_free_boot_services(void) memunmap(new); - if (efi_memmap_install(new_phys, num_entries)) { + if (efi_memmap_install(&data) != 0) { pr_err("Could not install new EFI memmap\n"); return; } -- cgit From 3cc028619e284188cdde652631e1c3c5a83692b9 Mon Sep 17 00:00:00 2001 
From: Ard Biesheuvel Date: Sat, 18 Jan 2020 17:57:03 +0100 Subject: efi/x86: avoid KASAN false positives when accessing the 1:1 mapping When installing the EFI virtual address map during early boot, we access the EFI system table to retrieve the 1:1 mapped address of the SetVirtualAddressMap() EFI runtime service. This memory is not known to KASAN, so on KASAN enabled builds, this may result in a splat like ================================================================== BUG: KASAN: user-memory-access in efi_set_virtual_address_map+0x141/0x354 Read of size 4 at addr 000000003fbeef38 by task swapper/0/0 CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.5.0-rc5+ #758 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 Call Trace: dump_stack+0x8b/0xbb ? efi_set_virtual_address_map+0x141/0x354 ? efi_set_virtual_address_map+0x141/0x354 __kasan_report+0x176/0x192 ? efi_set_virtual_address_map+0x141/0x354 kasan_report+0xe/0x20 efi_set_virtual_address_map+0x141/0x354 ? efi_thunk_runtime_setup+0x148/0x148 ? __inc_numa_state+0x19/0x90 ? memcpy+0x34/0x50 efi_enter_virtual_mode+0x5fd/0x67d start_kernel+0x5cd/0x682 ? mem_encrypt_init+0x6/0x6 ? x86_family+0x5/0x20 ? load_ucode_bsp+0x46/0x154 secondary_startup_64+0xa4/0xb0 ================================================================== Since this code runs only a single time during early boot, let's annotate it as __no_sanitize_address so KASAN disregards it entirely. Fixes: 698294704573 ("efi/x86: Split SetVirtualAddressMap() wrappers into ...") Reported-by: Qian Cai Signed-off-by: Ard Biesheuvel Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/efi_64.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 8d1869ff1033..e2accfe636bd 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -543,7 +543,7 @@ static DEFINE_SPINLOCK(efi_runtime_lock); __s; \ }) -static efi_status_t __init +static efi_status_t __init __no_sanitize_address efi_thunk_set_virtual_address_map(unsigned long memory_map_size, unsigned long descriptor_size, u32 descriptor_version, @@ -882,10 +882,11 @@ void __init efi_thunk_runtime_setup(void) efi.query_capsule_caps = efi_thunk_query_capsule_caps; } -efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, - unsigned long descriptor_size, - u32 descriptor_version, - efi_memory_desc_t *virtual_map) +efi_status_t __init __no_sanitize_address +efi_set_virtual_address_map(unsigned long memory_map_size, + unsigned long descriptor_size, + u32 descriptor_version, + efi_memory_desc_t *virtual_map) { efi_status_t status; unsigned long flags; -- cgit From ac6119e7f25b842fc061e8aec88c4f32d3bc28ef Mon Sep 17 00:00:00 2001
But with instrumentation enabled, we may get build errors like: ld: arch/x86/platform/efi/efi_64.o: in function `efi_thunk_set_virtual_address_map': ld: arch/x86/platform/efi/efi_64.o: in function `efi_set_virtual_address_map': in builds where CONFIG_EFI=y but CONFIG_EFI_MIXED or CONFIG_X86_UV are not defined, even though the invocations are conditional on IS_ENABLED() checks against the respective Kconfig symbols. So let's disable instrumentation entirely for this subdirectory, which isn't that useful here to begin with. Signed-off-by: Ard Biesheuvel Signed-off-by: Ingo Molnar Cc: linux-efi@vger.kernel.org --- arch/x86/platform/efi/Makefile | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86/platform/efi') diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile index 7ec3a8b31f8b..84b09c230cbd 100644 --- a/arch/x86/platform/efi/Makefile +++ b/arch/x86/platform/efi/Makefile @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y +KASAN_SANITIZE := n +GCOV_PROFILE := n obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o -- cgit
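The failure mode described in this last patch can be reproduced outside the kernel: a call guarded by a compile-time-constant condition only disappears if the compiler proves the branch dead and drops the call, so anything that keeps the call site alive turns a never-executed call into a hard link error. A minimal stand-alone sketch, with invented names (CONFIG_FOO, foo_impl):

/* main.c: illustrative only; CONFIG_FOO and foo_impl() are made up. */
#include <stdio.h>

#ifndef CONFIG_FOO
#define CONFIG_FOO 0	/* stand-in for the kernel's IS_ENABLED(CONFIG_...) */
#endif

/* Declared here, but only ever defined in an object that is built when
 * the option is enabled, like the mixed mode / UV helpers in efi_64.c. */
void foo_impl(void);

int main(void)
{
	if (CONFIG_FOO)		/* compile-time constant condition */
		foo_impl();	/* must be optimized away when CONFIG_FOO is 0 */
	puts("ok");
	return 0;
}

Built with gcc -O2 and CONFIG_FOO left at 0, the dead call is optimized away and the program links even though foo_impl() is defined nowhere; if the call site survives into the object file, for instance because instrumentation keeps it alive, linking fails with an undefined reference to foo_impl, which is the same class of error as the efi_thunk_set_virtual_address_map and efi_set_virtual_address_map references quoted above.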