 arch/arm64/kernel/efi.c | 189 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 148 insertions(+), 41 deletions(-)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 8d36e66a6e64..a81cb4aa4738 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -9,31 +9,60 @@
#include <linux/efi.h>
#include <linux/init.h>
-#include <linux/percpu.h>
+#include <linux/kmemleak.h>
+#include <linux/kthread.h>
+#include <linux/screen_info.h>
+#include <linux/vmalloc.h>
#include <asm/efi.h>
+#include <asm/stacktrace.h>
+#include <asm/vmap_stack.h>
+
+static bool region_is_misaligned(const efi_memory_desc_t *md)
+{
+ if (PAGE_SIZE == EFI_PAGE_SIZE)
+ return false;
+ return !PAGE_ALIGNED(md->phys_addr) ||
+ !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
+}
/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
* set. Also take the new (optional) RO/XP bits into account.
*/
-static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
+static __init ptdesc_t create_mapping_protection(efi_memory_desc_t *md)
{
u64 attr = md->attribute;
u32 type = md->type;
- if (type == EFI_MEMORY_MAPPED_IO)
- return PROT_DEVICE_nGnRE;
+ if (type == EFI_MEMORY_MAPPED_IO) {
+ pgprot_t prot = __pgprot(PROT_DEVICE_nGnRE);
+
+ if (arm64_is_protected_mmio(md->phys_addr,
+ md->num_pages << EFI_PAGE_SHIFT))
+ prot = pgprot_encrypted(prot);
+ else
+ prot = pgprot_decrypted(prot);
+ return pgprot_val(prot);
+ }
+
+ if (region_is_misaligned(md)) {
+ static bool __initdata code_is_misaligned;

- if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
- "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
/*
- * If the region is not aligned to the page size of the OS, we
- * can not use strict permissions, since that would also affect
- * the mapping attributes of the adjacent regions.
+ * Regions that are not aligned to the OS page size cannot be
+ * mapped with strict permissions, as those might interfere
+ * with the permissions that are needed by the adjacent
+ * region's mapping. However, if we haven't encountered any
+ * misaligned runtime code regions so far, we can safely use
+ * non-executable permissions for non-code regions.
*/
- return pgprot_val(PAGE_KERNEL_EXEC);
+ code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
+
+ return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
+ : pgprot_val(PAGE_KERNEL);
+ }
/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -54,29 +83,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
return pgprot_val(PAGE_KERNEL_EXEC);
}
-/* we will fill this structure from the stub, so don't put it in .bss */
-struct screen_info screen_info __section(".data");
-EXPORT_SYMBOL(screen_info);
-
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
- pteval_t prot_val = create_mapping_protection(md);
+ ptdesc_t prot_val = create_mapping_protection(md);
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);
- if (!PAGE_ALIGNED(md->phys_addr) ||
- !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
- /*
- * If the end address of this region is not aligned to page
- * size, the mapping is rounded up, and may end up sharing a
- * page frame with the next UEFI memory region. If we create
- * a block entry now, we may need to split it again when mapping
- * the next region, and support for that is going to be removed
- * from the MMU routines. So avoid block mappings altogether in
- * that case.
- */
+ /*
+ * If this region is not aligned to the page size used by the OS, the
+ * mapping will be rounded outwards, and may end up sharing a page
+ * frame with an adjacent runtime memory region. Given that the page
+ * table descriptor covering the shared page will be rewritten when the
+ * adjacent region gets mapped, we must avoid block mappings here so we
+ * don't have to worry about splitting them when that happens.
+ */
+ if (region_is_misaligned(md))
page_mappings_only = true;
- }
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
@@ -84,25 +106,39 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
return 0;
}
+struct set_perm_data {
+ const efi_memory_desc_t *md;
+ bool has_bti;
+};
+
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
- efi_memory_desc_t *md = data;
- pte_t pte = READ_ONCE(*ptep);
+ struct set_perm_data *spd = data;
+ const efi_memory_desc_t *md = spd->md;
+ pte_t pte = __ptep_get(ptep);
if (md->attribute & EFI_MEMORY_RO)
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
if (md->attribute & EFI_MEMORY_XP)
pte = set_pte_bit(pte, __pgprot(PTE_PXN));
- set_pte(ptep, pte);
+ else if (system_supports_bti_kernel() && spd->has_bti)
+ pte = set_pte_bit(pte, __pgprot(PTE_GP));
+ __set_pte(ptep, pte);
return 0;
}
int __init efi_set_mapping_permissions(struct mm_struct *mm,
- efi_memory_desc_t *md)
+ efi_memory_desc_t *md,
+ bool has_bti)
{
+ struct set_perm_data data = { md, has_bti };
+
BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_DATA);
+ if (region_is_misaligned(md))
+ return 0;
+
/*
* Calling apply_to_page_range() is only safe on regions that are
* guaranteed to be mapped down to pages. Since we are only called
@@ -112,7 +148,7 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
*/
return apply_to_page_range(mm, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
- set_permissions, md);
+ set_permissions, &data);
}
/*
@@ -130,27 +166,98 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
return s;
}
-asmlinkage DEFINE_PER_CPU(u64, __efi_rt_asm_recover_sp);
+void arch_efi_call_virt_setup(void)
+{
+ efi_runtime_assert_lock_held();

-asmlinkage efi_status_t __efi_rt_asm_recover(void);
+ if (preemptible() && (current->flags & PF_KTHREAD)) {
+ /*
+ * Disable migration to ensure that a preempted EFI runtime
+ * service call will be resumed on the same CPU. This avoids
+ * potential issues with EFI runtime calls that are preempted
+ * while polling for an asynchronous completion of a secure
+ * firmware call, which may not permit the CPU to change.
+ */
+ migrate_disable();
+ kthread_use_mm(&efi_mm);
+ } else {
+ efi_virtmap_load();
+ }

-asmlinkage efi_status_t efi_handle_runtime_exception(const char *f)
+ /*
+ * Enable access to the valid TTBR0_EL1 and invoke the errata
+ * workaround directly since there is no return from exception when
+ * invoking the EFI run-time services.
+ */
+ uaccess_ttbr0_enable();
+ post_ttbr_update_workaround();
+
+ __efi_fpsimd_begin();
+}
+
+void arch_efi_call_virt_teardown(void)
{
- pr_err(FW_BUG "Synchronous exception occurred in EFI runtime service %s()\n", f);
- clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
- return EFI_ABORTED;
+ __efi_fpsimd_end();
+
+ /*
+ * Defer the switch to the current thread's TTBR0_EL1 until
+ * uaccess_enable(). Do so before efi_virtmap_unload() updates the
+ * saved TTBR0 value, so the userland page tables are not activated
+ * inadvertently over the back of an exception.
+ */
+ uaccess_ttbr0_disable();
+
+ if (preemptible() && (current->flags & PF_KTHREAD)) {
+ kthread_unuse_mm(&efi_mm);
+ migrate_enable();
+ } else {
+ efi_virtmap_unload();
+ }
}
+asmlinkage u64 *efi_rt_stack_top __ro_after_init;
+
+asmlinkage efi_status_t __efi_rt_asm_recover(void);
+
bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
{
/* Check whether the exception occurred while running the firmware */
- if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64)
+ if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
return false;
pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- dump_stack();
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ regs->regs[0] = EFI_ABORTED;
+ regs->regs[30] = efi_rt_stack_top[-1];
+ regs->pc = (u64)__efi_rt_asm_recover;
+
+ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+ regs->regs[18] = efi_rt_stack_top[-2];

- regs->pc = (u64)__efi_rt_asm_recover;
return true;
}
+
+/* EFI requires 8 KiB of stack space for runtime services */
+static_assert(THREAD_SIZE >= SZ_8K);
+
+static int __init arm64_efi_rt_init(void)
+{
+ void *p;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
+ p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE);
+ if (!p) {
+ pr_warn("Failed to allocate EFI runtime stack\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return -ENOMEM;
+ }
+
+ kmemleak_not_leak(p);
+ efi_rt_stack_top = p + THREAD_SIZE;
+ return 0;
+}
+core_initcall(arm64_efi_rt_init);
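
The alignment problem that the new region_is_misaligned() helper guards against is easy to model outside the kernel: UEFI always describes memory in 4 KiB (EFI_PAGE_SIZE) units, so a region can be perfectly valid for the firmware yet start or end in the middle of a 16 KiB or 64 KiB kernel page. Below is a minimal user-space sketch of the same check; the macro values and the sample descriptor are illustrative stand-ins, not values taken from this patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel macros the patch relies on; the values model
 * a 64 KiB page arm64 kernel and are purely illustrative. */
#define EFI_PAGE_SHIFT  12                      /* UEFI always uses 4 KiB pages */
#define EFI_PAGE_SIZE   (1UL << EFI_PAGE_SHIFT)
#define PAGE_SIZE       (64UL * 1024)           /* e.g. CONFIG_ARM64_64K_PAGES */
#define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

struct efi_memory_desc {
        uint64_t phys_addr;
        uint64_t num_pages;     /* counted in EFI (4 KiB) pages */
};

/* Same test as the region_is_misaligned() helper added by the patch. */
static bool region_is_misaligned(const struct efi_memory_desc *md)
{
        if (PAGE_SIZE == EFI_PAGE_SIZE)
                return false;   /* on 4 KiB page kernels, EFI regions always align */
        return !PAGE_ALIGNED(md->phys_addr) ||
               !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}

int main(void)
{
        /* 4 KiB aligned (fine for the firmware), but not 64 KiB aligned */
        struct efi_memory_desc md = { .phys_addr = 0x40001000, .num_pages = 16 };

        printf("misaligned for this kernel: %s\n",
               region_is_misaligned(&md) ? "yes" : "no");
        return 0;
}

Note the short-circuit in the first branch: on a 4 KiB page kernel the helper is always false, which is also why the WARN_ONCE removed by this patch could never fire there.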
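
The recovery machinery at the end of the diff (efi_rt_stack_top, __efi_rt_asm_recover, and the register rewriting in efi_runtime_fixup_exception) amounts to: if buggy firmware takes a synchronous exception, patch the saved registers so the interrupted call "returns" EFI_ABORTED through a fixed recovery point instead of bringing the kernel down. A loose user-space analogy using sigsetjmp()/siglongjmp() — purely illustrative, not how the kernel implements it:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

/* Fixed recovery point, playing the role of __efi_rt_asm_recover. */
static sigjmp_buf efi_recover;

static void fault_handler(int sig)
{
        (void)sig;
        /* Like efi_runtime_fixup_exception(): rather than letting the fault
         * take everything down, redirect control to the recovery point. */
        siglongjmp(efi_recover, 1);
}

/* Hypothetical stand-in for a firmware call that dereferences a bad pointer */
static long buggy_runtime_service(void)
{
        return *(volatile long *)8;     /* synchronous exception */
}

int main(void)
{
        struct sigaction sa = { .sa_handler = fault_handler };

        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        if (sigsetjmp(efi_recover, 1) == 0)
                printf("service returned %ld\n", buggy_runtime_service());
        else
                printf("service aborted after a synchronous exception "
                       "(the kernel returns EFI_ABORTED here)\n");
        return 0;
}

The kernel version additionally has to restore the link register — and, with CONFIG_SHADOW_CALL_STACK, x18 — from the top of the dedicated EFI runtime stack, which is what the efi_rt_stack_top[-1] and efi_rt_stack_top[-2] loads in the fixup handler above are doing.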