author | Dimitris Papastamos <dimitris.papastamos@arm.com> | 2017-12-18 13:46:21 +0000
committer | Kostya Porotchkin <kostap@marvell.com> | 2018-04-15 16:41:46 +0300
commit | 68eca6129d09397f14e72ac02b38d23d582bbfa6 (patch)
tree | 0c1e87fb478d41e228d094c729c7d5994e8b2336
parent | 48e008805bd689471786f250a3b6532fc6cf2f64 (diff)
Workaround for CVE-2017-5715 on Cortex A73 and A75
Invalidate the Branch Target Buffer (BTB) on entry to EL3 by
temporarily dropping into AArch32 Secure-EL1 and executing the
`BPIALL` instruction.
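For reference, the AArch32 stub itself is only three instructions. Because BL31 is assembled for AArch64, the patch hand-assembles the A32 opcodes and emits them as `.word` data; the sketch below mirrors the `EMIT_*` macros and one stub from the new file (the immediate passed to `EMIT_MOV_R0_IMM` varies per stub):

    #define EMIT_BPIALL		0xee070fd5	/* mcr p15, 0, r0, c7, c5, 6 (BPIALL) */
    #define EMIT_MOV_R0_IMM(v)	0xe3a0000##v	/* mov r0, #v */
    #define EMIT_SMC		0xe1600070	/* smc #0 */

    aarch32_stub:
	.word	EMIT_BPIALL		/* invalidate all branch predictors */
	.word	EMIT_MOV_R0_IMM(1)	/* tag the original exception type */
	.word	EMIT_SMC		/* trap straight back into EL3 */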
This is achieved by using 3 vector tables: the runtime vector table, which handles exceptions as usual, and 2 additional tables, `vbar0` and `vbar1`, which are required to implement this workaround.
The sequence of events for handling a single exception is
as follows:
1) Install vector table `vbar0`, which saves the CPU context on entry
to EL3 and sets up the Secure-EL1 context to execute in AArch32 mode
with the MMU disabled and the I$ enabled. This is the default vector table.
2) Before doing an ERET into Secure-EL1, switch vbar to point to
another vector table, `vbar1` (see the excerpt after this list). This
table is required to restore the EL3 state when returning from the
workaround, before proceeding with normal EL3 exception handling.
3) While in Secure-EL1, the `BPIALL` instruction is executed and an
SMC call back to EL3 is performed.
4) On entry to EL3 from Secure-EL1, the saved context from step 1) is
restored. The vbar is switched to point to `vbar0` in preparation to
handle further exceptions. Finally a branch to the runtime vector
table entry is taken to complete the handling of the original
exception.
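As a concrete illustration of step 2), the tail of the `enter_workaround` macro in the patch below loads the crafted values from a per-stub context block and applies them immediately before the ERET (excerpted and lightly commented; the register saves and the zeroing of SCR_EL3 are omitted here):

	adr	x4, \_stub_name		/* per-stub context block */
	ldp	x0, x1, [x4, #0]	/* SPSR_EL3 value, vbar1 address */
	ldp	x2, x3, [x4, #16]	/* SCTLR_EL1 value, stub address */

	msr	spsr_el3, x0		/* AArch32 Supervisor mode, AIF masked */
	msr	vbar_el3, x1		/* vbar1 will catch the SMC from the stub */
	msr	sctlr_el1, x2		/* MMU off, I$ on in S-EL1 */
	msr	elr_el3, x3		/* "return" address: the AArch32 stub */
	eret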
This workaround is enabled by default on the affected CPUs.
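Concretely, the hook is in the CPU reset function: when BL31 is built with `WORKAROUND_CVE_2017_5715`, reset installs `vbar0` as the live EL3 vector table (excerpt from the Cortex-A73 change in this patch):

    func cortex_a73_reset_func
    #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
	msr	vbar_el3, x0
    #endif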
NOTE
====
There are 4 different stubs in Secure-EL1. Each stub corresponds to
an exception type such as Sync/IRQ/FIQ/SError. Each stub moves a
different value (1, 2, 4 or 8 respectively) into `R0` before doing an
SMC call back into EL3. Without this piece of information it would
not be possible to know what the original exception type was, as we
cannot use `ESR_EL3` to distinguish between IRQs and FIQs.
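On the return path this tag is decoded with bit tests: exactly one of bits 0-3 is set, so three `tbnz` instructions plus a fall-through cover all four cases (excerpted from the `vbar1` sync handler below):

	tbnz	x0, #1, workaround_bpiall_vbar1_irq	/* R0 == 2: IRQ */
	tbnz	x0, #2, workaround_bpiall_vbar1_fiq	/* R0 == 4: FIQ */
	tbnz	x0, #3, workaround_bpiall_vbar1_serror	/* R0 == 8: SError */

	/* Fallthrough: R0 == 1, the original exception was Sync */
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	sync_exception_aarch64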
Change-Id: I90b32d14a3735290b48685d43c70c99daaa4b434
Signed-off-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
Reviewed-on: http://vgitil04.il.marvell.com:8080/52573
Tested-by: iSoC Platform CI <ykjenk@marvell.com>
Reviewed-by: Kostya Porotchkin <kostap@marvell.com>
-rw-r--r-- | bl31/bl31.mk                                       |   3
-rw-r--r-- | include/lib/aarch64/arch.h                         |   5
-rw-r--r-- | include/lib/el3_runtime/aarch64/context.h          |  54
-rw-r--r-- | lib/cpus/aarch64/cortex_a73.S                      |   5
-rw-r--r-- | lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S | 372
5 files changed, 411 insertions(+), 28 deletions(-)
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 4be3e600..9f6de041 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -45,7 +45,8 @@ BL31_SOURCES	+=	lib/pmf/pmf_main.c
 endif
 
 ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL31_SOURCES	+=	lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+BL31_SOURCES	+=	lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S \
+			lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
 endif
 
 BL31_LINKERFILE	:=	bl31/bl31.ld.S
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 708c90e5..bf07b014 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -235,6 +235,11 @@
 #define SPSR_T_ARM	0x0
 #define SPSR_T_THUMB	0x1
 
+#define SPSR_M_SHIFT	4
+#define SPSR_M_MASK	0x1
+#define SPSR_M_AARCH64	0x0
+#define SPSR_M_AARCH32	0x1
+
 #define DISABLE_ALL_EXCEPTIONS \
 		(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index b528c03b..0773506e 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __CONTEXT_H__
@@ -70,12 +46,26 @@
 #define CTX_GPREG_SP_EL0	0xf8
 #define CTX_GPREGS_END		0x100
 
+#if WORKAROUND_CVE_2017_5715
+#define CTX_CVE_2017_5715_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_CVE_2017_5715_QUAD0	0x0
+#define CTX_CVE_2017_5715_QUAD1	0x8
+#define CTX_CVE_2017_5715_QUAD2	0x10
+#define CTX_CVE_2017_5715_QUAD3	0x18
+#define CTX_CVE_2017_5715_QUAD4	0x20
+#define CTX_CVE_2017_5715_QUAD5	0x28
+#define CTX_CVE_2017_5715_END	0x30
+#else
+#define CTX_CVE_2017_5715_OFFSET	CTX_GPREGS_OFFSET
+#define CTX_CVE_2017_5715_END	CTX_GPREGS_END
+#endif
+
 /*******************************************************************************
  * Constants that allow assembler code to access members of and the 'el3_state'
  * structure at their correct offsets. Note that some of the registers are only
  * 32-bits wide but are stored as 64-bit values for convenience
  ******************************************************************************/
-#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_EL3STATE_OFFSET	(CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_END)
 #define CTX_SCR_EL3		0x0
 #define CTX_RUNTIME_SP		0x8
 #define CTX_SPSR_EL3		0x10
@@ -205,6 +195,9 @@
 
 /* Constants to determine the size of individual context structures */
 #define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
+#if WORKAROUND_CVE_2017_5715
+#define CTX_CVE_2017_5715_ALL	(CTX_CVE_2017_5715_END >> DWORD_SHIFT)
+#endif
 #define CTX_SYSREG_ALL		(CTX_SYSREGS_END >> DWORD_SHIFT)
 #if CTX_INCLUDE_FPREGS
 #define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
@@ -220,6 +213,10 @@
  */
 DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
 
+#if WORKAROUND_CVE_2017_5715
+DEFINE_REG_STRUCT(cve_2017_5715_regs, CTX_CVE_2017_5715_ALL);
+#endif
+
 /*
  * AArch64 EL1 system register context structure for preserving the
  * architectural state during switches from one security state to
@@ -261,6 +258,9 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
  */
 typedef struct cpu_context {
 	gp_regs_t gpregs_ctx;
+#if WORKAROUND_CVE_2017_5715
+	cve_2017_5715_regs_t cve_2017_5715_regs_ctx;
+#endif
 	el3_state_t el3state_ctx;
 	el1_sys_regs_t sysregs_ctx;
 #if CTX_INCLUDE_FPREGS
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 93ea401e..bcf28f49 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -60,6 +60,11 @@ func cortex_a73_disable_smp
 endfunc cortex_a73_disable_smp
 
 func cortex_a73_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	msr	vbar_el3, x0
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * Clobbers : x0
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
new file mode 100644
index 00000000..cd29266e
--- /dev/null
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+	.globl	workaround_bpiall_vbar0_runtime_exceptions
+
+#define EMIT_BPIALL		0xee070fd5
+#define EMIT_MOV_R0_IMM(v)	0xe3a0000##v
+#define EMIT_SMC		0xe1600070
+
+	.macro	enter_workaround _stub_name
+	/* Save GP regs */
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+	adr	x4, \_stub_name
+
+	/*
+	 * Load SPSR_EL3 and VBAR_EL3. SPSR_EL3 is set up to have
+	 * all interrupts masked in preparation to running the workaround
+	 * stub in S-EL1. VBAR_EL3 points to the vector table that
+	 * will handle the SMC back from the workaround stub.
+	 */
+	ldp	x0, x1, [x4, #0]
+
+	/*
+	 * Load SCTLR_EL1 and ELR_EL3. SCTLR_EL1 is configured to disable
+	 * the MMU in S-EL1. ELR_EL3 points to the appropriate stub in S-EL1.
+	 */
+	ldp	x2, x3, [x4, #16]
+
+	mrs	x4, scr_el3
+	mrs	x5, spsr_el3
+	mrs	x6, elr_el3
+	mrs	x7, sctlr_el1
+	mrs	x8, esr_el3
+
+	/* Preserve system registers in the workaround context */
+	stp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+	stp	x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+	stp	x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+	/*
+	 * Setting SCR_EL3 to all zeroes means that the NS, RW
+	 * and SMD bits are configured as expected.
+	 */
+	msr	scr_el3, xzr
+
+	/*
+	 * Reload system registers with the crafted values
+	 * in preparation for entry in S-EL1.
+	 */
+	msr	spsr_el3, x0
+	msr	vbar_el3, x1
+	msr	sctlr_el1, x2
+	msr	elr_el3, x3
+
+	eret
+	.endm
+
+	/* ---------------------------------------------------------------------
+	 * This vector table is used at runtime to enter the workaround at
+	 * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
+	 * is not enabled, the existing runtime exception vector table is used.
+	 * ---------------------------------------------------------------------
+	 */
+vector_base workaround_bpiall_vbar0_runtime_exceptions
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+	b	sync_exception_sp_el0
+	/*
+	 * Since each vector table entry is 128 bytes, we can store the
+	 * stub context in the unused space to minimize memory footprint.
+	 */
+aarch32_stub_smc:
+	.word	EMIT_BPIALL
+	.word	EMIT_MOV_R0_IMM(1)
+	.word	EMIT_SMC
+aarch32_stub_ctx_smc:
+	/* Mask all interrupts and set AArch32 Supervisor mode */
+	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+	         MODE32_svc << MODE32_SHIFT)
+
+	/*
+	 * VBAR_EL3 points to vbar1 which is the vector table
+	 * used while the workaround is executing.
+	 */
+	.quad	workaround_bpiall_vbar1_runtime_exceptions
+
+	/* Setup SCTLR_EL1 with MMU off and I$ on */
+	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+	/* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
+	.quad	aarch32_stub_smc
+	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
+
+vector_entry workaround_bpiall_vbar0_irq_sp_el0
+	b	irq_sp_el0
+aarch32_stub_irq:
+	.word	EMIT_BPIALL
+	.word	EMIT_MOV_R0_IMM(2)
+	.word	EMIT_SMC
+aarch32_stub_ctx_irq:
+	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+	         MODE32_svc << MODE32_SHIFT)
+	.quad	workaround_bpiall_vbar1_runtime_exceptions
+	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+	.quad	aarch32_stub_irq
+	check_vector_size workaround_bpiall_vbar0_irq_sp_el0
+
+vector_entry workaround_bpiall_vbar0_fiq_sp_el0
+	b	fiq_sp_el0
+aarch32_stub_fiq:
+	.word	EMIT_BPIALL
+	.word	EMIT_MOV_R0_IMM(4)
+	.word	EMIT_SMC
+aarch32_stub_ctx_fiq:
+	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+	         MODE32_svc << MODE32_SHIFT)
+	.quad	workaround_bpiall_vbar1_runtime_exceptions
+	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+	.quad	aarch32_stub_fiq
+	check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
+
+vector_entry workaround_bpiall_vbar0_serror_sp_el0
+	b	serror_sp_el0
+aarch32_stub_serror:
+	.word	EMIT_BPIALL
+	.word	EMIT_MOV_R0_IMM(8)
+	.word	EMIT_SMC
+aarch32_stub_ctx_serror:
+	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+	         MODE32_svc << MODE32_SHIFT)
+	.quad	workaround_bpiall_vbar1_runtime_exceptions
+	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+	.quad	aarch32_stub_serror
+	check_vector_size workaround_bpiall_vbar0_serror_sp_el0
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
+	b	sync_exception_sp_elx
+	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
+
+vector_entry workaround_bpiall_vbar0_irq_sp_elx
+	b	irq_sp_elx
+	check_vector_size workaround_bpiall_vbar0_irq_sp_elx
+
+vector_entry workaround_bpiall_vbar0_fiq_sp_elx
+	b	fiq_sp_elx
+	check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
+
+vector_entry workaround_bpiall_vbar0_serror_sp_elx
+	b	serror_sp_elx
+	check_vector_size workaround_bpiall_vbar0_serror_sp_elx
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
+	enter_workaround aarch32_stub_ctx_smc
+	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
+
+vector_entry workaround_bpiall_vbar0_irq_aarch64
+	enter_workaround aarch32_stub_ctx_irq
+	check_vector_size workaround_bpiall_vbar0_irq_aarch64
+
+vector_entry workaround_bpiall_vbar0_fiq_aarch64
+	enter_workaround aarch32_stub_ctx_fiq
+	check_vector_size workaround_bpiall_vbar0_fiq_aarch64
+
+vector_entry workaround_bpiall_vbar0_serror_aarch64
+	enter_workaround aarch32_stub_ctx_serror
+	check_vector_size workaround_bpiall_vbar0_serror_aarch64
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
+	enter_workaround aarch32_stub_ctx_smc
+	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar0_irq_aarch32
+	enter_workaround aarch32_stub_ctx_irq
+	check_vector_size workaround_bpiall_vbar0_irq_aarch32
+
+vector_entry workaround_bpiall_vbar0_fiq_aarch32
+	enter_workaround aarch32_stub_ctx_fiq
+	check_vector_size workaround_bpiall_vbar0_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar0_serror_aarch32
+	enter_workaround aarch32_stub_ctx_serror
+	check_vector_size workaround_bpiall_vbar0_serror_aarch32
+
+	/* ---------------------------------------------------------------------
+	 * This vector table is used while the workaround is executing. It
+	 * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
+	 * workaround stubs to enter EL3 from S-EL1. It restores the previous
+	 * EL3 state before proceeding with the normal runtime exception vector.
+	 * ---------------------------------------------------------------------
+	 */
+vector_base workaround_bpiall_vbar1_runtime_exceptions
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
+	b	report_unhandled_exception
+	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
+
+vector_entry workaround_bpiall_vbar1_irq_sp_el0
+	b	report_unhandled_interrupt
+	check_vector_size workaround_bpiall_vbar1_irq_sp_el0
+
+vector_entry workaround_bpiall_vbar1_fiq_sp_el0
+	b	report_unhandled_interrupt
+	check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
+
+vector_entry workaround_bpiall_vbar1_serror_sp_el0
+	b	report_unhandled_exception
+	check_vector_size workaround_bpiall_vbar1_serror_sp_el0
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
+	b	report_unhandled_exception
+	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
+
+vector_entry workaround_bpiall_vbar1_irq_sp_elx
+	b	report_unhandled_interrupt
+	check_vector_size workaround_bpiall_vbar1_irq_sp_elx
+
+vector_entry workaround_bpiall_vbar1_fiq_sp_elx
+	b	report_unhandled_interrupt
+	check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
+
+vector_entry workaround_bpiall_vbar1_serror_sp_elx
+	b	report_unhandled_exception
+	check_vector_size workaround_bpiall_vbar1_serror_sp_elx
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
+	b	report_unhandled_exception
+	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
+
+vector_entry workaround_bpiall_vbar1_irq_aarch64
+	b	report_unhandled_interrupt
+	check_vector_size workaround_bpiall_vbar1_irq_aarch64
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch64
+	b	report_unhandled_interrupt
+	check_vector_size workaround_bpiall_vbar1_fiq_aarch64
+
+vector_entry workaround_bpiall_vbar1_serror_aarch64
+	b	report_unhandled_exception
+	check_vector_size workaround_bpiall_vbar1_serror_aarch64
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+	/* Restore register state from the workaround context */
+	ldp	x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+	ldp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+	ldp	x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+	/* Apply the restored system register state */
+	msr	scr_el3, x2
+	msr	spsr_el3, x3
+	msr	elr_el3, x4
+	msr	sctlr_el1, x5
+	msr	esr_el3, x6
+
+	/*
+	 * Workaround is complete, so swap VBAR_EL3 to point
+	 * to workaround entry table in preparation for subsequent
+	 * Sync/IRQ/FIQ/SError exceptions.
+	 */
+	adr	x2, workaround_bpiall_vbar0_runtime_exceptions
+	msr	vbar_el3, x2
+
+	/*
+	 * Restore all GP regs except x0 and x1. The value in x0
+	 * indicates the type of the original exception.
+	 */
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+	/*
+	 * Each of these handlers will first restore x0 and x1 from
+	 * the context and the branch to the common implementation for
+	 * each of the exception types.
+	 */
+	tbnz	x0, #1, workaround_bpiall_vbar1_irq
+	tbnz	x0, #2, workaround_bpiall_vbar1_fiq
+	tbnz	x0, #3, workaround_bpiall_vbar1_serror
+
+	/* Fallthrough case for Sync exception */
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	b	sync_exception_aarch64
+	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar1_irq_aarch32
+	b	report_unhandled_interrupt
+workaround_bpiall_vbar1_irq:
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	b	irq_aarch64
+	check_vector_size workaround_bpiall_vbar1_irq_aarch32
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch32
+	b	report_unhandled_interrupt
+workaround_bpiall_vbar1_fiq:
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	b	fiq_aarch64
+	check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar1_serror_aarch32
+	b	report_unhandled_exception
+workaround_bpiall_vbar1_serror:
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	b	serror_aarch64
+	check_vector_size workaround_bpiall_vbar1_serror_aarch32