Diffstat (limited to 'arch/arm64/kernel/hyp-stub.S')
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 71
1 file changed, 45 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 43d212618834..085bc9972f6b 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -51,8 +51,13 @@ SYM_CODE_START_LOCAL(elx_sync)
msr vbar_el2, x1
b 9f

-1: cmp x0, #HVC_VHE_RESTART
- b.eq mutate_to_vhe
+1: cmp x0, #HVC_FINALISE_EL2
+ b.eq __finalise_el2
+
+ cmp x0, #HVC_GET_ICH_VTR_EL2
+ b.ne 2f
+ mrs_s x1, SYS_ICH_VTR_EL2
+ b 9f

2: cmp x0, #HVC_SOFT_RESTART
b.ne 3f
@@ -73,34 +78,31 @@ SYM_CODE_START_LOCAL(elx_sync)
eret
SYM_CODE_END(elx_sync)
-// nVHE? No way! Give me the real thing!
-SYM_CODE_START_LOCAL(mutate_to_vhe)
+SYM_CODE_START_LOCAL(__finalise_el2)
+ finalise_el2_state
+
+ // nVHE? No way! Give me the real thing!
// Sanity check: MMU *must* be off
mrs x1, sctlr_el2
tbnz x1, #0, 1f
// Needs to be VHE capable, obviously
- mrs x1, id_aa64mmfr1_el1
- ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
- cbz x1, 1f
+ check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 0f 1f x1 x2
- // Check whether VHE is disabled from the command line
- adr_l x1, id_aa64mmfr1_override
+0: // Check whether we only want the hypervisor to run VHE, not the kernel
+ adr_l x1, arm64_sw_feature_override
ldr x2, [x1, FTR_OVR_VAL_OFFSET]
ldr x1, [x1, FTR_OVR_MASK_OFFSET]
- ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
- ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
- cmp x1, xzr
and x2, x2, x1
- csinv x2, x2, xzr, ne
- cbnz x2, 2f
+ ubfx x2, x2, #ARM64_SW_FEATURE_OVERRIDE_HVHE, #4
+ cbz x2, 2f
1: mov_q x0, HVC_STUB_ERR
eret
2:
// Engage the VHE magic!
mov_q x0, HCR_HOST_VHE_FLAGS
- msr hcr_el2, x0
+ msr_hcr_el2 x0
isb
// Use the EL1 allocated stack, per-cpu offset
@@ -117,8 +119,8 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
// Use EL2 translations for SPE & TRBE and disable access from EL1
mrs x0, mdcr_el2
- bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
- bic x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+ bic x0, x0, #MDCR_EL2_E2PB_MASK
+ bic x0, x0, #MDCR_EL2_E2TB_MASK
msr mdcr_el2, x0
// Transfer the MM state from EL1 to EL2
@@ -130,6 +132,24 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
msr ttbr1_el1, x0
mrs_s x0, SYS_MAIR_EL12
msr mair_el1, x0
+ mrs x1, REG_ID_AA64MMFR3_EL1
+ ubfx x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
+ cbz x1, .Lskip_tcr2
+ mrs x0, REG_TCR2_EL12
+ msr REG_TCR2_EL1, x0
+
+ // Transfer permission indirection state
+ mrs x1, REG_ID_AA64MMFR3_EL1
+ ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
+ cbz x1, .Lskip_indirection
+ mrs x0, REG_PIRE0_EL12
+ msr REG_PIRE0_EL1, x0
+ mrs x0, REG_PIR_EL12
+ msr REG_PIR_EL1, x0
+
+.Lskip_indirection:
+.Lskip_tcr2:
+
isb
// Hack the exception return to stay at EL2
@@ -140,10 +160,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
msr spsr_el1, x0
b enter_vhe
-SYM_CODE_END(mutate_to_vhe)
+SYM_CODE_END(__finalise_el2)
// At the point where we reach enter_vhe(), we run with
- // the MMU off (which is enforced by mutate_to_vhe()).
+ // the MMU off (which is enforced by __finalise_el2()).
// We thus need to be in the idmap, or everything will
// explode when enabling the MMU.
@@ -222,12 +242,12 @@ SYM_FUNC_START(__hyp_reset_vectors)
SYM_FUNC_END(__hyp_reset_vectors)
/*
- * Entry point to switch to VHE if deemed capable
+ * Entry point to finalise EL2 and switch to VHE if deemed capable
+ *
+ * w0: boot mode, as returned by init_kernel_el()
*/
-SYM_FUNC_START(switch_to_vhe)
+SYM_FUNC_START(finalise_el2)
// Need to have booted at EL2
- adr_l x1, __boot_cpu_mode
- ldr w0, [x1]
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
@@ -236,9 +256,8 @@ SYM_FUNC_START(switch_to_vhe)
cmp x0, #CurrentEL_EL1
b.ne 1f
- // Turn the world upside down
- mov x0, #HVC_VHE_RESTART
+ mov x0, #HVC_FINALISE_EL2
hvc #0
1:
ret
-SYM_FUNC_END(switch_to_vhe)
+SYM_FUNC_END(finalise_el2)
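
One practical consequence of the rename is visible in the removed lines above: finalise_el2 no longer loads __boot_cpu_mode itself, so each caller must now pass the boot mode in w0, as the new comment block states. A minimal call-site sketch (illustrative only, not part of this diff), assuming the caller can still reach __boot_cpu_mode the way the removed switch_to_vhe lines did:

	// Hypothetical call site: fetch the recorded boot mode and
	// hand it to finalise_el2 in w0, matching the new convention
	// "w0: boot mode, as returned by init_kernel_el()".
	adr_l	x1, __boot_cpu_mode
	ldr	w0, [x1]		// BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2
	bl	finalise_el2		// no-op unless we booted at EL2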