summaryrefslogtreecommitdiff
path: root/arch/arm64/include/asm/kvm_ptrauth.h
blob: 0ddf98c3ba9faa653168cc94a93bf71058e4284a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
/* SPDX-License-Identifier: GPL-2.0 */
/* arch/arm64/include/asm/kvm_ptrauth.h: Guest/host ptrauth save/restore
 * Copyright 2019 Arm Limited
 * Authors: Mark Rutland <mark.rutland@arm.com>
 *         Amit Daniel Kachhap <amit.kachhap@arm.com>
 */

#ifndef __ASM_KVM_PTRAUTH_H
#define __ASM_KVM_PTRAUTH_H

#ifdef __ASSEMBLY__

#include <asm/sysreg.h>

#ifdef	CONFIG_ARM64_PTR_AUTH

#define PTRAUTH_REG_OFFSET(x)	(x - CPU_APIAKEYLO_EL1)

/*
 * The CPU_AP*_EL1 byte offsets exceed the immediate offset range (512)
 * of the stp instruction, so the macros below take CPU_APIAKEYLO_EL1 as
 * a base and compute each key's offset from that base, avoiding an
 * extra add instruction. These macros assume the key offsets follow the
 * order of the sysreg enum in kvm_host.h.
 */
.macro	ptrauth_save_state base, reg1, reg2
	mrs_s	\reg1, SYS_APIAKEYLO_EL1
	mrs_s	\reg2, SYS_APIAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
	mrs_s	\reg1, SYS_APIBKEYLO_EL1
	mrs_s	\reg2, SYS_APIBKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
	mrs_s	\reg1, SYS_APDAKEYLO_EL1
	mrs_s	\reg2, SYS_APDAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
	mrs_s	\reg1, SYS_APDBKEYLO_EL1
	mrs_s	\reg2, SYS_APDBKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
	mrs_s	\reg1, SYS_APGAKEYLO_EL1
	mrs_s	\reg2, SYS_APGAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
.endm

.macro	ptrauth_restore_state base, reg1, reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
	msr_s	SYS_APIAKEYLO_EL1, \reg1
	msr_s	SYS_APIAKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
	msr_s	SYS_APIBKEYLO_EL1, \reg1
	msr_s	SYS_APIBKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
	msr_s	SYS_APDAKEYLO_EL1, \reg1
	msr_s	SYS_APDAKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
	msr_s	SYS_APDBKEYLO_EL1, \reg1
	msr_s	SYS_APDBKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
	msr_s	SYS_APGAKEYLO_EL1, \reg1
	msr_s	SYS_APGAKEYHI_EL1, \reg2
.endm

/*
 * Both the ptrauth_switch_to_guest and ptrauth_switch_to_host macros
 * check for the presence of ARM64_HAS_ADDRESS_AUTH, which is defined as
 * (ARM64_HAS_ADDRESS_AUTH_ARCH || ARM64_HAS_ADDRESS_AUTH_IMP_DEF), and
 * only then proceed with the save/restore of the pointer authentication
 * key registers, provided ptrauth is enabled for the guest.
 */
.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.L__skip_switch\@
alternative_else_nop_endif
	mrs	\reg1, hcr_el2
	and	\reg1, \reg1, #(HCR_API | HCR_APK)
	cbz	\reg1, .L__skip_switch\@
	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	\reg1, \reg2, \reg3
.L__skip_switch\@:
.endm

.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.L__skip_switch\@
alternative_else_nop_endif
	mrs	\reg1, hcr_el2
	and	\reg1, \reg1, #(HCR_API | HCR_APK)
	cbz	\reg1, .L__skip_switch\@
	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_save_state	\reg1, \reg2, \reg3
	add	\reg1, \h_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	\reg1, \reg2, \reg3
	isb
.L__skip_switch\@:
.endm

#else /* !CONFIG_ARM64_PTR_AUTH */
.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
.endm
.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
.endm
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_KVM_PTRAUTH_H */