path: root/arch/x86/hyperv/mshv_vtl_asm.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Assembly-level code for the mshv_vtl VTL transition
 *
 * Copyright (c) 2025, Microsoft Corporation.
 *
 * Author:
 *   Naman Jain <namjain@microsoft.com>
 */

#include <linux/linkage.h>
#include <linux/static_call_types.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/frame.h>
#include "mshv-asm-offsets.h"

	.text
	.section .noinstr.text, "ax"
/*
 * void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0)
 *
 * Context switch between Virtual Trust Levels: load the lower VTL's register
 * state from @vtl0, issue the VTL return hypercall, and save the lower VTL's
 * state back into @vtl0 once it transitions back into this VTL.
 * It is 'noinstr' (placed in .noinstr.text) to keep instrumentation and
 * debugging facilities away from it. NMIs aren't a problem because the NMI
 * handler saves/restores CR2 specifically to guard against #PFs in NMI
 * context clobbering the guest state.
 */
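/*
 * A minimal, hypothetical C-side usage sketch (the helper name is
 * illustrative, not from this file); the caller is assumed to run with
 * interrupts disabled, so that only NMIs, handled as described above,
 * can interrupt the transition:
 *
 *	static void mshv_vtl_enter_vtl0(struct mshv_vtl_cpu_context *ctx)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		__mshv_vtl_return_call(ctx);
 *	}
 */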
SYM_FUNC_START(__mshv_vtl_return_call)
	/* Push callee-saved registers */
	pushq %rbp
	mov %rsp, %rbp
	pushq %r12
	pushq %r13
	pushq %r14
	pushq %r15
	pushq %rbx

	/*
	 * The register switch to VTL0 clobbers all registers except rax/rcx.
	 * Keep the vtl0 context pointer in rax while loading the guest state.
	 */
	mov %_ASM_ARG1, %rax

	/* grab rbx/rbp/rsi/rdi/r8-r15 */
	mov MSHV_VTL_CPU_CONTEXT_rbx(%rax), %rbx
	mov MSHV_VTL_CPU_CONTEXT_rbp(%rax), %rbp
	mov MSHV_VTL_CPU_CONTEXT_rsi(%rax), %rsi
	mov MSHV_VTL_CPU_CONTEXT_rdi(%rax), %rdi
	mov MSHV_VTL_CPU_CONTEXT_r8(%rax), %r8
	mov MSHV_VTL_CPU_CONTEXT_r9(%rax), %r9
	mov MSHV_VTL_CPU_CONTEXT_r10(%rax), %r10
	mov MSHV_VTL_CPU_CONTEXT_r11(%rax), %r11
	mov MSHV_VTL_CPU_CONTEXT_r12(%rax), %r12
	mov MSHV_VTL_CPU_CONTEXT_r13(%rax), %r13
	mov MSHV_VTL_CPU_CONTEXT_r14(%rax), %r14
	mov MSHV_VTL_CPU_CONTEXT_r15(%rax), %r15

	/* load guest cr2 through rdx, then restore guest rdx last */
	mov MSHV_VTL_CPU_CONTEXT_cr2(%rax), %rdx
	mov %rdx, %cr2
	mov MSHV_VTL_CPU_CONTEXT_rdx(%rax), %rdx

	/* stash host rax (context pointer) and rcx on the stack */
	pushq %rax
	pushq %rcx

	/* zero rcx, which carries the hypercall input value */
	xor %ecx, %ecx

	/* issue the VTL return hypercall to switch to VTL0 */
	call STATIC_CALL_TRAMP_STR(__mshv_vtl_return_hypercall)
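
	/*
	 * Execution resumes here once the lower VTL transitions back into
	 * this VTL; at that point the GPRs hold the guest's values, which
	 * are saved below.
	 */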

	/* stash guest rax/rcx on the stack, then restore the saved host copies */
	pushq %rax
	pushq %rcx
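	/*
	 * Stack layout at this point:
	 *    0(%rsp)  guest rcx
	 *    8(%rsp)  guest rax
	 *   16(%rsp)  host rcx
	 *   24(%rsp)  host rax (vtl0 context pointer)
	 */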
	mov 16(%rsp), %rcx	/* restore host rcx */
	mov 24(%rsp), %rax	/* restore host rax (vtl0 context pointer) */

	mov %rdx, MSHV_VTL_CPU_CONTEXT_rdx(%rax)	/* save guest rdx */
	mov %cr2, %rdx
	mov %rdx, MSHV_VTL_CPU_CONTEXT_cr2(%rax)	/* save guest cr2 via rdx */
	popq MSHV_VTL_CPU_CONTEXT_rcx(%rax)	/* save guest rcx */
	popq MSHV_VTL_CPU_CONTEXT_rax(%rax)	/* save guest rax */
	add $16, %rsp				/* drop the stashed host rax/rcx */

	/* save rbx/rbp/rsi/rdi/r8-r15 */
	mov %rbx, MSHV_VTL_CPU_CONTEXT_rbx(%rax)
	mov %rbp, MSHV_VTL_CPU_CONTEXT_rbp(%rax)
	mov %rsi, MSHV_VTL_CPU_CONTEXT_rsi(%rax)
	mov %rdi, MSHV_VTL_CPU_CONTEXT_rdi(%rax)
	mov %r8,  MSHV_VTL_CPU_CONTEXT_r8(%rax)
	mov %r9,  MSHV_VTL_CPU_CONTEXT_r9(%rax)
	mov %r10, MSHV_VTL_CPU_CONTEXT_r10(%rax)
	mov %r11, MSHV_VTL_CPU_CONTEXT_r11(%rax)
	mov %r12, MSHV_VTL_CPU_CONTEXT_r12(%rax)
	mov %r13, MSHV_VTL_CPU_CONTEXT_r13(%rax)
	mov %r14, MSHV_VTL_CPU_CONTEXT_r14(%rax)
	mov %r15, MSHV_VTL_CPU_CONTEXT_r15(%rax)

	/* pop callee-saved registers in reverse push order: rbx, r15-r12 */
	pop %rbx
	pop %r15
	pop %r14
	pop %r13
	pop %r12

	pop %rbp
	RET
SYM_FUNC_END(__mshv_vtl_return_call)
/*
 * Make sure the static_call key symbol __SCK____mshv_vtl_return_hypercall is
 * accessible here. The code below is modeled on the __ADDRESSABLE(sym) macro;
 * the symbol name is kept simple to avoid something like
 * "__UNIQUE_ID_addressable___SCK____mshv_vtl_return_hypercall_662.0", which
 * the macro would otherwise generate.
 */
	.section	.discard.addressable,"aw"
	.align 8
	.type	mshv_vtl_return_sym, @object
	.size	mshv_vtl_return_sym, 8
mshv_vtl_return_sym:
	.quad	__SCK____mshv_vtl_return_hypercall