path: root/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <linux/stringify.h>
#include <stdint.h>

#include "apic.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

/* VMCALL and VMMCALL are both 3-byte opcodes. */
#define HYPERCALL_INSN_SIZE	3

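/*
 * Set by the host and synced into the guest before each run: true when the
 * quirk is disabled and the non-native hypercall is expected to #UD rather
 * than be patched by KVM.
 */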
static bool ud_expected;

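/*
 * The test completes from the #UD handler when the quirk is disabled, i.e.
 * when the non-native hypercall faults instead of being patched.
 */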
static void guest_ud_handler(struct ex_regs *regs)
{
	GUEST_ASSERT(ud_expected);
	GUEST_DONE();
}

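/*
 * Labels placed on the hypercall instructions in the asm blobs below, exported
 * so that guest_main() can memcmp() the raw opcode bytes after the hypercall.
 */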
extern uint8_t svm_hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t svm_do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("mov %1, %%rax\n\t"
		     "mov %2, %%rbx\n\t"
		     "svm_hypercall_insn:\n\t"
		     "vmmcall\n\t"
		     "mov %%rax, %0\n\t"
		     : "=r"(ret)
		     : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
		     : "rax", "rbx", "memory");

	return ret;
}

extern uint8_t vmx_hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t vmx_do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("mov %1, %%rax\n\t"
		     "mov %2, %%rbx\n\t"
		     "vmx_hypercall_insn:\n\t"
		     "vmcall\n\t"
		     "mov %%rax, %0\n\t"
		     : "=r"(ret)
		     : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
		     : "rax", "rbx", "memory");

	return ret;
}

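/*
 * Execute the hypercall instruction that is NOT native to the underlying CPU
 * (VMMCALL on Intel, VMCALL on AMD).  With KVM_X86_QUIRK_FIX_HYPERCALL_INSN
 * enabled (the default), KVM is expected to rewrite the instruction to the
 * native one; with the quirk disabled, the instruction is expected to #UD.
 */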
static void guest_main(void)
{
	uint8_t *native_hypercall_insn, *hypercall_insn;
	uint8_t apic_id;

	apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));

	if (is_intel_cpu()) {
		native_hypercall_insn = vmx_hypercall_insn;
		hypercall_insn = svm_hypercall_insn;
		svm_do_sched_yield(apic_id);
	} else if (is_amd_cpu()) {
		native_hypercall_insn = svm_hypercall_insn;
		hypercall_insn = vmx_hypercall_insn;
		vmx_do_sched_yield(apic_id);
	} else {
		GUEST_ASSERT(0);
		/* unreachable */
		return;
	}

	/*
	 * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
	 * occurs).  Verify that a #UD is NOT expected and that KVM patched in
	 * the native hypercall.
	 */
	GUEST_ASSERT(!ud_expected);
	GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE));
	GUEST_DONE();
}

static void setup_ud_vector(struct kvm_vcpu *vcpu)
{
	vm_init_descriptor_tables(vcpu->vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
}

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
		break;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
	}
}

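/* Quirk enabled (default): expect KVM to patch in the native hypercall, no #UD. */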
static void test_fix_hypercall(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	setup_ud_vector(vcpu);

	ud_expected = false;
	sync_global_to_guest(vm, ud_expected);

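	/* Identity map the xAPIC MMIO page so the guest can read its APIC ID. */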
	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);
}

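/*
 * Quirk disabled: KVM should inject a #UD for the non-native hypercall
 * instead of patching it.
 */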
static void test_fix_hypercall_disabled(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	setup_ud_vector(vcpu);

	vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
		      KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	ud_expected = true;
	sync_global_to_guest(vm, ud_expected);

	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);
}

int main(void)
{
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	test_fix_hypercall();
	test_fix_hypercall_disabled();
}