Diffstat (limited to 'tools/testing/selftests/kvm/lib/x86_64')
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/apic.c         43
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/handlers.S     81
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/hyperv.c       46
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/memstress.c   112
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/pmu.c          31
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c  1358
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/sev.c         114
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/svm.c         164
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/ucall.c        56
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/vmx.c         554
10 files changed, 0 insertions, 2559 deletions
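As context for the deletions below: guest code called the apic.c helpers to switch APIC modes before touching APIC registers. A minimal, hypothetical usage sketch (guest_code() is illustrative; the GUEST_* ucall macros come from the selftest harness):

	static void guest_code(void)
	{
		/* Enter x2APIC mode and verify the EXTD bit stuck. */
		x2apic_enable();
		GUEST_ASSERT(rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_EXTD);

		/* Drop back to xAPIC; xapic_enable() performs the required full disable. */
		xapic_enable();
		GUEST_DONE();
	}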
diff --git a/tools/testing/selftests/kvm/lib/x86_64/apic.c b/tools/testing/selftests/kvm/lib/x86_64/apic.c
deleted file mode 100644
index 89153a333e83..000000000000
--- a/tools/testing/selftests/kvm/lib/x86_64/apic.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2021, Google LLC.
- */
-
-#include "apic.h"
-
-void apic_disable(void)
-{
-	wrmsr(MSR_IA32_APICBASE,
-	      rdmsr(MSR_IA32_APICBASE) &
-		~(MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD));
-}
-
-void xapic_enable(void)
-{
-	uint64_t val = rdmsr(MSR_IA32_APICBASE);
-
-	/* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */
-	if (val & MSR_IA32_APICBASE_EXTD) {
-		apic_disable();
-		wrmsr(MSR_IA32_APICBASE,
-		      rdmsr(MSR_IA32_APICBASE) | MSR_IA32_APICBASE_ENABLE);
-	} else if (!(val & MSR_IA32_APICBASE_ENABLE)) {
-		wrmsr(MSR_IA32_APICBASE, val | MSR_IA32_APICBASE_ENABLE);
-	}
-
-	/*
-	 * Per SDM: reset value of spurious interrupt vector register has the
-	 * APIC software enabled bit=0. It must be enabled in addition to the
-	 * enable bit in the MSR.
-	 */
-	val = xapic_read_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED;
-	xapic_write_reg(APIC_SPIV, val);
-}
-
-void x2apic_enable(void)
-{
-	wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
-	      MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
-	x2apic_write_reg(APIC_SPIV,
-			 x2apic_read_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED);
-}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/handlers.S b/tools/testing/selftests/kvm/lib/x86_64/handlers.S
deleted file mode 100644
index 7629819734af..000000000000
--- a/tools/testing/selftests/kvm/lib/x86_64/handlers.S
+++ /dev/null
@@ -1,81 +0,0 @@
-handle_exception:
-	push %r15
-	push %r14
-	push %r13
-	push %r12
-	push %r11
-	push %r10
-	push %r9
-	push %r8
-
-	push %rdi
-	push %rsi
-	push %rbp
-	push %rbx
-	push %rdx
-	push %rcx
-	push %rax
-	mov %rsp, %rdi
-
-	call route_exception
-
-	pop %rax
-	pop %rcx
-	pop %rdx
-	pop %rbx
-	pop %rbp
-	pop %rsi
-	pop %rdi
-	pop %r8
-	pop %r9
-	pop %r10
-	pop %r11
-	pop %r12
-	pop %r13
-	pop %r14
-	pop %r15
-
-	/* Discard vector and error code. */
-	add $16, %rsp
-	iretq
-
-/*
- * Build the handle_exception wrappers which push the vector/error code on the
- * stack and an array of pointers to those wrappers.
- */
-.pushsection .rodata
-.globl idt_handlers
-idt_handlers:
-.popsection
-
-.macro HANDLERS has_error from to
-	vector = \from
-	.rept \to - \from + 1
-	.align 8
-
-	/* Fetch current address and append it to idt_handlers. */
-666 :
-.pushsection .rodata
-	.quad 666b
-.popsection
-
-	.if ! \has_error
-	pushq $0
-	.endif
-	pushq $vector
-	jmp handle_exception
-	vector = vector + 1
-	.endr
-.endm
-
-.global idt_handler_code
-idt_handler_code:
-	HANDLERS has_error=0 from=0 to=7
-	HANDLERS has_error=1 from=8 to=8
-	HANDLERS has_error=0 from=9 to=9
-	HANDLERS has_error=1 from=10 to=14
-	HANDLERS has_error=0 from=15 to=16
-	HANDLERS has_error=1 from=17 to=17
-	HANDLERS has_error=0 from=18 to=255
-
-.section .note.GNU-stack, "", %progbits
diff --git a/tools/testing/selftests/kvm/lib/x86_64/hyperv.c b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
deleted file mode 100644
index efb7e7a1354d..000000000000
--- a/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Hyper-V specific functions.
- *
- * Copyright (C) 2021, Red Hat Inc.
- */ -#include <stdint.h> -#include "processor.h" -#include "hyperv.h" - -struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, - vm_vaddr_t *p_hv_pages_gva) -{ - vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm); - struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva); - - /* Setup of a region of guest memory for the VP Assist page. */ - hv->vp_assist = (void *)vm_vaddr_alloc_page(vm); - hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist); - hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist); - - /* Setup of a region of guest memory for the partition assist page. */ - hv->partition_assist = (void *)vm_vaddr_alloc_page(vm); - hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist); - hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist); - - /* Setup of a region of guest memory for the enlightened VMCS. */ - hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm); - hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs); - hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs); - - *p_hv_pages_gva = hv_pages_gva; - return hv; -} - -int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) -{ - uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | - HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; - - wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val); - - current_vp_assist = vp_assist; - - return 0; -} diff --git a/tools/testing/selftests/kvm/lib/x86_64/memstress.c b/tools/testing/selftests/kvm/lib/x86_64/memstress.c deleted file mode 100644 index d61e623afc8c..000000000000 --- a/tools/testing/selftests/kvm/lib/x86_64/memstress.c +++ /dev/null @@ -1,112 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * x86_64-specific extensions to memstress.c. - * - * Copyright (C) 2022, Google, Inc. - */ -#include <stdio.h> -#include <stdlib.h> -#include <linux/bitmap.h> -#include <linux/bitops.h> - -#include "test_util.h" -#include "kvm_util.h" -#include "memstress.h" -#include "processor.h" -#include "vmx.h" - -void memstress_l2_guest_code(uint64_t vcpu_id) -{ - memstress_guest_code(vcpu_id); - vmcall(); -} - -extern char memstress_l2_guest_entry[]; -__asm__( -"memstress_l2_guest_entry:" -" mov (%rsp), %rdi;" -" call memstress_l2_guest_code;" -" ud2;" -); - -static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id) -{ -#define L2_GUEST_STACK_SIZE 64 - unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - unsigned long *rsp; - - GUEST_ASSERT(vmx->vmcs_gpa); - GUEST_ASSERT(prepare_for_vmx_operation(vmx)); - GUEST_ASSERT(load_vmcs(vmx)); - GUEST_ASSERT(ept_1g_pages_supported()); - - rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1]; - *rsp = vcpu_id; - prepare_vmcs(vmx, memstress_l2_guest_entry, rsp); - - GUEST_ASSERT(!vmlaunch()); - GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); - GUEST_DONE(); -} - -uint64_t memstress_nested_pages(int nr_vcpus) -{ - /* - * 513 page tables is enough to identity-map 256 TiB of L2 with 1G - * pages and 4-level paging, plus a few pages per-vCPU for data - * structures such as the VMCS. - */ - return 513 + 10 * nr_vcpus; -} - -void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm) -{ - uint64_t start, end; - - prepare_eptp(vmx, vm, 0); - - /* - * Identity map the first 4G and the test region with 1G pages so that - * KVM can shadow the EPT12 with the maximum huge page size supported - * by the backing source. 
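 * (An identity map keeps L2 GPAs equal to L1 GPAs, so memstress's access
 * pattern is unchanged when it runs from L2.)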
- */ - nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL); - - start = align_down(memstress_args.gpa, PG_SIZE_1G); - end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G); - nested_identity_map_1g(vmx, vm, start, end - start); -} - -void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]) -{ - struct vmx_pages *vmx, *vmx0 = NULL; - struct kvm_regs regs; - vm_vaddr_t vmx_gva; - int vcpu_id; - - TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX)); - TEST_REQUIRE(kvm_cpu_has_ept()); - - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { - vmx = vcpu_alloc_vmx(vm, &vmx_gva); - - if (vcpu_id == 0) { - memstress_setup_ept(vmx, vm); - vmx0 = vmx; - } else { - /* Share the same EPT table across all vCPUs. */ - vmx->eptp = vmx0->eptp; - vmx->eptp_hva = vmx0->eptp_hva; - vmx->eptp_gpa = vmx0->eptp_gpa; - } - - /* - * Override the vCPU to run memstress_l1_guest_code() which will - * bounce it into L2 before calling memstress_guest_code(). - */ - vcpu_regs_get(vcpus[vcpu_id], ®s); - regs.rip = (unsigned long) memstress_l1_guest_code; - vcpu_regs_set(vcpus[vcpu_id], ®s); - vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id); - } -} diff --git a/tools/testing/selftests/kvm/lib/x86_64/pmu.c b/tools/testing/selftests/kvm/lib/x86_64/pmu.c deleted file mode 100644 index f31f0427c17c..000000000000 --- a/tools/testing/selftests/kvm/lib/x86_64/pmu.c +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2023, Tencent, Inc. - */ - -#include <stdint.h> - -#include <linux/kernel.h> - -#include "kvm_util.h" -#include "pmu.h" - -const uint64_t intel_pmu_arch_events[] = { - INTEL_ARCH_CPU_CYCLES, - INTEL_ARCH_INSTRUCTIONS_RETIRED, - INTEL_ARCH_REFERENCE_CYCLES, - INTEL_ARCH_LLC_REFERENCES, - INTEL_ARCH_LLC_MISSES, - INTEL_ARCH_BRANCHES_RETIRED, - INTEL_ARCH_BRANCHES_MISPREDICTED, - INTEL_ARCH_TOPDOWN_SLOTS, -}; -kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS); - -const uint64_t amd_pmu_zen_events[] = { - AMD_ZEN_CORE_CYCLES, - AMD_ZEN_INSTRUCTIONS_RETIRED, - AMD_ZEN_BRANCHES_RETIRED, - AMD_ZEN_BRANCHES_MISPREDICTED, -}; -kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS); diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c deleted file mode 100644 index 74a4c736c9ae..000000000000 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ /dev/null @@ -1,1358 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * tools/testing/selftests/kvm/lib/x86_64/processor.c - * - * Copyright (C) 2018, Google LLC. 
- */ - -#include "linux/bitmap.h" -#include "test_util.h" -#include "kvm_util.h" -#include "processor.h" -#include "sev.h" - -#ifndef NUM_INTERRUPTS -#define NUM_INTERRUPTS 256 -#endif - -#define DEFAULT_CODE_SELECTOR 0x8 -#define DEFAULT_DATA_SELECTOR 0x10 - -#define MAX_NR_CPUID_ENTRIES 100 - -vm_vaddr_t exception_handlers; -bool host_cpu_is_amd; -bool host_cpu_is_intel; - -static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) -{ - fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx " - "rcx: 0x%.16llx rdx: 0x%.16llx\n", - indent, "", - regs->rax, regs->rbx, regs->rcx, regs->rdx); - fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx " - "rsp: 0x%.16llx rbp: 0x%.16llx\n", - indent, "", - regs->rsi, regs->rdi, regs->rsp, regs->rbp); - fprintf(stream, "%*sr8: 0x%.16llx r9: 0x%.16llx " - "r10: 0x%.16llx r11: 0x%.16llx\n", - indent, "", - regs->r8, regs->r9, regs->r10, regs->r11); - fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx " - "r14: 0x%.16llx r15: 0x%.16llx\n", - indent, "", - regs->r12, regs->r13, regs->r14, regs->r15); - fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n", - indent, "", - regs->rip, regs->rflags); -} - -static void segment_dump(FILE *stream, struct kvm_segment *segment, - uint8_t indent) -{ - fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x " - "selector: 0x%.4x type: 0x%.2x\n", - indent, "", segment->base, segment->limit, - segment->selector, segment->type); - fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x " - "db: 0x%.2x s: 0x%.2x l: 0x%.2x\n", - indent, "", segment->present, segment->dpl, - segment->db, segment->s, segment->l); - fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x " - "unusable: 0x%.2x padding: 0x%.2x\n", - indent, "", segment->g, segment->avl, - segment->unusable, segment->padding); -} - -static void dtable_dump(FILE *stream, struct kvm_dtable *dtable, - uint8_t indent) -{ - fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x " - "padding: 0x%.4x 0x%.4x 0x%.4x\n", - indent, "", dtable->base, dtable->limit, - dtable->padding[0], dtable->padding[1], dtable->padding[2]); -} - -static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent) -{ - unsigned int i; - - fprintf(stream, "%*scs:\n", indent, ""); - segment_dump(stream, &sregs->cs, indent + 2); - fprintf(stream, "%*sds:\n", indent, ""); - segment_dump(stream, &sregs->ds, indent + 2); - fprintf(stream, "%*ses:\n", indent, ""); - segment_dump(stream, &sregs->es, indent + 2); - fprintf(stream, "%*sfs:\n", indent, ""); - segment_dump(stream, &sregs->fs, indent + 2); - fprintf(stream, "%*sgs:\n", indent, ""); - segment_dump(stream, &sregs->gs, indent + 2); - fprintf(stream, "%*sss:\n", indent, ""); - segment_dump(stream, &sregs->ss, indent + 2); - fprintf(stream, "%*str:\n", indent, ""); - segment_dump(stream, &sregs->tr, indent + 2); - fprintf(stream, "%*sldt:\n", indent, ""); - segment_dump(stream, &sregs->ldt, indent + 2); - - fprintf(stream, "%*sgdt:\n", indent, ""); - dtable_dump(stream, &sregs->gdt, indent + 2); - fprintf(stream, "%*sidt:\n", indent, ""); - dtable_dump(stream, &sregs->idt, indent + 2); - - fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx " - "cr3: 0x%.16llx cr4: 0x%.16llx\n", - indent, "", - sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4); - fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx " - "apic_base: 0x%.16llx\n", - indent, "", - sregs->cr8, sregs->efer, sregs->apic_base); - - fprintf(stream, "%*sinterrupt_bitmap:\n", indent, ""); - for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) { - fprintf(stream, "%*s%.16llx\n", indent + 2, "", - 
sregs->interrupt_bitmap[i]); - } -} - -bool kvm_is_tdp_enabled(void) -{ - if (host_cpu_is_intel) - return get_kvm_intel_param_bool("ept"); - else - return get_kvm_amd_param_bool("npt"); -} - -void virt_arch_pgd_alloc(struct kvm_vm *vm) -{ - TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " - "unknown or unsupported guest mode, mode: 0x%x", vm->mode); - - /* If needed, create page map l4 table. */ - if (!vm->pgd_created) { - vm->pgd = vm_alloc_page_table(vm); - vm->pgd_created = true; - } -} - -static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte, - uint64_t vaddr, int level) -{ - uint64_t pt_gpa = PTE_GET_PA(*parent_pte); - uint64_t *page_table = addr_gpa2hva(vm, pt_gpa); - int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu; - - TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->pgd, - "Parent PTE (level %d) not PRESENT for gva: 0x%08lx", - level + 1, vaddr); - - return &page_table[index]; -} - -static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, - uint64_t *parent_pte, - uint64_t vaddr, - uint64_t paddr, - int current_level, - int target_level) -{ - uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level); - - paddr = vm_untag_gpa(vm, paddr); - - if (!(*pte & PTE_PRESENT_MASK)) { - *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK; - if (current_level == target_level) - *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK); - else - *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK; - } else { - /* - * Entry already present. Assert that the caller doesn't want - * a hugepage at this level, and that there isn't a hugepage at - * this level. - */ - TEST_ASSERT(current_level != target_level, - "Cannot create hugepage at level: %u, vaddr: 0x%lx", - current_level, vaddr); - TEST_ASSERT(!(*pte & PTE_LARGE_MASK), - "Cannot create page table at level: %u, vaddr: 0x%lx", - current_level, vaddr); - } - return pte; -} - -void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level) -{ - const uint64_t pg_size = PG_LEVEL_SIZE(level); - uint64_t *pml4e, *pdpe, *pde; - uint64_t *pte; - - TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, - "Unknown or unsupported guest mode, mode: 0x%x", vm->mode); - - TEST_ASSERT((vaddr % pg_size) == 0, - "Virtual address not aligned,\n" - "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % pg_size) == 0, - "Physical address not aligned,\n" - " paddr: 0x%lx page size: 0x%lx", paddr, pg_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, - "Physical address beyond maximum supported,\n" - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); - TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr, - "Unexpected bits in paddr: %lx", paddr); - - /* - * Allocate upper level page tables, if not already present. Return - * early if a hugepage was created. - */ - pml4e = virt_create_upper_pte(vm, &vm->pgd, vaddr, paddr, PG_LEVEL_512G, level); - if (*pml4e & PTE_LARGE_MASK) - return; - - pdpe = virt_create_upper_pte(vm, pml4e, vaddr, paddr, PG_LEVEL_1G, level); - if (*pdpe & PTE_LARGE_MASK) - return; - - pde = virt_create_upper_pte(vm, pdpe, vaddr, paddr, PG_LEVEL_2M, level); - if (*pde & PTE_LARGE_MASK) - return; - - /* Fill in page table entry. 
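 * (The calls above returned early for hugepages, so every parent level here
 * is PRESENT and points to a lower-level page table.)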
*/ - pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K); - TEST_ASSERT(!(*pte & PTE_PRESENT_MASK), - "PTE already present for 4k page at vaddr: 0x%lx", vaddr); - *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK); - - /* - * Neither SEV nor TDX supports shared page tables, so only the final - * leaf PTE needs manually set the C/S-bit. - */ - if (vm_is_gpa_protected(vm, paddr)) - *pte |= vm->arch.c_bit; - else - *pte |= vm->arch.s_bit; -} - -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) -{ - __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K); -} - -void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - uint64_t nr_bytes, int level) -{ - uint64_t pg_size = PG_LEVEL_SIZE(level); - uint64_t nr_pages = nr_bytes / pg_size; - int i; - - TEST_ASSERT(nr_bytes % pg_size == 0, - "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx", - nr_bytes, pg_size); - - for (i = 0; i < nr_pages; i++) { - __virt_pg_map(vm, vaddr, paddr, level); - - vaddr += pg_size; - paddr += pg_size; - } -} - -static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level) -{ - if (*pte & PTE_LARGE_MASK) { - TEST_ASSERT(*level == PG_LEVEL_NONE || - *level == current_level, - "Unexpected hugepage at level %d", current_level); - *level = current_level; - } - - return *level == current_level; -} - -uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr, - int *level) -{ - uint64_t *pml4e, *pdpe, *pde; - - TEST_ASSERT(!vm->arch.is_pt_protected, - "Walking page tables of protected guests is impossible"); - - TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM, - "Invalid PG_LEVEL_* '%d'", *level); - - TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " - "unknown or unsupported guest mode, mode: 0x%x", vm->mode); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", - vaddr); - /* - * Based on the mode check above there are 48 bits in the vaddr, so - * shift 16 to sign extend the last bit (bit-47), - */ - TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16), - "Canonical check failed. 
The virtual address is invalid."); - - pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G); - if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G)) - return pml4e; - - pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G); - if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G)) - return pdpe; - - pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M); - if (vm_is_target_pte(pde, level, PG_LEVEL_2M)) - return pde; - - return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K); -} - -uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr) -{ - int level = PG_LEVEL_4K; - - return __vm_get_page_table_entry(vm, vaddr, &level); -} - -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) -{ - uint64_t *pml4e, *pml4e_start; - uint64_t *pdpe, *pdpe_start; - uint64_t *pde, *pde_start; - uint64_t *pte, *pte_start; - - if (!vm->pgd_created) - return; - - fprintf(stream, "%*s " - " no\n", indent, ""); - fprintf(stream, "%*s index hvaddr gpaddr " - "addr w exec dirty\n", - indent, ""); - pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd); - for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) { - pml4e = &pml4e_start[n1]; - if (!(*pml4e & PTE_PRESENT_MASK)) - continue; - fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u " - " %u\n", - indent, "", - pml4e - pml4e_start, pml4e, - addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e), - !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK)); - - pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK); - for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) { - pdpe = &pdpe_start[n2]; - if (!(*pdpe & PTE_PRESENT_MASK)) - continue; - fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx " - "%u %u\n", - indent, "", - pdpe - pdpe_start, pdpe, - addr_hva2gpa(vm, pdpe), - PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK), - !!(*pdpe & PTE_NX_MASK)); - - pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK); - for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) { - pde = &pde_start[n3]; - if (!(*pde & PTE_PRESENT_MASK)) - continue; - fprintf(stream, "%*spde 0x%-3zx %p " - "0x%-12lx 0x%-10llx %u %u\n", - indent, "", pde - pde_start, pde, - addr_hva2gpa(vm, pde), - PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK), - !!(*pde & PTE_NX_MASK)); - - pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK); - for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) { - pte = &pte_start[n4]; - if (!(*pte & PTE_PRESENT_MASK)) - continue; - fprintf(stream, "%*spte 0x%-3zx %p " - "0x%-12lx 0x%-10llx %u %u " - " %u 0x%-10lx\n", - indent, "", - pte - pte_start, pte, - addr_hva2gpa(vm, pte), - PTE_GET_PFN(*pte), - !!(*pte & PTE_WRITABLE_MASK), - !!(*pte & PTE_NX_MASK), - !!(*pte & PTE_DIRTY_MASK), - ((uint64_t) n1 << 27) - | ((uint64_t) n2 << 18) - | ((uint64_t) n3 << 9) - | ((uint64_t) n4)); - } - } - } - } -} - -/* - * Set Unusable Segment - * - * Input Args: None - * - * Output Args: - * segp - Pointer to segment register - * - * Return: None - * - * Sets the segment register pointed to by @segp to an unusable state. 
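 * (The 'unusable' flag mirrors the VMX "segment unusable" access-rights bit.)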
- */ -static void kvm_seg_set_unusable(struct kvm_segment *segp) -{ - memset(segp, 0, sizeof(*segp)); - segp->unusable = true; -} - -static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp) -{ - void *gdt = addr_gva2hva(vm, vm->gdt); - struct desc64 *desc = gdt + (segp->selector >> 3) * 8; - - desc->limit0 = segp->limit & 0xFFFF; - desc->base0 = segp->base & 0xFFFF; - desc->base1 = segp->base >> 16; - desc->type = segp->type; - desc->s = segp->s; - desc->dpl = segp->dpl; - desc->p = segp->present; - desc->limit1 = segp->limit >> 16; - desc->avl = segp->avl; - desc->l = segp->l; - desc->db = segp->db; - desc->g = segp->g; - desc->base2 = segp->base >> 24; - if (!segp->s) - desc->base3 = segp->base >> 32; -} - - -/* - * Set Long Mode Flat Kernel Code Segment - * - * Input Args: - * vm - VM whose GDT is being filled, or NULL to only write segp - * selector - selector value - * - * Output Args: - * segp - Pointer to KVM segment - * - * Return: None - * - * Sets up the KVM segment pointed to by @segp, to be a code segment - * with the selector value given by @selector. - */ -static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector, - struct kvm_segment *segp) -{ - memset(segp, 0, sizeof(*segp)); - segp->selector = selector; - segp->limit = 0xFFFFFFFFu; - segp->s = 0x1; /* kTypeCodeData */ - segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed - * | kFlagCodeReadable - */ - segp->g = true; - segp->l = true; - segp->present = 1; - if (vm) - kvm_seg_fill_gdt_64bit(vm, segp); -} - -/* - * Set Long Mode Flat Kernel Data Segment - * - * Input Args: - * vm - VM whose GDT is being filled, or NULL to only write segp - * selector - selector value - * - * Output Args: - * segp - Pointer to KVM segment - * - * Return: None - * - * Sets up the KVM segment pointed to by @segp, to be a data segment - * with the selector value given by @selector. - */ -static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector, - struct kvm_segment *segp) -{ - memset(segp, 0, sizeof(*segp)); - segp->selector = selector; - segp->limit = 0xFFFFFFFFu; - segp->s = 0x1; /* kTypeCodeData */ - segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed - * | kFlagDataWritable - */ - segp->g = true; - segp->present = true; - if (vm) - kvm_seg_fill_gdt_64bit(vm, segp); -} - -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) -{ - int level = PG_LEVEL_NONE; - uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level); - - TEST_ASSERT(*pte & PTE_PRESENT_MASK, - "Leaf PTE not PRESENT for gva: 0x%08lx", gva); - - /* - * No need for a hugepage mask on the PTE, x86-64 requires the "unused" - * address bits to be zero. - */ - return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level)); -} - -static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt) -{ - if (!vm->gdt) - vm->gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - - dt->base = vm->gdt; - dt->limit = getpagesize(); -} - -static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp, - int selector) -{ - if (!vm->tss) - vm->tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - - memset(segp, 0, sizeof(*segp)); - segp->base = vm->tss; - segp->limit = 0x67; - segp->selector = selector; - segp->type = 0xb; - segp->present = 1; - kvm_seg_fill_gdt_64bit(vm, segp); -} - -static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu) -{ - struct kvm_sregs sregs; - - /* Set mode specific system register values. 
*/ - vcpu_sregs_get(vcpu, &sregs); - - sregs.idt.limit = 0; - - kvm_setup_gdt(vm, &sregs.gdt); - - switch (vm->mode) { - case VM_MODE_PXXV48_4K: - sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG; - sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR; - sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX); - - kvm_seg_set_unusable(&sregs.ldt); - kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs); - kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds); - kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es); - kvm_setup_tss_64bit(vm, &sregs.tr, 0x18); - break; - - default: - TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); - } - - sregs.cr3 = vm->pgd; - vcpu_sregs_set(vcpu, &sregs); -} - -void kvm_arch_vm_post_create(struct kvm_vm *vm) -{ - vm_create_irqchip(vm); - sync_global_to_guest(vm, host_cpu_is_intel); - sync_global_to_guest(vm, host_cpu_is_amd); - - if (vm->subtype == VM_SUBTYPE_SEV) - sev_vm_init(vm); - else if (vm->subtype == VM_SUBTYPE_SEV_ES) - sev_es_vm_init(vm); -} - -void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) -{ - struct kvm_regs regs; - - vcpu_regs_get(vcpu, ®s); - regs.rip = (unsigned long) guest_code; - vcpu_regs_set(vcpu, ®s); -} - -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) -{ - struct kvm_mp_state mp_state; - struct kvm_regs regs; - vm_vaddr_t stack_vaddr; - struct kvm_vcpu *vcpu; - - stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), - DEFAULT_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); - - stack_vaddr += DEFAULT_STACK_PGS * getpagesize(); - - /* - * Align stack to match calling sequence requirements in section "The - * Stack Frame" of the System V ABI AMD64 Architecture Processor - * Supplement, which requires the value (%rsp + 8) to be a multiple of - * 16 when control is transferred to the function entry point. - * - * If this code is ever used to launch a vCPU with 32-bit entry point it - * may need to subtract 4 bytes instead of 8 bytes. - */ - TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE), - "__vm_vaddr_alloc() did not provide a page-aligned address"); - stack_vaddr -= 8; - - vcpu = __vm_vcpu_add(vm, vcpu_id); - vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid()); - vcpu_setup(vm, vcpu); - - /* Setup guest general purpose registers */ - vcpu_regs_get(vcpu, ®s); - regs.rflags = regs.rflags | 0x2; - regs.rsp = stack_vaddr; - vcpu_regs_set(vcpu, ®s); - - /* Setup the MP state */ - mp_state.mp_state = 0; - vcpu_mp_state_set(vcpu, &mp_state); - - return vcpu; -} - -struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) -{ - struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); - - vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid()); - - return vcpu; -} - -void vcpu_arch_free(struct kvm_vcpu *vcpu) -{ - if (vcpu->cpuid) - free(vcpu->cpuid); -} - -/* Do not use kvm_supported_cpuid directly except for validity checks. 
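 * (Callers should go through kvm_get_supported_cpuid(), which populates this
 * cache on first use.)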
*/ -static void *kvm_supported_cpuid; - -const struct kvm_cpuid2 *kvm_get_supported_cpuid(void) -{ - int kvm_fd; - - if (kvm_supported_cpuid) - return kvm_supported_cpuid; - - kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES); - kvm_fd = open_kvm_dev_path_or_exit(); - - kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, - (struct kvm_cpuid2 *)kvm_supported_cpuid); - - close(kvm_fd); - return kvm_supported_cpuid; -} - -static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, - uint32_t function, uint32_t index, - uint8_t reg, uint8_t lo, uint8_t hi) -{ - const struct kvm_cpuid_entry2 *entry; - int i; - - for (i = 0; i < cpuid->nent; i++) { - entry = &cpuid->entries[i]; - - /* - * The output registers in kvm_cpuid_entry2 are in alphabetical - * order, but kvm_x86_cpu_feature matches that mess, so yay - * pointer shenanigans! - */ - if (entry->function == function && entry->index == index) - return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo; - } - - return 0; -} - -bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid, - struct kvm_x86_cpu_feature feature) -{ - return __kvm_cpu_has(cpuid, feature.function, feature.index, - feature.reg, feature.bit, feature.bit); -} - -uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, - struct kvm_x86_cpu_property property) -{ - return __kvm_cpu_has(cpuid, property.function, property.index, - property.reg, property.lo_bit, property.hi_bit); -} - -uint64_t kvm_get_feature_msr(uint64_t msr_index) -{ - struct { - struct kvm_msrs header; - struct kvm_msr_entry entry; - } buffer = {}; - int r, kvm_fd; - - buffer.header.nmsrs = 1; - buffer.entry.index = msr_index; - kvm_fd = open_kvm_dev_path_or_exit(); - - r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header); - TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r)); - - close(kvm_fd); - return buffer.entry.data; -} - -void __vm_xsave_require_permission(uint64_t xfeature, const char *name) -{ - int kvm_fd; - u64 bitmask; - long rc; - struct kvm_device_attr attr = { - .group = 0, - .attr = KVM_X86_XCOMP_GUEST_SUPP, - .addr = (unsigned long) &bitmask, - }; - - TEST_ASSERT(!kvm_supported_cpuid, - "kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM"); - - TEST_ASSERT(is_power_of_2(xfeature), - "Dynamic XFeatures must be enabled one at a time"); - - kvm_fd = open_kvm_dev_path_or_exit(); - rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr); - close(kvm_fd); - - if (rc == -1 && (errno == ENXIO || errno == EINVAL)) - __TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported"); - - TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc); - - __TEST_REQUIRE(bitmask & xfeature, - "Required XSAVE feature '%s' not supported", name); - - TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, ilog2(xfeature))); - - rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask); - TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc); - TEST_ASSERT(bitmask & xfeature, - "'%s' (0x%lx) not permitted after prctl(ARCH_REQ_XCOMP_GUEST_PERM) permitted=0x%lx", - name, xfeature, bitmask); -} - -void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid) -{ - TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID"); - - /* Allow overriding the default CPUID. 
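 * (If a CPUID array was already set but is too small to hold @cpuid, it is
 * freed and reallocated below.)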
*/ - if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) { - free(vcpu->cpuid); - vcpu->cpuid = NULL; - } - - if (!vcpu->cpuid) - vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent); - - memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent)); - vcpu_set_cpuid(vcpu); -} - -void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, - struct kvm_x86_cpu_property property, - uint32_t value) -{ - struct kvm_cpuid_entry2 *entry; - - entry = __vcpu_get_cpuid_entry(vcpu, property.function, property.index); - - (&entry->eax)[property.reg] &= ~GENMASK(property.hi_bit, property.lo_bit); - (&entry->eax)[property.reg] |= value << property.lo_bit; - - vcpu_set_cpuid(vcpu); - - /* Sanity check that @value doesn't exceed the bounds in any way. */ - TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value); -} - -void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function) -{ - struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function); - - entry->eax = 0; - entry->ebx = 0; - entry->ecx = 0; - entry->edx = 0; - vcpu_set_cpuid(vcpu); -} - -void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu, - struct kvm_x86_cpu_feature feature, - bool set) -{ - struct kvm_cpuid_entry2 *entry; - u32 *reg; - - entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index); - reg = (&entry->eax) + feature.reg; - - if (set) - *reg |= BIT(feature.bit); - else - *reg &= ~BIT(feature.bit); - - vcpu_set_cpuid(vcpu); -} - -uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index) -{ - struct { - struct kvm_msrs header; - struct kvm_msr_entry entry; - } buffer = {}; - - buffer.header.nmsrs = 1; - buffer.entry.index = msr_index; - - vcpu_msrs_get(vcpu, &buffer.header); - - return buffer.entry.data; -} - -int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value) -{ - struct { - struct kvm_msrs header; - struct kvm_msr_entry entry; - } buffer = {}; - - memset(&buffer, 0, sizeof(buffer)); - buffer.header.nmsrs = 1; - buffer.entry.index = msr_index; - buffer.entry.data = msr_value; - - return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header); -} - -void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
-{ - va_list ap; - struct kvm_regs regs; - - TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n" - " num: %u", - num); - - va_start(ap, num); - vcpu_regs_get(vcpu, ®s); - - if (num >= 1) - regs.rdi = va_arg(ap, uint64_t); - - if (num >= 2) - regs.rsi = va_arg(ap, uint64_t); - - if (num >= 3) - regs.rdx = va_arg(ap, uint64_t); - - if (num >= 4) - regs.rcx = va_arg(ap, uint64_t); - - if (num >= 5) - regs.r8 = va_arg(ap, uint64_t); - - if (num >= 6) - regs.r9 = va_arg(ap, uint64_t); - - vcpu_regs_set(vcpu, ®s); - va_end(ap); -} - -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) -{ - struct kvm_regs regs; - struct kvm_sregs sregs; - - fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id); - - fprintf(stream, "%*sregs:\n", indent + 2, ""); - vcpu_regs_get(vcpu, ®s); - regs_dump(stream, ®s, indent + 4); - - fprintf(stream, "%*ssregs:\n", indent + 2, ""); - vcpu_sregs_get(vcpu, &sregs); - sregs_dump(stream, &sregs, indent + 4); -} - -static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs) -{ - struct kvm_msr_list *list; - struct kvm_msr_list nmsrs; - int kvm_fd, r; - - kvm_fd = open_kvm_dev_path_or_exit(); - - nmsrs.nmsrs = 0; - if (!feature_msrs) - r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs); - else - r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs); - - TEST_ASSERT(r == -1 && errno == E2BIG, - "Expected -E2BIG, got rc: %i errno: %i (%s)", - r, errno, strerror(errno)); - - list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0])); - TEST_ASSERT(list, "-ENOMEM when allocating MSR index list"); - list->nmsrs = nmsrs.nmsrs; - - if (!feature_msrs) - kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list); - else - kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list); - close(kvm_fd); - - TEST_ASSERT(list->nmsrs == nmsrs.nmsrs, - "Number of MSRs in list changed, was %d, now %d", - nmsrs.nmsrs, list->nmsrs); - return list; -} - -const struct kvm_msr_list *kvm_get_msr_index_list(void) -{ - static const struct kvm_msr_list *list; - - if (!list) - list = __kvm_get_msr_index_list(false); - return list; -} - - -const struct kvm_msr_list *kvm_get_feature_msr_index_list(void) -{ - static const struct kvm_msr_list *list; - - if (!list) - list = __kvm_get_msr_index_list(true); - return list; -} - -bool kvm_msr_is_in_save_restore_list(uint32_t msr_index) -{ - const struct kvm_msr_list *list = kvm_get_msr_index_list(); - int i; - - for (i = 0; i < list->nmsrs; ++i) { - if (list->indices[i] == msr_index) - return true; - } - - return false; -} - -static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu, - struct kvm_x86_state *state) -{ - int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2); - - if (size) { - state->xsave = malloc(size); - vcpu_xsave2_get(vcpu, state->xsave); - } else { - state->xsave = malloc(sizeof(struct kvm_xsave)); - vcpu_xsave_get(vcpu, state->xsave); - } -} - -struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu) -{ - const struct kvm_msr_list *msr_list = kvm_get_msr_index_list(); - struct kvm_x86_state *state; - int i; - - static int nested_size = -1; - - if (nested_size == -1) { - nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE); - TEST_ASSERT(nested_size <= sizeof(state->nested_), - "Nested state size too big, %i > %zi", - nested_size, sizeof(state->nested_)); - } - - /* - * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees - * guest state is consistent only after userspace re-enters the - * kernel with KVM_RUN. Complete IO prior to migrating state - * to a new VM. 
- */ - vcpu_run_complete_io(vcpu); - - state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0])); - TEST_ASSERT(state, "-ENOMEM when allocating kvm state"); - - vcpu_events_get(vcpu, &state->events); - vcpu_mp_state_get(vcpu, &state->mp_state); - vcpu_regs_get(vcpu, &state->regs); - vcpu_save_xsave_state(vcpu, state); - - if (kvm_has_cap(KVM_CAP_XCRS)) - vcpu_xcrs_get(vcpu, &state->xcrs); - - vcpu_sregs_get(vcpu, &state->sregs); - - if (nested_size) { - state->nested.size = sizeof(state->nested_); - - vcpu_nested_state_get(vcpu, &state->nested); - TEST_ASSERT(state->nested.size <= nested_size, - "Nested state size too big, %i (KVM_CHECK_CAP gave %i)", - state->nested.size, nested_size); - } else { - state->nested.size = 0; - } - - state->msrs.nmsrs = msr_list->nmsrs; - for (i = 0; i < msr_list->nmsrs; i++) - state->msrs.entries[i].index = msr_list->indices[i]; - vcpu_msrs_get(vcpu, &state->msrs); - - vcpu_debugregs_get(vcpu, &state->debugregs); - - return state; -} - -void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state) -{ - vcpu_sregs_set(vcpu, &state->sregs); - vcpu_msrs_set(vcpu, &state->msrs); - - if (kvm_has_cap(KVM_CAP_XCRS)) - vcpu_xcrs_set(vcpu, &state->xcrs); - - vcpu_xsave_set(vcpu, state->xsave); - vcpu_events_set(vcpu, &state->events); - vcpu_mp_state_set(vcpu, &state->mp_state); - vcpu_debugregs_set(vcpu, &state->debugregs); - vcpu_regs_set(vcpu, &state->regs); - - if (state->nested.size) - vcpu_nested_state_set(vcpu, &state->nested); -} - -void kvm_x86_state_cleanup(struct kvm_x86_state *state) -{ - free(state->xsave); - free(state); -} - -void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) -{ - if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) { - *pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 
36 : 32; - *va_bits = 32; - } else { - *pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR); - *va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR); - } -} - -void kvm_init_vm_address_properties(struct kvm_vm *vm) -{ - if (vm->subtype == VM_SUBTYPE_SEV || vm->subtype == VM_SUBTYPE_SEV_ES) { - vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT)); - vm->gpa_tag_mask = vm->arch.c_bit; - } -} - -static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr, - int dpl, unsigned short selector) -{ - struct idt_entry *base = - (struct idt_entry *)addr_gva2hva(vm, vm->idt); - struct idt_entry *e = &base[vector]; - - memset(e, 0, sizeof(*e)); - e->offset0 = addr; - e->selector = selector; - e->ist = 0; - e->type = 14; - e->dpl = dpl; - e->p = 1; - e->offset1 = addr >> 16; - e->offset2 = addr >> 32; -} - - -static bool kvm_fixup_exception(struct ex_regs *regs) -{ - if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10) - return false; - - if (regs->vector == DE_VECTOR) - return false; - - regs->rip = regs->r11; - regs->r9 = regs->vector; - regs->r10 = regs->error_code; - return true; -} - -void route_exception(struct ex_regs *regs) -{ - typedef void(*handler)(struct ex_regs *); - handler *handlers = (handler *)exception_handlers; - - if (handlers && handlers[regs->vector]) { - handlers[regs->vector](regs); - return; - } - - if (kvm_fixup_exception(regs)) - return; - - ucall_assert(UCALL_UNHANDLED, - "Unhandled exception in guest", __FILE__, __LINE__, - "Unhandled exception '0x%lx' at guest RIP '0x%lx'", - regs->vector, regs->rip); -} - -void vm_init_descriptor_tables(struct kvm_vm *vm) -{ - extern void *idt_handlers; - int i; - - vm->idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - /* Handlers have the same address in both address spaces.*/ - for (i = 0; i < NUM_INTERRUPTS; i++) - set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, - DEFAULT_CODE_SELECTOR); -} - -void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) -{ - struct kvm_vm *vm = vcpu->vm; - struct kvm_sregs sregs; - - vcpu_sregs_get(vcpu, &sregs); - sregs.idt.base = vm->idt; - sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1; - sregs.gdt.base = vm->gdt; - sregs.gdt.limit = getpagesize() - 1; - kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs); - vcpu_sregs_set(vcpu, &sregs); - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; -} - -void vm_install_exception_handler(struct kvm_vm *vm, int vector, - void (*handler)(struct ex_regs *)) -{ - vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); - - handlers[vector] = (vm_vaddr_t)handler; -} - -void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) -{ - struct ucall uc; - - if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) - REPORT_GUEST_ASSERT(uc); -} - -const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, - uint32_t function, uint32_t index) -{ - int i; - - for (i = 0; i < cpuid->nent; i++) { - if (cpuid->entries[i].function == function && - cpuid->entries[i].index == index) - return &cpuid->entries[i]; - } - - TEST_FAIL("CPUID function 0x%x index 0x%x not found ", function, index); - - return NULL; -} - -#define X86_HYPERCALL(inputs...) 
\ -({ \ - uint64_t r; \ - \ - asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \ - "jnz 1f\n\t" \ - "vmcall\n\t" \ - "jmp 2f\n\t" \ - "1: vmmcall\n\t" \ - "2:" \ - : "=a"(r) \ - : [use_vmmcall] "r" (host_cpu_is_amd), inputs); \ - \ - r; \ -}) - -uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, - uint64_t a3) -{ - return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); -} - -uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1) -{ - return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1)); -} - -void xen_hypercall(uint64_t nr, uint64_t a0, void *a1) -{ - GUEST_ASSERT(!__xen_hypercall(nr, a0, a1)); -} - -const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void) -{ - static struct kvm_cpuid2 *cpuid; - int kvm_fd; - - if (cpuid) - return cpuid; - - cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES); - kvm_fd = open_kvm_dev_path_or_exit(); - - kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid); - - close(kvm_fd); - return cpuid; -} - -void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu) -{ - static struct kvm_cpuid2 *cpuid_full; - const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv; - int i, nent = 0; - - if (!cpuid_full) { - cpuid_sys = kvm_get_supported_cpuid(); - cpuid_hv = kvm_get_supported_hv_cpuid(); - - cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent); - if (!cpuid_full) { - perror("malloc"); - abort(); - } - - /* Need to skip KVM CPUID leaves 0x400000xx */ - for (i = 0; i < cpuid_sys->nent; i++) { - if (cpuid_sys->entries[i].function >= 0x40000000 && - cpuid_sys->entries[i].function < 0x40000100) - continue; - cpuid_full->entries[nent] = cpuid_sys->entries[i]; - nent++; - } - - memcpy(&cpuid_full->entries[nent], cpuid_hv->entries, - cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2)); - cpuid_full->nent = nent + cpuid_hv->nent; - } - - vcpu_init_cpuid(vcpu, cpuid_full); -} - -const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES); - - vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid); - - return cpuid; -} - -unsigned long vm_compute_max_gfn(struct kvm_vm *vm) -{ - const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ - unsigned long ht_gfn, max_gfn, max_pfn; - uint8_t maxphyaddr; - - max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; - - /* Avoid reserved HyperTransport region on AMD processors. */ - if (!host_cpu_is_amd) - return max_gfn; - - /* On parts with <40 physical address bits, the area is fully hidden */ - if (vm->pa_bits < 40) - return max_gfn; - - /* Before family 17h, the HyperTransport area is just below 1T. */ - ht_gfn = (1 << 28) - num_ht_pages; - if (this_cpu_family() < 0x17) - goto done; - - /* - * Otherwise it's at the top of the physical address space, possibly - * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use - * the old conservative value if MAXPHYADDR is not enumerated. - */ - if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) - goto done; - - maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR); - max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1; - - if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION)) - max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION); - - ht_gfn = max_pfn - num_ht_pages; -done: - return min(max_gfn, ht_gfn - 1); -} - -/* Returns true if kvm_intel was loaded with unrestricted_guest=1. */ -bool vm_is_unrestricted_guest(struct kvm_vm *vm) -{ - /* Ensure that a KVM vendor-specific module is loaded. 
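 * (Opening /dev/kvm ensures kvm_intel/kvm_amd is present before the module
 * parameter is read from sysfs.)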
*/ - if (vm == NULL) - close(open_kvm_dev_path_or_exit()); - - return get_kvm_intel_param_bool("unrestricted_guest"); -} - -void kvm_selftest_arch_init(void) -{ - host_cpu_is_intel = this_cpu_is_intel(); - host_cpu_is_amd = this_cpu_is_amd(); -} - -bool sys_clocksource_is_based_on_tsc(void) -{ - char *clk_name = sys_get_cur_clocksource(); - bool ret = !strcmp(clk_name, "tsc\n") || - !strcmp(clk_name, "hyperv_clocksource_tsc_page\n"); - - free(clk_name); - - return ret; -} diff --git a/tools/testing/selftests/kvm/lib/x86_64/sev.c b/tools/testing/selftests/kvm/lib/x86_64/sev.c deleted file mode 100644 index e248d3364b9c..000000000000 --- a/tools/testing/selftests/kvm/lib/x86_64/sev.c +++ /dev/null @@ -1,114 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#define _GNU_SOURCE /* for program_invocation_short_name */ -#include <stdint.h> -#include <stdbool.h> - -#include "sev.h" - -/* - * sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the - * -1 would then cause an underflow back to 2**64 - 1. This is expected and - * correct. - * - * If the last range in the sparsebit is [x, y] and we try to iterate, - * sparsebit_next_set() will return 0, and sparsebit_next_clear() will try - * and find the first range, but that's correct because the condition - * expression would cause us to quit the loop. - */ -static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region) -{ - const struct sparsebit *protected_phy_pages = region->protected_phy_pages; - const vm_paddr_t gpa_base = region->region.guest_phys_addr; - const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift; - sparsebit_idx_t i, j; - - if (!sparsebit_any_set(protected_phy_pages)) - return; - - sev_register_encrypted_memory(vm, region); - - sparsebit_for_each_set_range(protected_phy_pages, i, j) { - const uint64_t size = (j - i + 1) * vm->page_size; - const uint64_t offset = (i - lowest_page_in_region) * vm->page_size; - - sev_launch_update_data(vm, gpa_base + offset, size); - } -} - -void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) -{ - struct kvm_sev_launch_start launch_start = { - .policy = policy, - }; - struct userspace_mem_region *region; - struct kvm_sev_guest_status status; - int ctr; - - vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start); - vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); - - TEST_ASSERT_EQ(status.policy, policy); - TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE); - - hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) - encrypt_region(vm, region); - - if (policy & SEV_POLICY_ES) - vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); - - vm->arch.is_pt_protected = true; -} - -void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement) -{ - struct kvm_sev_launch_measure launch_measure; - struct kvm_sev_guest_status guest_status; - - launch_measure.len = 256; - launch_measure.uaddr = (__u64)measurement; - vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure); - - vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status); - TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET); -} - -void sev_vm_launch_finish(struct kvm_vm *vm) -{ - struct kvm_sev_guest_status status; - - vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); - TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE || - status.state == SEV_GUEST_STATE_LAUNCH_SECRET, - "Unexpected guest state: %d", status.state); - - vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL); - - vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); - TEST_ASSERT_EQ(status.state, 
SEV_GUEST_STATE_RUNNING); -} - -struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t policy, void *guest_code, - struct kvm_vcpu **cpu) -{ - struct vm_shape shape = { - .type = VM_TYPE_DEFAULT, - .mode = VM_MODE_DEFAULT, - .subtype = policy & SEV_POLICY_ES ? VM_SUBTYPE_SEV_ES : - VM_SUBTYPE_SEV, - }; - struct kvm_vm *vm; - struct kvm_vcpu *cpus[1]; - uint8_t measurement[512]; - - vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus); - *cpu = cpus[0]; - - sev_vm_launch(vm, policy); - - /* TODO: Validate the measurement is as expected. */ - sev_vm_launch_measure(vm, measurement); - - sev_vm_launch_finish(vm); - - return vm; -} diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c deleted file mode 100644 index 5495a92dfd5a..000000000000 --- a/tools/testing/selftests/kvm/lib/x86_64/svm.c +++ /dev/null @@ -1,164 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * tools/testing/selftests/kvm/lib/x86_64/svm.c - * Helpers used for nested SVM testing - * Largely inspired from KVM unit test svm.c - * - * Copyright (C) 2020, Red Hat, Inc. - */ - -#include "test_util.h" -#include "kvm_util.h" -#include "processor.h" -#include "svm_util.h" - -#define SEV_DEV_PATH "/dev/sev" - -struct gpr64_regs guest_regs; -u64 rflags; - -/* Allocate memory regions for nested SVM tests. - * - * Input Args: - * vm - The VM to allocate guest-virtual addresses in. - * - * Output Args: - * p_svm_gva - The guest virtual address for the struct svm_test_data. - * - * Return: - * Pointer to structure with the addresses of the SVM areas. - */ -struct svm_test_data * -vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva) -{ - vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm); - struct svm_test_data *svm = addr_gva2hva(vm, svm_gva); - - svm->vmcb = (void *)vm_vaddr_alloc_page(vm); - svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb); - svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb); - - svm->save_area = (void *)vm_vaddr_alloc_page(vm); - svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area); - svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area); - - svm->msr = (void *)vm_vaddr_alloc_page(vm); - svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr); - svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr); - memset(svm->msr_hva, 0, getpagesize()); - - *p_svm_gva = svm_gva; - return svm; -} - -static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector, - u64 base, u32 limit, u32 attr) -{ - seg->selector = selector; - seg->attrib = attr; - seg->limit = limit; - seg->base = base; -} - -void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp) -{ - struct vmcb *vmcb = svm->vmcb; - uint64_t vmcb_gpa = svm->vmcb_gpa; - struct vmcb_save_area *save = &vmcb->save; - struct vmcb_control_area *ctrl = &vmcb->control; - u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK - | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK; - u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK - | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK; - uint64_t efer; - - efer = rdmsr(MSR_EFER); - wrmsr(MSR_EFER, efer | EFER_SVME); - wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa); - - memset(vmcb, 0, sizeof(*vmcb)); - asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory"); - vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr); - vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr); - vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr); - vmcb_set_seg(&save->ds, get_ds(), 0, -1U, data_seg_attr); - 
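	/* GDTR/IDTR are pseudo-descriptors: the selector and attributes are unused, only base/limit matter. */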
vmcb_set_seg(&save->gdtr, 0, get_gdt().address, get_gdt().size, 0); - vmcb_set_seg(&save->idtr, 0, get_idt().address, get_idt().size, 0); - - ctrl->asid = 1; - save->cpl = 0; - save->efer = rdmsr(MSR_EFER); - asm volatile ("mov %%cr4, %0" : "=r"(save->cr4) : : "memory"); - asm volatile ("mov %%cr3, %0" : "=r"(save->cr3) : : "memory"); - asm volatile ("mov %%cr0, %0" : "=r"(save->cr0) : : "memory"); - asm volatile ("mov %%dr7, %0" : "=r"(save->dr7) : : "memory"); - asm volatile ("mov %%dr6, %0" : "=r"(save->dr6) : : "memory"); - asm volatile ("mov %%cr2, %0" : "=r"(save->cr2) : : "memory"); - save->g_pat = rdmsr(MSR_IA32_CR_PAT); - save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); - ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | - (1ULL << INTERCEPT_VMMCALL); - ctrl->msrpm_base_pa = svm->msr_gpa; - - vmcb->save.rip = (u64)guest_rip; - vmcb->save.rsp = (u64)guest_rsp; - guest_regs.rdi = (u64)svm; -} - -/* - * save/restore 64-bit general registers except rax, rip, rsp - * which are directly handed through the VMCB guest processor state - */ -#define SAVE_GPR_C \ - "xchg %%rbx, guest_regs+0x20\n\t" \ - "xchg %%rcx, guest_regs+0x10\n\t" \ - "xchg %%rdx, guest_regs+0x18\n\t" \ - "xchg %%rbp, guest_regs+0x30\n\t" \ - "xchg %%rsi, guest_regs+0x38\n\t" \ - "xchg %%rdi, guest_regs+0x40\n\t" \ - "xchg %%r8, guest_regs+0x48\n\t" \ - "xchg %%r9, guest_regs+0x50\n\t" \ - "xchg %%r10, guest_regs+0x58\n\t" \ - "xchg %%r11, guest_regs+0x60\n\t" \ - "xchg %%r12, guest_regs+0x68\n\t" \ - "xchg %%r13, guest_regs+0x70\n\t" \ - "xchg %%r14, guest_regs+0x78\n\t" \ - "xchg %%r15, guest_regs+0x80\n\t" - -#define LOAD_GPR_C SAVE_GPR_C - -/* - * selftests do not use interrupts so we dropped clgi/sti/cli/stgi - * for now. registers involved in LOAD/SAVE_GPR_C are eventually - * unmodified so they do not need to be in the clobber list. - */ -void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa) -{ - asm volatile ( - "vmload %[vmcb_gpa]\n\t" - "mov rflags, %%r15\n\t" // rflags - "mov %%r15, 0x170(%[vmcb])\n\t" - "mov guest_regs, %%r15\n\t" // rax - "mov %%r15, 0x1f8(%[vmcb])\n\t" - LOAD_GPR_C - "vmrun %[vmcb_gpa]\n\t" - SAVE_GPR_C - "mov 0x170(%[vmcb]), %%r15\n\t" // rflags - "mov %%r15, rflags\n\t" - "mov 0x1f8(%[vmcb]), %%r15\n\t" // rax - "mov %%r15, guest_regs\n\t" - "vmsave %[vmcb_gpa]\n\t" - : : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa) - : "r15", "memory"); -} - -/* - * Open SEV_DEV_PATH if available, otherwise exit the entire program. - * - * Return: - * The opened file descriptor of /dev/sev. - */ -int open_sev_dev_path_or_exit(void) -{ - return open_path_or_exit(SEV_DEV_PATH, 0); -} diff --git a/tools/testing/selftests/kvm/lib/x86_64/ucall.c b/tools/testing/selftests/kvm/lib/x86_64/ucall.c deleted file mode 100644 index 1265cecc7dd1..000000000000 --- a/tools/testing/selftests/kvm/lib/x86_64/ucall.c +++ /dev/null @@ -1,56 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ucall support. A ucall is a "hypercall to userspace". - * - * Copyright (C) 2018, Red Hat, Inc. - */ -#include "kvm_util.h" - -#define UCALL_PIO_PORT ((uint16_t)0x1000) - -void ucall_arch_do_ucall(vm_vaddr_t uc) -{ - /* - * FIXME: Revert this hack (the entire commit that added it) once nVMX - * preserves L2 GPRs across a nested VM-Exit. If a ucall from L2, e.g. - * to do a GUEST_SYNC(), lands the vCPU in L1, any and all GPRs can be - * clobbered by L1. Save and restore non-volatile GPRs (clobbering RBP - * in particular is problematic) along with RDX and RDI (which are - * inputs), and clobber volatile GPRs. 
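 * (RAX is written by the "in" instruction itself, hence the "rax" clobber.)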
-
-void ucall_arch_do_ucall(vm_vaddr_t uc)
-{
-	/*
-	 * FIXME: Revert this hack (the entire commit that added it) once nVMX
-	 * preserves L2 GPRs across a nested VM-Exit.  If a ucall from L2, e.g.
-	 * to do a GUEST_SYNC(), lands the vCPU in L1, any and all GPRs can be
-	 * clobbered by L1.  Save and restore non-volatile GPRs (clobbering RBP
-	 * in particular is problematic) along with RDX and RDI (which are
-	 * inputs), and clobber volatile GPRs. *sigh*
-	 */
-#define HORRIFIC_L2_UCALL_CLOBBER_HACK	\
-	"rcx", "rsi", "r8", "r9", "r10", "r11"
-
-	asm volatile("push %%rbp\n\t"
-		     "push %%r15\n\t"
-		     "push %%r14\n\t"
-		     "push %%r13\n\t"
-		     "push %%r12\n\t"
-		     "push %%rbx\n\t"
-		     "push %%rdx\n\t"
-		     "push %%rdi\n\t"
-		     "in %[port], %%al\n\t"
-		     "pop %%rdi\n\t"
-		     "pop %%rdx\n\t"
-		     "pop %%rbx\n\t"
-		     "pop %%r12\n\t"
-		     "pop %%r13\n\t"
-		     "pop %%r14\n\t"
-		     "pop %%r15\n\t"
-		     "pop %%rbp\n\t"
		     : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory",
-		     HORRIFIC_L2_UCALL_CLOBBER_HACK);
-}
-
-void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
-{
-	struct kvm_run *run = vcpu->run;
-
-	if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
-		struct kvm_regs regs;
-
-		vcpu_regs_get(vcpu, &regs);
-		return (void *)regs.rdi;
-	}
-	return NULL;
-}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
deleted file mode 100644
index 089b8925b6b2..000000000000
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ /dev/null
@@ -1,554 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * tools/testing/selftests/kvm/lib/x86_64/vmx.c
- *
- * Copyright (C) 2018, Google LLC.
- */
-
-#include <asm/msr-index.h>
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-#include "vmx.h"
-
-#define PAGE_SHIFT_4K  12
-
-#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000
-
-bool enable_evmcs;
-
-struct hv_enlightened_vmcs *current_evmcs;
-struct hv_vp_assist_page *current_vp_assist;
-
-struct eptPageTableEntry {
-	uint64_t readable:1;
-	uint64_t writable:1;
-	uint64_t executable:1;
-	uint64_t memory_type:3;
-	uint64_t ignore_pat:1;
-	uint64_t page_size:1;
-	uint64_t accessed:1;
-	uint64_t dirty:1;
-	uint64_t ignored_11_10:2;
-	uint64_t address:40;
-	uint64_t ignored_62_52:11;
-	uint64_t suppress_ve:1;
-};
-
-struct eptPageTablePointer {
-	uint64_t memory_type:3;
-	uint64_t page_walk_length:3;
-	uint64_t ad_enabled:1;
-	uint64_t reserved_11_07:5;
-	uint64_t address:40;
-	uint64_t reserved_63_52:12;
-};
-
-int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
-{
-	uint16_t evmcs_ver;
-
-	vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
-			(unsigned long)&evmcs_ver);
-
-	/* KVM should return supported EVMCS version range */
-	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
-		    (evmcs_ver & 0xff) > 0,
-		    "Incorrect EVMCS version range: %x:%x",
-		    evmcs_ver & 0xff, evmcs_ver >> 8);
-
-	return evmcs_ver;
-}
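-
-/*
- * Editorial note, not part of the original file: the returned version range
- * is packed into 16 bits, low byte = lowest supported eVMCS version, high
- * byte = highest, so a caller could unpack it as:
- *
- *	uint8_t evmcs_min = evmcs_ver & 0xff;
- *	uint8_t evmcs_max = evmcs_ver >> 8;
- */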
-
-/* Allocate memory regions for nested VMX tests.
- *
- * Input Args:
- *   vm - The VM to allocate guest-virtual addresses in.
- *
- * Output Args:
- *   p_vmx_gva - The guest virtual address for the struct vmx_pages.
- *
- * Return:
- *   Pointer to structure with the addresses of the VMX areas.
- */
-struct vmx_pages *
-vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
-{
-	vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
-	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
-
-	/* Setup of a region of guest memory for the vmxon region. */
-	vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
-	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
-	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
-
-	/* Setup of a region of guest memory for a vmcs. */
-	vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
-	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
-	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
-
-	/* Setup of a region of guest memory for the MSR bitmap. */
-	vmx->msr = (void *)vm_vaddr_alloc_page(vm);
-	vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
-	vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
-	memset(vmx->msr_hva, 0, getpagesize());
-
-	/* Setup of a region of guest memory for the shadow VMCS. */
-	vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
-	vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
-	vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);
-
-	/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
-	vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
-	vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
-	vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
-	memset(vmx->vmread_hva, 0, getpagesize());
-
-	vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
-	vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
-	vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
-	memset(vmx->vmwrite_hva, 0, getpagesize());
-
-	*p_vmx_gva = vmx_gva;
-	return vmx;
-}
-
-bool prepare_for_vmx_operation(struct vmx_pages *vmx)
-{
-	uint64_t feature_control;
-	uint64_t required;
-	unsigned long cr0;
-	unsigned long cr4;
-
-	/*
-	 * Ensure bits in CR0 and CR4 are valid in VMX operation:
-	 * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
-	 * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
-	 */
-	__asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
-	cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
-	cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
-	__asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");
-
-	__asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
-	cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
-	cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
-	/* Enable VMX operation */
-	cr4 |= X86_CR4_VMXE;
-	__asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");
-
-	/*
-	 * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
-	 *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
-	 *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
-	 *    outside of SMX causes a #GP.
-	 */
-	required = FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
-	required |= FEAT_CTL_LOCKED;
-	feature_control = rdmsr(MSR_IA32_FEAT_CTL);
-	if ((feature_control & required) != required)
-		wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);
-
-	/* Enter VMX root operation. */
-	*(uint32_t *)(vmx->vmxon) = vmcs_revision();
-	if (vmxon(vmx->vmxon_gpa))
-		return false;
-
-	return true;
-}
-
-bool load_vmcs(struct vmx_pages *vmx)
-{
-	/* Load a VMCS. */
-	*(uint32_t *)(vmx->vmcs) = vmcs_revision();
-	if (vmclear(vmx->vmcs_gpa))
-		return false;
-
-	if (vmptrld(vmx->vmcs_gpa))
-		return false;
-
-	/* Setup shadow VMCS, do not load it yet. */
-	*(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
-	if (vmclear(vmx->shadow_vmcs_gpa))
-		return false;
-
-	return true;
-}
-
-static bool ept_vpid_cap_supported(uint64_t mask)
-{
-	return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
-}
-
-bool ept_1g_pages_supported(void)
-{
-	return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES);
-}
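-
-/*
- * Editorial sketch, not part of the original file: the CR0/CR4 adjustment in
- * prepare_for_vmx_operation() is an instance of the generic FIXED0/FIXED1
- * rule, which could be factored as a (hypothetical) helper:
- *
- *	static unsigned long vmx_fix_cr(unsigned long cr, uint32_t fixed0_msr,
- *					uint32_t fixed1_msr)
- *	{
- *		return (cr & rdmsr(fixed1_msr)) | rdmsr(fixed0_msr);
- *	}
- */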
-
-/*
- * Initialize the control fields to the most basic settings possible.
- */
-static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
-{
-	uint32_t sec_exec_ctl = 0;
-
-	vmwrite(VIRTUAL_PROCESSOR_ID, 0);
-	vmwrite(POSTED_INTR_NV, 0);
-
-	vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
-
-	if (vmx->eptp_gpa) {
-		uint64_t ept_paddr;
-		struct eptPageTablePointer eptp = {
-			.memory_type = VMX_BASIC_MEM_TYPE_WB,
-			.page_walk_length = 3, /* + 1 */
-			.ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS),
-			.address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
-		};
-
-		memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
-		vmwrite(EPT_POINTER, ept_paddr);
-		sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
-	}
-
-	if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
-		vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-			rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
-	else {
-		vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
-		GUEST_ASSERT(!sec_exec_ctl);
-	}
-
-	vmwrite(EXCEPTION_BITMAP, 0);
-	vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
-	vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
-	vmwrite(CR3_TARGET_COUNT, 0);
-	vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
-		VM_EXIT_HOST_ADDR_SPACE_SIZE);	  /* 64-bit host */
-	vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
-	vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
-	vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
-		VM_ENTRY_IA32E_MODE);		  /* 64-bit guest */
-	vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
-	vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
-	vmwrite(TPR_THRESHOLD, 0);
-
-	vmwrite(CR0_GUEST_HOST_MASK, 0);
-	vmwrite(CR4_GUEST_HOST_MASK, 0);
-	vmwrite(CR0_READ_SHADOW, get_cr0());
-	vmwrite(CR4_READ_SHADOW, get_cr4());
-
-	vmwrite(MSR_BITMAP, vmx->msr_gpa);
-	vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
-	vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
-}
-
-/*
- * Initialize the host state fields based on the current host state, with
- * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
- * or vmresume.
- */
-static inline void init_vmcs_host_state(void)
-{
-	uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);
-
-	vmwrite(HOST_ES_SELECTOR, get_es());
-	vmwrite(HOST_CS_SELECTOR, get_cs());
-	vmwrite(HOST_SS_SELECTOR, get_ss());
-	vmwrite(HOST_DS_SELECTOR, get_ds());
-	vmwrite(HOST_FS_SELECTOR, get_fs());
-	vmwrite(HOST_GS_SELECTOR, get_gs());
-	vmwrite(HOST_TR_SELECTOR, get_tr());
-
-	if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
-		vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
-	if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
-		vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
-	if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
-		vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
-			rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));
-
-	vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));
-
-	vmwrite(HOST_CR0, get_cr0());
-	vmwrite(HOST_CR3, get_cr3());
-	vmwrite(HOST_CR4, get_cr4());
-	vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
-	vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
-	vmwrite(HOST_TR_BASE,
-		get_desc64_base((struct desc64 *)(get_gdt().address + get_tr())));
-	vmwrite(HOST_GDTR_BASE, get_gdt().address);
-	vmwrite(HOST_IDTR_BASE, get_idt().address);
-	vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
-	vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
-}
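-
-/*
- * Editorial sketch, not part of the original file: assuming the architectural
- * EPTP layout (memory type in bits 2:0, page-walk length minus 1 in bits 5:3,
- * A/D-enable in bit 6, PFN from bit 12 up), the bit-field struct used in
- * init_vmcs_control_fields() is equivalent to assembling the value by hand:
- *
- *	uint64_t eptp = VMX_BASIC_MEM_TYPE_WB | (3ull << 3) |
- *			(ad_enabled ? (1ull << 6) : 0) |
- *			(vmx->eptp_gpa & ~0xfffull);
- */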
-
-/*
- * Initialize the guest state fields essentially as a clone of
- * the host state fields. Some host state fields have fixed
- * values, and we set the corresponding guest state fields accordingly.
- */
-static inline void init_vmcs_guest_state(void *rip, void *rsp)
-{
-	vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
-	vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
-	vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
-	vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
-	vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
-	vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
-	vmwrite(GUEST_LDTR_SELECTOR, 0);
-	vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
-	vmwrite(GUEST_INTR_STATUS, 0);
-	vmwrite(GUEST_PML_INDEX, 0);
-
-	vmwrite(VMCS_LINK_POINTER, -1ll);
-	vmwrite(GUEST_IA32_DEBUGCTL, 0);
-	vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
-	vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
-	vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
-		vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));
-
-	vmwrite(GUEST_ES_LIMIT, -1);
-	vmwrite(GUEST_CS_LIMIT, -1);
-	vmwrite(GUEST_SS_LIMIT, -1);
-	vmwrite(GUEST_DS_LIMIT, -1);
-	vmwrite(GUEST_FS_LIMIT, -1);
-	vmwrite(GUEST_GS_LIMIT, -1);
-	vmwrite(GUEST_LDTR_LIMIT, -1);
-	vmwrite(GUEST_TR_LIMIT, 0x67);
-	vmwrite(GUEST_GDTR_LIMIT, 0xffff);
-	vmwrite(GUEST_IDTR_LIMIT, 0xffff);
-	vmwrite(GUEST_ES_AR_BYTES,
-		vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
-	vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
-	vmwrite(GUEST_SS_AR_BYTES, 0xc093);
-	vmwrite(GUEST_DS_AR_BYTES,
-		vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
-	vmwrite(GUEST_FS_AR_BYTES,
-		vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
-	vmwrite(GUEST_GS_AR_BYTES,
-		vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
-	vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);
-	vmwrite(GUEST_TR_AR_BYTES, 0x8b);
-	vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
-	vmwrite(GUEST_ACTIVITY_STATE, 0);
-	vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
-	vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);
-
-	vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
-	vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
-	vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
-	vmwrite(GUEST_ES_BASE, 0);
-	vmwrite(GUEST_CS_BASE, 0);
-	vmwrite(GUEST_SS_BASE, 0);
-	vmwrite(GUEST_DS_BASE, 0);
-	vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
-	vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
-	vmwrite(GUEST_LDTR_BASE, 0);
-	vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
-	vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
-	vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
-	vmwrite(GUEST_DR7, 0x400);
-	vmwrite(GUEST_RSP, (uint64_t)rsp);
-	vmwrite(GUEST_RIP, (uint64_t)rip);
-	vmwrite(GUEST_RFLAGS, 2);
-	vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
-	vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
-	vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
-}
-
-void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
-{
-	init_vmcs_control_fields(vmx);
-	init_vmcs_host_state();
-	init_vmcs_guest_state(guest_rip, guest_rsp);
-}
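-
-/*
- * Editorial usage sketch, not part of the original file: an L1 guest
- * typically brings up L2 with the helpers above (l2_guest_code and l2_stack
- * are hypothetical names):
- *
- *	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
- *	GUEST_ASSERT(load_vmcs(vmx));
- *	prepare_vmcs(vmx, l2_guest_code, &l2_stack[STACK_SIZE]);
- *	GUEST_ASSERT(!vmlaunch());
- *	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
- */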
-
-static void nested_create_pte(struct kvm_vm *vm,
-			      struct eptPageTableEntry *pte,
-			      uint64_t nested_paddr,
-			      uint64_t paddr,
-			      int current_level,
-			      int target_level)
-{
-	if (!pte->readable) {
-		pte->writable = true;
-		pte->readable = true;
-		pte->executable = true;
-		pte->page_size = (current_level == target_level);
-		if (pte->page_size)
-			pte->address = paddr >> vm->page_shift;
-		else
-			pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
-	} else {
-		/*
-		 * Entry already present. Assert that the caller doesn't want
-		 * a hugepage at this level, and that there isn't a hugepage at
-		 * this level.
-		 */
-		TEST_ASSERT(current_level != target_level,
-			    "Cannot create hugepage at level: %u, nested_paddr: 0x%lx",
-			    current_level, nested_paddr);
-		TEST_ASSERT(!pte->page_size,
-			    "Cannot create page table at level: %u, nested_paddr: 0x%lx",
-			    current_level, nested_paddr);
-	}
-}
-
-void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		     uint64_t nested_paddr, uint64_t paddr, int target_level)
-{
-	const uint64_t page_size = PG_LEVEL_SIZE(target_level);
-	struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
-	uint16_t index;
-
-	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
-		    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
-
-	TEST_ASSERT((nested_paddr >> 48) == 0,
-		    "Nested physical address 0x%lx requires 5-level paging",
-		    nested_paddr);
-	TEST_ASSERT((nested_paddr % page_size) == 0,
-		    "Nested physical address not on page boundary,\n"
-		    "  nested_paddr: 0x%lx page_size: 0x%lx",
-		    nested_paddr, page_size);
-	TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
-		    "Nested physical address beyond maximum supported,\n"
-		    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		    nested_paddr, vm->max_gfn, vm->page_size);
-	TEST_ASSERT((paddr % page_size) == 0,
-		    "Physical address not on page boundary,\n"
-		    "  paddr: 0x%lx page_size: 0x%lx",
-		    paddr, page_size);
-	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
-		    "Physical address beyond maximum supported,\n"
-		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		    paddr, vm->max_gfn, vm->page_size);
-
-	for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {
-		index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
-		pte = &pt[index];
-
-		nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);
-
-		if (pte->page_size)
-			break;
-
-		pt = addr_gpa2hva(vm, pte->address * vm->page_size);
-	}
-
-	/*
-	 * For now mark these as accessed and dirty because the only
-	 * testcase we have needs that. Can be reconsidered later.
-	 */
-	pte->accessed = true;
-	pte->dirty = true;
-}
-
-void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		   uint64_t nested_paddr, uint64_t paddr)
-{
-	__nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
-}
-
-/*
- * Map a range of EPT guest physical addresses to the VM's physical addresses
- *
- * Input Args:
- *   vm - Virtual Machine
- *   nested_paddr - Nested guest physical address to map
- *   paddr - VM Physical Address
- *   size - The size of the range to map
- *   level - The level at which to map the range
- *
- * Output Args: None
- *
- * Return: None
- *
- * Within the VM given by vm, creates a nested guest translation for the
- * page range starting at nested_paddr to the page range starting at paddr.
- */
-void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		  uint64_t nested_paddr, uint64_t paddr, uint64_t size,
-		  int level)
-{
-	size_t page_size = PG_LEVEL_SIZE(level);
-	size_t npages = size / page_size;
-
-	TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
-	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
-
-	while (npages--) {
-		__nested_pg_map(vmx, vm, nested_paddr, paddr, level);
-		nested_paddr += page_size;
-		paddr += page_size;
-	}
-}
-
-void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		uint64_t nested_paddr, uint64_t paddr, uint64_t size)
-{
-	__nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
-}
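-
-/*
- * Editorial note, not part of the original file: each iteration of the walk
- * in __nested_pg_map() consumes 9 bits of the nested GPA, i.e. for the
- * 4-level EPT used here the index at a given level is:
- *
- *	index = (nested_paddr >> (12 + 9 * (level - 1))) & 0x1ff;
- *
- * which is what PG_LEVEL_SHIFT(level) expands to, assuming the selftests'
- * definition of PG_LEVEL_4K..PG_LEVEL_512G as levels 1..4.
- */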
-
-/*
- * Prepare an identity extended page table that maps all the
- * physical pages in the VM.
- */
-void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
-			uint32_t memslot)
-{
-	sparsebit_idx_t i, last;
-	struct userspace_mem_region *region =
-		memslot2region(vm, memslot);
-
-	i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
-	last = i + (region->region.memory_size >> vm->page_shift);
-	for (;;) {
-		i = sparsebit_next_clear(region->unused_phy_pages, i);
-		if (i > last)
-			break;
-
-		nested_map(vmx, vm,
-			   (uint64_t)i << vm->page_shift,
-			   (uint64_t)i << vm->page_shift,
-			   1 << vm->page_shift);
-	}
-}
-
-/* Identity map a region with 1GiB pages. */
-void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
-			    uint64_t addr, uint64_t size)
-{
-	__nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
-}
-
-bool kvm_cpu_has_ept(void)
-{
-	uint64_t ctrl;
-
-	ctrl = kvm_get_feature_msr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
-	if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
-		return false;
-
-	ctrl = kvm_get_feature_msr(MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
-	return ctrl & SECONDARY_EXEC_ENABLE_EPT;
-}
-
-void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
-		  uint32_t eptp_memslot)
-{
-	TEST_ASSERT(kvm_cpu_has_ept(), "KVM doesn't support nested EPT");
-
-	vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
-	vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
-	vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
-}
-
-void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
-{
-	vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
-	vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
-	vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
-}
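
Taken together, a host-side test would typically wire the EPT helpers above up
as follows (a minimal sketch; l1_guest_code is a hypothetical name, the
library functions are the ones from this file and kvm_util.h):

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct vmx_pages *vmx;
	vm_vaddr_t vmx_gva;

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	vmx = vcpu_alloc_vmx(vm, &vmx_gva);
	prepare_eptp(vmx, vm, 0);
	nested_map_memslot(vmx, vm, 0);	/* identity map all guest memory */
	vcpu_args_set(vcpu, 1, vmx_gva);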