summaryrefslogtreecommitdiff
path: root/tools/testing/selftests/kvm/lib/x86_64/processor.c
diff options
context:
space:
mode:
authorSean Christopherson <seanjc@google.com>2022-10-06 00:45:12 +0000
committerSean Christopherson <seanjc@google.com>2022-11-16 16:58:56 -0800
commit96b69958c77d84e49c06ebe2e3502e4c1620e3c0 (patch)
treef64b7b5fbfc8e65e3d19c3feb326c4de30b40cb3 /tools/testing/selftests/kvm/lib/x86_64/processor.c
parentefe91dc307d00766911fbcb5021bdc3a1cf9c79e (diff)
KVM: selftests: Play nice with huge pages when getting PTEs/GPAs
Play nice with huge pages when getting PTEs and translating GVAs to GPAs; there's no reason to disallow using huge pages in selftests. Use PG_LEVEL_NONE to indicate that the caller doesn't care about the mapping level and just wants to get the pte+level. Signed-off-by: Sean Christopherson <seanjc@google.com> Link: https://lore.kernel.org/r/20221006004512.666529-8-seanjc@google.com
Diffstat (limited to 'tools/testing/selftests/kvm/lib/x86_64/processor.c')
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c45
1 file changed, 38 insertions(+), 7 deletions(-)
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 053f64191122..efa20d0f9927 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -245,10 +245,26 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
}
}
-uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
/*
 * Check whether the PTE at page table level @current_level terminates the
 * walk for the caller.
 *
 * If the entry maps a huge page (PTE_LARGE_MASK set), the walk cannot go any
 * deeper: assert that the caller either didn't care about the mapping level
 * (PG_LEVEL_NONE) or expected a huge page at exactly this level, and report
 * the discovered level back through @level.
 *
 * Returns true if the walk should stop here and this entry be handed back to
 * the caller, i.e. if @level (requested, or just discovered via a huge page)
 * matches @current_level.
 */
static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
{
	if (*pte & PTE_LARGE_MASK) {
		TEST_ASSERT(*level == PG_LEVEL_NONE ||
			    *level == current_level,
			    "Unexpected hugepage at level %d\n", current_level);
		*level = current_level;
	}

	return *level == current_level;
}
+
+uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
+ int *level)
{
uint64_t *pml4e, *pdpe, *pde;
+ TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM,
+ "Invalid PG_LEVEL_* '%d'", *level);
+
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
@@ -263,18 +279,27 @@ uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
"Canonical check failed. The virtual address is invalid.");
pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);
+ if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G))
+ return pml4e;
pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G);
- TEST_ASSERT(!(*pdpe & PTE_LARGE_MASK),
- "Expected pdpe to map a pde not a 1-GByte page.");
+ if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G))
+ return pdpe;
pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);
- TEST_ASSERT(!(*pde & PTE_LARGE_MASK),
- "Expected pde to map a pte not a 2-MByte page.");
+ if (vm_is_target_pte(pde, level, PG_LEVEL_2M))
+ return pde;
return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
}
/*
 * Look up the 4KiB-level PTE for @vaddr.  Requests PG_LEVEL_4K explicitly,
 * so the underlying walker asserts if @vaddr turns out to be mapped by a
 * huge page instead of a 4K page.
 */
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
{
	int level = PG_LEVEL_4K;

	return __vm_get_page_table_entry(vm, vaddr, &level);
}
+
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
uint64_t *pml4e, *pml4e_start;
@@ -458,11 +483,17 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
/*
 * Translate a guest virtual address to a guest physical address by walking
 * the guest's page tables.  Passing PG_LEVEL_NONE means "any mapping level":
 * the walk stops at whatever leaf entry it finds (4K, 2M, or 1G), and the
 * actual level is reported back in @level so the correct page-offset mask
 * can be applied below.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int level = PG_LEVEL_NONE;
	uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);

	/* The leaf entry must be present for the translation to be valid. */
	TEST_ASSERT(*pte & PTE_PRESENT_MASK,
		    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);

	/*
	 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
	 * address bits to be zero.
	 */
	return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level));
}
static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)