Diffstat (limited to 'tools/testing/selftests/kvm/aarch64')
-rw-r--r--  tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c      167
-rw-r--r--  tools/testing/selftests/kvm/aarch64/arch_timer.c           480
-rw-r--r--  tools/testing/selftests/kvm/aarch64/debug-exceptions.c     607
-rw-r--r--  tools/testing/selftests/kvm/aarch64/get-reg-list.c         757
-rw-r--r--  tools/testing/selftests/kvm/aarch64/hypercalls.c           308
-rw-r--r--  tools/testing/selftests/kvm/aarch64/page_fault_test.c     1136
-rw-r--r--  tools/testing/selftests/kvm/aarch64/psci_test.c            198
-rw-r--r--  tools/testing/selftests/kvm/aarch64/set_id_regs.c          481
-rw-r--r--  tools/testing/selftests/kvm/aarch64/smccc_filter.c         268
-rw-r--r--  tools/testing/selftests/kvm/aarch64/vcpu_width_config.c    121
-rw-r--r--  tools/testing/selftests/kvm/aarch64/vgic_init.c            716
-rw-r--r--  tools/testing/selftests/kvm/aarch64/vgic_irq.c             855
-rw-r--r--  tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c  669
13 files changed, 0 insertions(+), 6763 deletions(-)
diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
deleted file mode 100644
index 8e5bd07a3727..000000000000
--- a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
+++ /dev/null
@@ -1,167 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aarch32_id_regs - Test for ID register behavior on AArch64-only systems
- *
- * Copyright (c) 2022 Google LLC.
- *
- * Test that KVM handles the AArch64 views of the AArch32 ID registers as RAZ
- * and WI from userspace.
- */
-
-#include <stdint.h>
-
-#include "kvm_util.h"
-#include "processor.h"
-#include "test_util.h"
-#include <linux/bitfield.h>
-
-#define BAD_ID_REG_VAL 0x1badc0deul
-
-#define GUEST_ASSERT_REG_RAZ(reg) GUEST_ASSERT_EQ(read_sysreg_s(reg), 0)
-
-static void guest_main(void)
-{
- GUEST_ASSERT_REG_RAZ(SYS_ID_PFR0_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_PFR1_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_DFR0_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_AFR0_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR0_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR1_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR2_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR3_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR0_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR1_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR2_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR3_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR4_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR5_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR4_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR6_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_MVFR0_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_MVFR1_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_MVFR2_EL1);
- GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 3));
- GUEST_ASSERT_REG_RAZ(SYS_ID_PFR2_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_DFR1_EL1);
- GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR5_EL1);
- GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 7));
-
- GUEST_DONE();
-}
-
-static void test_guest_raz(struct kvm_vcpu *vcpu)
-{
- struct ucall uc;
-
- vcpu_run(vcpu);
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- case UCALL_DONE:
- break;
- default:
- TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
- }
-}
-
-static uint64_t raz_wi_reg_ids[] = {
- KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_MMFR0_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_MMFR1_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_MMFR2_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_MMFR3_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_ISAR0_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_ISAR1_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_ISAR2_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_ISAR3_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_ISAR4_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_ISAR5_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_MMFR4_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_ISAR6_EL1),
- KVM_ARM64_SYS_REG(SYS_MVFR0_EL1),
- KVM_ARM64_SYS_REG(SYS_MVFR1_EL1),
- KVM_ARM64_SYS_REG(SYS_MVFR2_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_PFR2_EL1),
- KVM_ARM64_SYS_REG(SYS_ID_MMFR5_EL1),
-};
-
-static void test_user_raz_wi(struct kvm_vcpu *vcpu)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) {
- uint64_t reg_id = raz_wi_reg_ids[i];
- uint64_t val;
-
- vcpu_get_reg(vcpu, reg_id, &val);
- TEST_ASSERT_EQ(val, 0);
-
- /*
- * Expect the ioctl to succeed with no effect on the register
- * value.
- */
- vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
-
- vcpu_get_reg(vcpu, reg_id, &val);
- TEST_ASSERT_EQ(val, 0);
- }
-}
-
-static uint64_t raz_invariant_reg_ids[] = {
- KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1),
- KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)),
- KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1),
- KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 7)),
-};
-
-static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
-{
- int i, r;
-
- for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) {
- uint64_t reg_id = raz_invariant_reg_ids[i];
- uint64_t val;
-
- vcpu_get_reg(vcpu, reg_id, &val);
- TEST_ASSERT_EQ(val, 0);
-
- r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
- TEST_ASSERT(r < 0 && errno == EINVAL,
- "unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
-
- vcpu_get_reg(vcpu, reg_id, &val);
- TEST_ASSERT_EQ(val, 0);
- }
-}
-
-static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
-{
- uint64_t val, el0;
-
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
-
- el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
- return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY;
-}
-
-int main(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
-
- vm = vm_create_with_one_vcpu(&vcpu, guest_main);
-
- TEST_REQUIRE(vcpu_aarch64_only(vcpu));
-
- test_user_raz_wi(vcpu);
- test_user_raz_invariant(vcpu);
- test_guest_raz(vcpu);
-
- kvm_vm_free(vm);
-}
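
For reference, the RAZ/WI behaviour exercised above boils down to a handful
of raw KVM ioctls. A minimal standalone sketch, outside the selftest
harness: the SYSREG64() helper is made up for illustration, while the
ioctls, flags, and register encoding are regular KVM uAPI, and the final
assertion only holds on an AArch64-only host:

#include <assert.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: build a 64-bit sysreg id from its encoding. */
#define SYSREG64(op0, op1, crn, crm, op2)				\
	(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |	\
	 ((op0) << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |			\
	 ((op1) << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |			\
	 ((crn) << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |			\
	 ((crm) << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |			\
	 ((op2) << KVM_REG_ARM64_SYSREG_OP2_SHIFT))

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_vcpu_init init;
	uint64_t val = 0x1badc0deul;		/* BAD_ID_REG_VAL above */
	struct kvm_one_reg reg = {
		.id = SYSREG64(3, 0, 0, 1, 0),	/* ID_PFR0_EL1 */
		.addr = (uint64_t)&val,
	};

	ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
	ioctl(vcpu, KVM_ARM_VCPU_INIT, &init);

	/* WI: the write succeeds but has no effect on the register... */
	assert(!ioctl(vcpu, KVM_SET_ONE_REG, &reg));
	/* ...and RAZ: it still reads back as zero. */
	assert(!ioctl(vcpu, KVM_GET_ONE_REG, &reg) && val == 0);
	return 0;
}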
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
deleted file mode 100644
index 2cb8dd1f8275..000000000000
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ /dev/null
@@ -1,480 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch_timer.c - Tests the aarch64 timer IRQ functionality
- *
- * The test validates both the virtual and physical timer IRQs using
- * CVAL and TVAL registers. These constitute the four stages of the test.
- * The guest's main thread configures the timer interrupt for a stage
- * and waits for it to fire, with a timeout equal to the timer period.
- * It asserts that the interrupt fires within the timeout.
- *
- * Upon receipt of an interrupt, the guest's interrupt handler validates
- * the interrupt by checking that the architectural state complies with
- * the specification.
- *
- * The test provides command-line options to configure the timer's
- * period (-p), number of vCPUs (-n), and iterations per stage (-i).
- * To stress-test the timer stack even more, an option to migrate the
- * vCPUs across pCPUs (-m), at a particular rate, is also provided.
- *
- * Copyright (c) 2021, Google LLC.
- */
-#define _GNU_SOURCE
-
-#include <stdlib.h>
-#include <pthread.h>
-#include <linux/kvm.h>
-#include <linux/sizes.h>
-#include <linux/bitmap.h>
-#include <sys/sysinfo.h>
-
-#include "kvm_util.h"
-#include "processor.h"
-#include "delay.h"
-#include "arch_timer.h"
-#include "gic.h"
-#include "vgic.h"
-
-#define NR_VCPUS_DEF 4
-#define NR_TEST_ITERS_DEF 5
-#define TIMER_TEST_PERIOD_MS_DEF 10
-#define TIMER_TEST_ERR_MARGIN_US 100
-#define TIMER_TEST_MIGRATION_FREQ_MS 2
-
-struct test_args {
- int nr_vcpus;
- int nr_iter;
- int timer_period_ms;
- int migration_freq_ms;
- struct kvm_arm_counter_offset offset;
-};
-
-static struct test_args test_args = {
- .nr_vcpus = NR_VCPUS_DEF,
- .nr_iter = NR_TEST_ITERS_DEF,
- .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
- .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
- .offset = { .reserved = 1 },
-};
-
-#define msecs_to_usecs(msec) ((msec) * 1000LL)
-
-#define GICD_BASE_GPA 0x8000000ULL
-#define GICR_BASE_GPA 0x80A0000ULL
-
-enum guest_stage {
- GUEST_STAGE_VTIMER_CVAL = 1,
- GUEST_STAGE_VTIMER_TVAL,
- GUEST_STAGE_PTIMER_CVAL,
- GUEST_STAGE_PTIMER_TVAL,
- GUEST_STAGE_MAX,
-};
-
-/* Shared variables between host and guest */
-struct test_vcpu_shared_data {
- int nr_iter;
- enum guest_stage guest_stage;
- uint64_t xcnt;
-};
-
-static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
-static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
-static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
-
-static int vtimer_irq, ptimer_irq;
-
-static unsigned long *vcpu_done_map;
-static pthread_mutex_t vcpu_done_map_lock;
-
-static void
-guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
-{
- switch (shared_data->guest_stage) {
- case GUEST_STAGE_VTIMER_CVAL:
- timer_set_next_cval_ms(VIRTUAL, test_args.timer_period_ms);
- shared_data->xcnt = timer_get_cntct(VIRTUAL);
- timer_set_ctl(VIRTUAL, CTL_ENABLE);
- break;
- case GUEST_STAGE_VTIMER_TVAL:
- timer_set_next_tval_ms(VIRTUAL, test_args.timer_period_ms);
- shared_data->xcnt = timer_get_cntct(VIRTUAL);
- timer_set_ctl(VIRTUAL, CTL_ENABLE);
- break;
- case GUEST_STAGE_PTIMER_CVAL:
- timer_set_next_cval_ms(PHYSICAL, test_args.timer_period_ms);
- shared_data->xcnt = timer_get_cntct(PHYSICAL);
- timer_set_ctl(PHYSICAL, CTL_ENABLE);
- break;
- case GUEST_STAGE_PTIMER_TVAL:
- timer_set_next_tval_ms(PHYSICAL, test_args.timer_period_ms);
- shared_data->xcnt = timer_get_cntct(PHYSICAL);
- timer_set_ctl(PHYSICAL, CTL_ENABLE);
- break;
- default:
- GUEST_ASSERT(0);
- }
-}
-
-static void guest_validate_irq(unsigned int intid,
- struct test_vcpu_shared_data *shared_data)
-{
- enum guest_stage stage = shared_data->guest_stage;
- uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
- unsigned long xctl = 0;
- unsigned int timer_irq = 0;
- unsigned int accessor;
-
- if (intid == IAR_SPURIOUS)
- return;
-
- switch (stage) {
- case GUEST_STAGE_VTIMER_CVAL:
- case GUEST_STAGE_VTIMER_TVAL:
- accessor = VIRTUAL;
- timer_irq = vtimer_irq;
- break;
- case GUEST_STAGE_PTIMER_CVAL:
- case GUEST_STAGE_PTIMER_TVAL:
- accessor = PHYSICAL;
- timer_irq = ptimer_irq;
- break;
- default:
- GUEST_ASSERT(0);
- return;
- }
-
- xctl = timer_get_ctl(accessor);
- if ((xctl & CTL_IMASK) || !(xctl & CTL_ENABLE))
- return;
-
- timer_set_ctl(accessor, CTL_IMASK);
- xcnt = timer_get_cntct(accessor);
- cval = timer_get_cval(accessor);
-
- xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
-
- /* Make sure we are dealing with the correct timer IRQ */
- GUEST_ASSERT_EQ(intid, timer_irq);
-
- /* Basic 'timer condition met' check */
- __GUEST_ASSERT(xcnt >= cval,
- "xcnt = 0x%llx, cval = 0x%llx, xcnt_diff_us = 0x%llx",
- xcnt, cval, xcnt_diff_us);
- __GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%llx", xcnt);
-
- WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
-}
-
-static void guest_irq_handler(struct ex_regs *regs)
-{
- unsigned int intid = gic_get_and_ack_irq();
- uint32_t cpu = guest_get_vcpuid();
- struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
-
- guest_validate_irq(intid, shared_data);
-
- gic_set_eoi(intid);
-}
-
-static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
- enum guest_stage stage)
-{
- uint32_t irq_iter, config_iter;
-
- shared_data->guest_stage = stage;
- shared_data->nr_iter = 0;
-
- for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
- /* Setup the next interrupt */
- guest_configure_timer_action(shared_data);
-
- /* Setup a timeout for the interrupt to arrive */
- udelay(msecs_to_usecs(test_args.timer_period_ms) +
- TIMER_TEST_ERR_MARGIN_US);
-
- irq_iter = READ_ONCE(shared_data->nr_iter);
- GUEST_ASSERT_EQ(config_iter + 1, irq_iter);
- }
-}
-
-static void guest_code(void)
-{
- uint32_t cpu = guest_get_vcpuid();
- struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
-
- local_irq_disable();
-
- gic_init(GIC_V3, test_args.nr_vcpus,
- (void *)GICD_BASE_GPA, (void *)GICR_BASE_GPA);
-
- timer_set_ctl(VIRTUAL, CTL_IMASK);
- timer_set_ctl(PHYSICAL, CTL_IMASK);
-
- gic_irq_enable(vtimer_irq);
- gic_irq_enable(ptimer_irq);
- local_irq_enable();
-
- guest_run_stage(shared_data, GUEST_STAGE_VTIMER_CVAL);
- guest_run_stage(shared_data, GUEST_STAGE_VTIMER_TVAL);
- guest_run_stage(shared_data, GUEST_STAGE_PTIMER_CVAL);
- guest_run_stage(shared_data, GUEST_STAGE_PTIMER_TVAL);
-
- GUEST_DONE();
-}
-
-static void *test_vcpu_run(void *arg)
-{
- unsigned int vcpu_idx = (unsigned long)arg;
- struct ucall uc;
- struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
- struct kvm_vm *vm = vcpu->vm;
- struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
-
- vcpu_run(vcpu);
-
- /* Currently, any exit from guest is an indication of completion */
- pthread_mutex_lock(&vcpu_done_map_lock);
- __set_bit(vcpu_idx, vcpu_done_map);
- pthread_mutex_unlock(&vcpu_done_map_lock);
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_SYNC:
- case UCALL_DONE:
- break;
- case UCALL_ABORT:
- sync_global_from_guest(vm, *shared_data);
- fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n",
- vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
- REPORT_GUEST_ASSERT(uc);
- break;
- default:
- TEST_FAIL("Unexpected guest exit");
- }
-
- return NULL;
-}
-
-static uint32_t test_get_pcpu(void)
-{
- uint32_t pcpu;
- unsigned int nproc_conf;
- cpu_set_t online_cpuset;
-
- nproc_conf = get_nprocs_conf();
- sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);
-
- /* Randomly find an available pCPU to place a vCPU on */
- do {
- pcpu = rand() % nproc_conf;
- } while (!CPU_ISSET(pcpu, &online_cpuset));
-
- return pcpu;
-}
-
-static int test_migrate_vcpu(unsigned int vcpu_idx)
-{
- int ret;
- cpu_set_t cpuset;
- uint32_t new_pcpu = test_get_pcpu();
-
- CPU_ZERO(&cpuset);
- CPU_SET(new_pcpu, &cpuset);
-
- pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
-
- ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
- sizeof(cpuset), &cpuset);
-
- /* Allow the error where the vCPU thread is already finished */
- TEST_ASSERT(ret == 0 || ret == ESRCH,
- "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
- vcpu_idx, new_pcpu, ret);
-
- return ret;
-}
-
-static void *test_vcpu_migration(void *arg)
-{
- unsigned int i, n_done;
- bool vcpu_done;
-
- do {
- usleep(msecs_to_usecs(test_args.migration_freq_ms));
-
- for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
- pthread_mutex_lock(&vcpu_done_map_lock);
- vcpu_done = test_bit(i, vcpu_done_map);
- pthread_mutex_unlock(&vcpu_done_map_lock);
-
- if (vcpu_done) {
- n_done++;
- continue;
- }
-
- test_migrate_vcpu(i);
- }
- } while (test_args.nr_vcpus != n_done);
-
- return NULL;
-}
-
-static void test_run(struct kvm_vm *vm)
-{
- pthread_t pt_vcpu_migration;
- unsigned int i;
- int ret;
-
- pthread_mutex_init(&vcpu_done_map_lock, NULL);
- vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
- TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");
-
- for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
- ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
- (void *)(unsigned long)i);
- TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
- }
-
- /* Spawn a thread to control the vCPU migrations */
- if (test_args.migration_freq_ms) {
- srand(time(NULL));
-
- ret = pthread_create(&pt_vcpu_migration, NULL,
- test_vcpu_migration, NULL);
- TEST_ASSERT(!ret, "Failed to create the migration pthread");
- }
-
- for (i = 0; i < test_args.nr_vcpus; i++)
- pthread_join(pt_vcpu_run[i], NULL);
-
- if (test_args.migration_freq_ms)
- pthread_join(pt_vcpu_migration, NULL);
-
- bitmap_free(vcpu_done_map);
-}
-
-static void test_init_timer_irq(struct kvm_vm *vm)
-{
-	/* The timer intid should be the same for all vCPUs, so query only vCPU-0 */
- vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
- vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
-
- sync_global_to_guest(vm, ptimer_irq);
- sync_global_to_guest(vm, vtimer_irq);
-
- pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
-}
-
-static int gic_fd;
-
-static struct kvm_vm *test_vm_create(void)
-{
- struct kvm_vm *vm;
- unsigned int i;
- int nr_vcpus = test_args.nr_vcpus;
-
- vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
-
- vm_init_descriptor_tables(vm);
- vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
-
- if (!test_args.offset.reserved) {
- if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
- vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
- else
- TEST_FAIL("no support for global offset");
- }
-
- for (i = 0; i < nr_vcpus; i++)
- vcpu_init_descriptor_tables(vcpus[i]);
-
- test_init_timer_irq(vm);
- gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
- __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
-
- /* Make all the test's cmdline args visible to the guest */
- sync_global_to_guest(vm, test_args);
-
- return vm;
-}
-
-static void test_vm_cleanup(struct kvm_vm *vm)
-{
- close(gic_fd);
- kvm_vm_free(vm);
-}
-
-static void test_print_help(char *name)
-{
- pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
- name);
- pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
- NR_VCPUS_DEF, KVM_MAX_VCPUS);
- pr_info("\t-i: Number of iterations per stage (default: %u)\n",
- NR_TEST_ITERS_DEF);
- pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
- TIMER_TEST_PERIOD_MS_DEF);
- pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
- TIMER_TEST_MIGRATION_FREQ_MS);
- pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n");
- pr_info("\t-h: print this help screen\n");
-}
-
-static bool parse_args(int argc, char *argv[])
-{
- int opt;
-
- while ((opt = getopt(argc, argv, "hn:i:p:m:o:")) != -1) {
- switch (opt) {
- case 'n':
- test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
- if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
- pr_info("Max allowed vCPUs: %u\n",
- KVM_MAX_VCPUS);
- goto err;
- }
- break;
- case 'i':
- test_args.nr_iter = atoi_positive("Number of iterations", optarg);
- break;
- case 'p':
- test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
- break;
- case 'm':
- test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
- break;
- case 'o':
- test_args.offset.counter_offset = strtol(optarg, NULL, 0);
- test_args.offset.reserved = 0;
- break;
- case 'h':
- default:
- goto err;
- }
- }
-
- return true;
-
-err:
- test_print_help(argv[0]);
- return false;
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_vm *vm;
-
- if (!parse_args(argc, argv))
- exit(KSFT_SKIP);
-
- __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
- "At least two physical CPUs needed for vCPU migration");
-
- vm = test_vm_create();
- test_run(vm);
- test_vm_cleanup(vm);
-
- return 0;
-}
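
Aside: the four stages above are the cross product of {VIRTUAL, PHYSICAL}
timer and {CVAL, TVAL} programming. Both interfaces name the same deadline:
CVAL is an absolute compare value (the timer condition is met once
CNT >= CVAL), while TVAL is the signed 32-bit distance to that deadline
(architecturally, TVAL reads as CVAL - CNT). A sketch of the arithmetic the
timer_set_next_{cval,tval}_ms() helpers reduce to, with cycles_per_ms as an
assumed name (the real helpers derive it from CNTFRQ_EL0):

#include <stdint.h>

/* CVAL: absolute deadline, one period from "now". */
static uint64_t deadline_as_cval(uint64_t now, uint64_t cycles_per_ms,
				 uint64_t period_ms)
{
	return now + period_ms * cycles_per_ms;
}

/* TVAL: the same deadline expressed as a downcounting offset. */
static int32_t deadline_as_tval(uint64_t cycles_per_ms, uint64_t period_ms)
{
	return (int32_t)(period_ms * cycles_per_ms);
}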
diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
deleted file mode 100644
index 866002917441..000000000000
--- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
+++ /dev/null
@@ -1,607 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <test_util.h>
-#include <kvm_util.h>
-#include <processor.h>
-#include <linux/bitfield.h>
-
-#define MDSCR_KDE (1 << 13)
-#define MDSCR_MDE (1 << 15)
-#define MDSCR_SS (1 << 0)
-
-#define DBGBCR_LEN8 (0xff << 5)
-#define DBGBCR_EXEC (0x0 << 3)
-#define DBGBCR_EL1 (0x1 << 1)
-#define DBGBCR_E (0x1 << 0)
-#define DBGBCR_LBN_SHIFT 16
-#define DBGBCR_BT_SHIFT 20
-#define DBGBCR_BT_ADDR_LINK_CTX (0x1 << DBGBCR_BT_SHIFT)
-#define DBGBCR_BT_CTX_LINK (0x3 << DBGBCR_BT_SHIFT)
-
-#define DBGWCR_LEN8 (0xff << 5)
-#define DBGWCR_RD (0x1 << 3)
-#define DBGWCR_WR (0x2 << 3)
-#define DBGWCR_EL1 (0x1 << 1)
-#define DBGWCR_E (0x1 << 0)
-#define DBGWCR_LBN_SHIFT 16
-#define DBGWCR_WT_SHIFT 20
-#define DBGWCR_WT_LINK (0x1 << DBGWCR_WT_SHIFT)
-
-#define SPSR_D (1 << 9)
-#define SPSR_SS (1 << 21)
-
-extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx;
-extern unsigned char iter_ss_begin, iter_ss_end;
-static volatile uint64_t sw_bp_addr, hw_bp_addr;
-static volatile uint64_t wp_addr, wp_data_addr;
-static volatile uint64_t svc_addr;
-static volatile uint64_t ss_addr[4], ss_idx;
-#define PC(v) ((uint64_t)&(v))
-
-#define GEN_DEBUG_WRITE_REG(reg_name) \
-static void write_##reg_name(int num, uint64_t val) \
-{ \
- switch (num) { \
- case 0: \
- write_sysreg(val, reg_name##0_el1); \
- break; \
- case 1: \
- write_sysreg(val, reg_name##1_el1); \
- break; \
- case 2: \
- write_sysreg(val, reg_name##2_el1); \
- break; \
- case 3: \
- write_sysreg(val, reg_name##3_el1); \
- break; \
- case 4: \
- write_sysreg(val, reg_name##4_el1); \
- break; \
- case 5: \
- write_sysreg(val, reg_name##5_el1); \
- break; \
- case 6: \
- write_sysreg(val, reg_name##6_el1); \
- break; \
- case 7: \
- write_sysreg(val, reg_name##7_el1); \
- break; \
- case 8: \
- write_sysreg(val, reg_name##8_el1); \
- break; \
- case 9: \
- write_sysreg(val, reg_name##9_el1); \
- break; \
- case 10: \
- write_sysreg(val, reg_name##10_el1); \
- break; \
- case 11: \
- write_sysreg(val, reg_name##11_el1); \
- break; \
- case 12: \
- write_sysreg(val, reg_name##12_el1); \
- break; \
- case 13: \
- write_sysreg(val, reg_name##13_el1); \
- break; \
- case 14: \
- write_sysreg(val, reg_name##14_el1); \
- break; \
- case 15: \
- write_sysreg(val, reg_name##15_el1); \
- break; \
- default: \
- GUEST_ASSERT(0); \
- } \
-}
-
-/* Define write_dbgbcr()/write_dbgbvr()/write_dbgwcr()/write_dbgwvr() */
-GEN_DEBUG_WRITE_REG(dbgbcr)
-GEN_DEBUG_WRITE_REG(dbgbvr)
-GEN_DEBUG_WRITE_REG(dbgwcr)
-GEN_DEBUG_WRITE_REG(dbgwvr)
-
-static void reset_debug_state(void)
-{
- uint8_t brps, wrps, i;
- uint64_t dfr0;
-
- asm volatile("msr daifset, #8");
-
- write_sysreg(0, osdlr_el1);
- write_sysreg(0, oslar_el1);
- isb();
-
- write_sysreg(0, mdscr_el1);
- write_sysreg(0, contextidr_el1);
-
- /* Reset all bcr/bvr/wcr/wvr registers */
- dfr0 = read_sysreg(id_aa64dfr0_el1);
- brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0);
- for (i = 0; i <= brps; i++) {
- write_dbgbcr(i, 0);
- write_dbgbvr(i, 0);
- }
- wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0);
- for (i = 0; i <= wrps; i++) {
- write_dbgwcr(i, 0);
- write_dbgwvr(i, 0);
- }
-
- isb();
-}
-
-static void enable_os_lock(void)
-{
- write_sysreg(1, oslar_el1);
- isb();
-
- GUEST_ASSERT(read_sysreg(oslsr_el1) & 2);
-}
-
-static void enable_monitor_debug_exceptions(void)
-{
- uint32_t mdscr;
-
- asm volatile("msr daifclr, #8");
-
- mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
- write_sysreg(mdscr, mdscr_el1);
- isb();
-}
-
-static void install_wp(uint8_t wpn, uint64_t addr)
-{
- uint32_t wcr;
-
- wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
- write_dbgwcr(wpn, wcr);
- write_dbgwvr(wpn, addr);
-
- isb();
-
- enable_monitor_debug_exceptions();
-}
-
-static void install_hw_bp(uint8_t bpn, uint64_t addr)
-{
- uint32_t bcr;
-
- bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
- write_dbgbcr(bpn, bcr);
- write_dbgbvr(bpn, addr);
- isb();
-
- enable_monitor_debug_exceptions();
-}
-
-static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
- uint64_t ctx)
-{
- uint32_t wcr;
- uint64_t ctx_bcr;
-
- /* Setup a context-aware breakpoint for Linked Context ID Match */
- ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
- DBGBCR_BT_CTX_LINK;
- write_dbgbcr(ctx_bp, ctx_bcr);
- write_dbgbvr(ctx_bp, ctx);
-
- /* Setup a linked watchpoint (linked to the context-aware breakpoint) */
- wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
- DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT);
- write_dbgwcr(addr_wp, wcr);
- write_dbgwvr(addr_wp, addr);
- isb();
-
- enable_monitor_debug_exceptions();
-}
-
-void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
- uint64_t ctx)
-{
- uint32_t addr_bcr, ctx_bcr;
-
- /* Setup a context-aware breakpoint for Linked Context ID Match */
- ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
- DBGBCR_BT_CTX_LINK;
- write_dbgbcr(ctx_bp, ctx_bcr);
- write_dbgbvr(ctx_bp, ctx);
-
- /*
- * Setup a normal breakpoint for Linked Address Match, and link it
- * to the context-aware breakpoint.
- */
- addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
- DBGBCR_BT_ADDR_LINK_CTX |
- ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT);
- write_dbgbcr(addr_bp, addr_bcr);
- write_dbgbvr(addr_bp, addr);
- isb();
-
- enable_monitor_debug_exceptions();
-}
-
-static void install_ss(void)
-{
- uint32_t mdscr;
-
- asm volatile("msr daifclr, #8");
-
- mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS;
- write_sysreg(mdscr, mdscr_el1);
- isb();
-}
-
-static volatile char write_data;
-
-static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
-{
- uint64_t ctx = 0xabcdef; /* a random context number */
-
- /* Software-breakpoint */
- reset_debug_state();
- asm volatile("sw_bp: brk #0");
- GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp));
-
- /* Hardware-breakpoint */
- reset_debug_state();
- install_hw_bp(bpn, PC(hw_bp));
- asm volatile("hw_bp: nop");
- GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp));
-
- /* Hardware-breakpoint + svc */
- reset_debug_state();
- install_hw_bp(bpn, PC(bp_svc));
- asm volatile("bp_svc: svc #0");
- GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_svc));
- GUEST_ASSERT_EQ(svc_addr, PC(bp_svc) + 4);
-
- /* Hardware-breakpoint + software-breakpoint */
- reset_debug_state();
- install_hw_bp(bpn, PC(bp_brk));
- asm volatile("bp_brk: brk #0");
- GUEST_ASSERT_EQ(sw_bp_addr, PC(bp_brk));
- GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_brk));
-
- /* Watchpoint */
- reset_debug_state();
- install_wp(wpn, PC(write_data));
- write_data = 'x';
- GUEST_ASSERT_EQ(write_data, 'x');
- GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
-
- /* Single-step */
- reset_debug_state();
- install_ss();
- ss_idx = 0;
- asm volatile("ss_start:\n"
- "mrs x0, esr_el1\n"
- "add x0, x0, #1\n"
- "msr daifset, #8\n"
- : : : "x0");
- GUEST_ASSERT_EQ(ss_addr[0], PC(ss_start));
- GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4);
- GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8);
-
- /* OS Lock does not block software-breakpoint */
- reset_debug_state();
- enable_os_lock();
- sw_bp_addr = 0;
- asm volatile("sw_bp2: brk #0");
- GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp2));
-
- /* OS Lock blocking hardware-breakpoint */
- reset_debug_state();
- enable_os_lock();
- install_hw_bp(bpn, PC(hw_bp2));
- hw_bp_addr = 0;
- asm volatile("hw_bp2: nop");
- GUEST_ASSERT_EQ(hw_bp_addr, 0);
-
- /* OS Lock blocking watchpoint */
- reset_debug_state();
- enable_os_lock();
- write_data = '\0';
- wp_data_addr = 0;
- install_wp(wpn, PC(write_data));
- write_data = 'x';
- GUEST_ASSERT_EQ(write_data, 'x');
- GUEST_ASSERT_EQ(wp_data_addr, 0);
-
- /* OS Lock blocking single-step */
- reset_debug_state();
- enable_os_lock();
- ss_addr[0] = 0;
- install_ss();
- ss_idx = 0;
- asm volatile("mrs x0, esr_el1\n\t"
- "add x0, x0, #1\n\t"
- "msr daifset, #8\n\t"
- : : : "x0");
- GUEST_ASSERT_EQ(ss_addr[0], 0);
-
- /* Linked hardware-breakpoint */
- hw_bp_addr = 0;
- reset_debug_state();
- install_hw_bp_ctx(bpn, ctx_bpn, PC(hw_bp_ctx), ctx);
- /* Set context id */
- write_sysreg(ctx, contextidr_el1);
- isb();
- asm volatile("hw_bp_ctx: nop");
- write_sysreg(0, contextidr_el1);
- GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp_ctx));
-
- /* Linked watchpoint */
- reset_debug_state();
- install_wp_ctx(wpn, ctx_bpn, PC(write_data), ctx);
- /* Set context id */
- write_sysreg(ctx, contextidr_el1);
- isb();
- write_data = 'x';
- GUEST_ASSERT_EQ(write_data, 'x');
- GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
-
- GUEST_DONE();
-}
-
-static void guest_sw_bp_handler(struct ex_regs *regs)
-{
- sw_bp_addr = regs->pc;
- regs->pc += 4;
-}
-
-static void guest_hw_bp_handler(struct ex_regs *regs)
-{
- hw_bp_addr = regs->pc;
- regs->pstate |= SPSR_D;
-}
-
-static void guest_wp_handler(struct ex_regs *regs)
-{
- wp_data_addr = read_sysreg(far_el1);
- wp_addr = regs->pc;
- regs->pstate |= SPSR_D;
-}
-
-static void guest_ss_handler(struct ex_regs *regs)
-{
-	__GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%lu'", ss_idx);
- ss_addr[ss_idx++] = regs->pc;
- regs->pstate |= SPSR_SS;
-}
-
-static void guest_svc_handler(struct ex_regs *regs)
-{
- svc_addr = regs->pc;
-}
-
-static void guest_code_ss(int test_cnt)
-{
- uint64_t i;
- uint64_t bvr, wvr, w_bvr, w_wvr;
-
- for (i = 0; i < test_cnt; i++) {
- /* Bits [1:0] of dbg{b,w}vr are RES0 */
- w_bvr = i << 2;
- w_wvr = i << 2;
-
- /*
- * Enable Single Step execution. Note! This _must_ be a bare
- * ucall as the ucall() path uses atomic operations to manage
- * the ucall structures, and the built-in "atomics" are usually
-		 * implemented via exclusive access instructions. The exclusive
- * monitor is cleared on ERET, and so taking debug exceptions
- * during a LDREX=>STREX sequence will prevent forward progress
- * and hang the guest/test.
- */
- GUEST_UCALL_NONE();
-
- /*
- * The userspace will verify that the pc is as expected during
- * single step execution between iter_ss_begin and iter_ss_end.
- */
- asm volatile("iter_ss_begin:nop\n");
-
- write_sysreg(w_bvr, dbgbvr0_el1);
- write_sysreg(w_wvr, dbgwvr0_el1);
- bvr = read_sysreg(dbgbvr0_el1);
- wvr = read_sysreg(dbgwvr0_el1);
-
- /* Userspace disables Single Step when the end is nigh. */
- asm volatile("iter_ss_end:\n");
-
- GUEST_ASSERT_EQ(bvr, w_bvr);
- GUEST_ASSERT_EQ(wvr, w_wvr);
- }
- GUEST_DONE();
-}
-
-static int debug_version(uint64_t id_aa64dfr0)
-{
- return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0);
-}
-
-static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- struct ucall uc;
-
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vcpu);
-
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
- ESR_EC_BRK_INS, guest_sw_bp_handler);
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
- ESR_EC_HW_BP_CURRENT, guest_hw_bp_handler);
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
- ESR_EC_WP_CURRENT, guest_wp_handler);
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
- ESR_EC_SSTEP_CURRENT, guest_ss_handler);
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
- ESR_EC_SVC64, guest_svc_handler);
-
- /* Specify bpn/wpn/ctx_bpn to be tested */
- vcpu_args_set(vcpu, 3, bpn, wpn, ctx_bpn);
- pr_debug("Use bpn#%d, wpn#%d and ctx_bpn#%d\n", bpn, wpn, ctx_bpn);
-
- vcpu_run(vcpu);
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
-
-done:
- kvm_vm_free(vm);
-}
-
-void test_single_step_from_userspace(int test_cnt)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- struct ucall uc;
- struct kvm_run *run;
- uint64_t pc, cmd;
- uint64_t test_pc = 0;
- bool ss_enable = false;
- struct kvm_guest_debug debug = {};
-
- vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
- run = vcpu->run;
- vcpu_args_set(vcpu, 1, test_cnt);
-
- while (1) {
- vcpu_run(vcpu);
- if (run->exit_reason != KVM_EXIT_DEBUG) {
- cmd = get_ucall(vcpu, &uc);
- if (cmd == UCALL_ABORT) {
- REPORT_GUEST_ASSERT(uc);
- /* NOT REACHED */
- } else if (cmd == UCALL_DONE) {
- break;
- }
-
- TEST_ASSERT(cmd == UCALL_NONE,
- "Unexpected ucall cmd 0x%lx", cmd);
-
- debug.control = KVM_GUESTDBG_ENABLE |
- KVM_GUESTDBG_SINGLESTEP;
- ss_enable = true;
- vcpu_guest_debug_set(vcpu, &debug);
- continue;
- }
-
- TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");
-
- /* Check if the current pc is expected. */
- vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
- TEST_ASSERT(!test_pc || pc == test_pc,
- "Unexpected pc 0x%lx (expected 0x%lx)",
- pc, test_pc);
-
- if ((pc + 4) == (uint64_t)&iter_ss_end) {
- test_pc = 0;
- debug.control = KVM_GUESTDBG_ENABLE;
- ss_enable = false;
- vcpu_guest_debug_set(vcpu, &debug);
- continue;
- }
-
- /*
-		 * If the current pc is between iter_ss_begin and
- * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
- * be the current pc + 4.
- */
- if ((pc >= (uint64_t)&iter_ss_begin) &&
- (pc < (uint64_t)&iter_ss_end))
- test_pc = pc + 4;
- else
- test_pc = 0;
- }
-
- kvm_vm_free(vm);
-}
-
-/*
- * Run debug testing using the various breakpoint#, watchpoint# and
- * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration.
- */
-void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
-{
- uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
- int b, w, c;
-
- /* Number of breakpoints */
- brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1;
- __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
-
- /* Number of watchpoints */
- wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1;
-
- /* Number of context aware breakpoints */
- ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1;
-
- pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
- brp_num, wrp_num, ctx_brp_num);
-
- /* Number of normal (non-context aware) breakpoints */
- normal_brp_num = brp_num - ctx_brp_num;
-
- /* Lowest context aware breakpoint number */
- ctx_brp_base = normal_brp_num;
-
- /* Run tests with all supported breakpoints/watchpoints */
- for (c = ctx_brp_base; c < ctx_brp_base + ctx_brp_num; c++) {
- for (b = 0; b < normal_brp_num; b++) {
- for (w = 0; w < wrp_num; w++)
- test_guest_debug_exceptions(b, w, c);
- }
- }
-}
-
-static void help(char *name)
-{
- puts("");
- printf("Usage: %s [-h] [-i iterations of the single step test]\n", name);
- puts("");
- exit(0);
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- int opt;
- int ss_iteration = 10000;
- uint64_t aa64dfr0;
-
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &aa64dfr0);
- __TEST_REQUIRE(debug_version(aa64dfr0) >= 6,
- "Armv8 debug architecture not supported.");
- kvm_vm_free(vm);
-
- while ((opt = getopt(argc, argv, "i:")) != -1) {
- switch (opt) {
- case 'i':
- ss_iteration = atoi_positive("Number of iterations", optarg);
- break;
- case 'h':
- default:
- help(argv[0]);
- break;
- }
- }
-
- test_guest_debug_exceptions_all(aa64dfr0);
- test_single_step_from_userspace(ss_iteration);
-
- return 0;
-}
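
Aside: stripped of the harness, the userspace single-stepping that
test_single_step_from_userspace() drives is one ioctl. KVM_SET_GUEST_DEBUG
with KVM_GUESTDBG_SINGLESTEP makes the next KVM_RUN return with
KVM_EXIT_DEBUG after the guest retires a single instruction. A minimal
sketch, assuming vcpu_fd is an already-initialized vCPU file descriptor and
run its mmap()ed struct kvm_run:

#include <assert.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Execute exactly one guest instruction, then resume free-running
 * execution (debug stays enabled, as the test does near iter_ss_end). */
static void single_step_once(int vcpu_fd, struct kvm_run *run)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	ioctl(vcpu_fd, KVM_RUN, 0);
	assert(run->exit_reason == KVM_EXIT_DEBUG);

	dbg.control = KVM_GUESTDBG_ENABLE;
	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}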
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
deleted file mode 100644
index 709d7d721760..000000000000
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ /dev/null
@@ -1,757 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Check for KVM_GET_REG_LIST regressions.
- *
- * Copyright (C) 2020, Red Hat, Inc.
- *
- * While the blessed list should be created from the oldest possible
- * kernel, we can't go older than v5.2, because that's the first
- * release which includes df205b5c6328 ("KVM: arm64: Filter out invalid
- * core register IDs in KVM_GET_REG_LIST"). Without that commit the core
- * registers won't match expectations.
- */
-#include <stdio.h>
-#include "kvm_util.h"
-#include "test_util.h"
-#include "processor.h"
-
-struct feature_id_reg {
- __u64 reg;
- __u64 id_reg;
- __u64 feat_shift;
- __u64 feat_min;
-};
-
-static struct feature_id_reg feat_id_regs[] = {
- {
- ARM64_SYS_REG(3, 0, 2, 0, 3), /* TCR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- 0,
- 1
- },
- {
- ARM64_SYS_REG(3, 0, 10, 2, 2), /* PIRE0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- 4,
- 1
- },
- {
- ARM64_SYS_REG(3, 0, 10, 2, 3), /* PIR_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- 4,
- 1
- }
-};
-
-bool filter_reg(__u64 reg)
-{
- /*
- * DEMUX register presence depends on the host's CLIDR_EL1.
- * This means there's no set of them that we can bless.
- */
- if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
- return true;
-
- return false;
-}
-
-static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
-{
- int i, ret;
- __u64 data, feat_val;
-
- for (i = 0; i < ARRAY_SIZE(feat_id_regs); i++) {
- if (feat_id_regs[i].reg == reg) {
- ret = __vcpu_get_reg(vcpu, feat_id_regs[i].id_reg, &data);
- if (ret < 0)
- return false;
-
- feat_val = ((data >> feat_id_regs[i].feat_shift) & 0xf);
- return feat_val >= feat_id_regs[i].feat_min;
- }
- }
-
- return true;
-}
-
-bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
-{
- return check_supported_feat_reg(vcpu, reg);
-}
-
-bool check_reject_set(int err)
-{
- return err == EPERM;
-}
-
-void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
-{
- struct vcpu_reg_sublist *s;
- int feature;
-
- for_each_sublist(c, s) {
- if (s->finalize) {
- feature = s->feature;
- vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
- }
- }
-}
-
-#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
-
-#define CORE_REGS_XX_NR_WORDS 2
-#define CORE_SPSR_XX_NR_WORDS 2
-#define CORE_FPREGS_XX_NR_WORDS 4
-
-static const char *core_id_to_str(const char *prefix, __u64 id)
-{
- __u64 core_off = id & ~REG_MASK, idx;
-
- /*
- * core_off is the offset into struct kvm_regs
- */
- switch (core_off) {
- case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
- KVM_REG_ARM_CORE_REG(regs.regs[30]):
- idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
- TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx);
- return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
- case KVM_REG_ARM_CORE_REG(regs.sp):
- return "KVM_REG_ARM_CORE_REG(regs.sp)";
- case KVM_REG_ARM_CORE_REG(regs.pc):
- return "KVM_REG_ARM_CORE_REG(regs.pc)";
- case KVM_REG_ARM_CORE_REG(regs.pstate):
- return "KVM_REG_ARM_CORE_REG(regs.pstate)";
- case KVM_REG_ARM_CORE_REG(sp_el1):
- return "KVM_REG_ARM_CORE_REG(sp_el1)";
- case KVM_REG_ARM_CORE_REG(elr_el1):
- return "KVM_REG_ARM_CORE_REG(elr_el1)";
- case KVM_REG_ARM_CORE_REG(spsr[0]) ...
- KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
- idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
- TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx);
- return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
- case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
- KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
- idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
- TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx);
- return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
- case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
- return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
- case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
- return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
- }
-
- TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
- return NULL;
-}
-
-static const char *sve_id_to_str(const char *prefix, __u64 id)
-{
- __u64 sve_off, n, i;
-
- if (id == KVM_REG_ARM64_SVE_VLS)
- return "KVM_REG_ARM64_SVE_VLS";
-
- sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
- i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);
-
- TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id);
-
- switch (sve_off) {
- case KVM_REG_ARM64_SVE_ZREG_BASE ...
- KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
- n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
- TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
- "%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id);
- return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
- case KVM_REG_ARM64_SVE_PREG_BASE ...
- KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
- n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
- TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
- "%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id);
- return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
- case KVM_REG_ARM64_SVE_FFR_BASE:
- TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
- "%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id);
- return "KVM_REG_ARM64_SVE_FFR(0)";
- }
-
- return NULL;
-}
-
-void print_reg(const char *prefix, __u64 id)
-{
- unsigned op0, op1, crn, crm, op2;
- const char *reg_size = NULL;
-
- TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
- "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id);
-
- switch (id & KVM_REG_SIZE_MASK) {
- case KVM_REG_SIZE_U8:
- reg_size = "KVM_REG_SIZE_U8";
- break;
- case KVM_REG_SIZE_U16:
- reg_size = "KVM_REG_SIZE_U16";
- break;
- case KVM_REG_SIZE_U32:
- reg_size = "KVM_REG_SIZE_U32";
- break;
- case KVM_REG_SIZE_U64:
- reg_size = "KVM_REG_SIZE_U64";
- break;
- case KVM_REG_SIZE_U128:
- reg_size = "KVM_REG_SIZE_U128";
- break;
- case KVM_REG_SIZE_U256:
- reg_size = "KVM_REG_SIZE_U256";
- break;
- case KVM_REG_SIZE_U512:
- reg_size = "KVM_REG_SIZE_U512";
- break;
- case KVM_REG_SIZE_U1024:
- reg_size = "KVM_REG_SIZE_U1024";
- break;
- case KVM_REG_SIZE_U2048:
- reg_size = "KVM_REG_SIZE_U2048";
- break;
- default:
- TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
- prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
- }
-
- switch (id & KVM_REG_ARM_COPROC_MASK) {
- case KVM_REG_ARM_CORE:
- printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id));
- break;
- case KVM_REG_ARM_DEMUX:
- TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
- "%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id);
- printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
- reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
- break;
- case KVM_REG_ARM64_SYSREG:
- op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
- op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
- crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
- crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
- op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
- TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
- "%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id);
- printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
- break;
- case KVM_REG_ARM_FW:
- TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
- "%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id);
- printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
- break;
- case KVM_REG_ARM_FW_FEAT_BMAP:
- TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
- "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id);
- printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
- break;
- case KVM_REG_ARM64_SVE:
- printf("\t%s,\n", sve_id_to_str(prefix, id));
- break;
- default:
- TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
- prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
- }
-}
-
-/*
- * The original blessed list was primed with the output of kernel version
- * v4.15 with --core-reg-fixup and then later updated with new registers.
- * (The --core-reg-fixup option and its fixup function have been removed
- * from the test, as this type of test is unlikely to be used on a kernel
- * older than v5.2.)
- *
- * The blessed list is up to date with kernel version v6.4 (or so we hope)
- */
-static __u64 base_regs[] = {
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
- KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
- KVM_REG_ARM_FW_REG(0), /* KVM_REG_ARM_PSCI_VERSION */
- KVM_REG_ARM_FW_REG(1), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
- KVM_REG_ARM_FW_REG(2), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
- KVM_REG_ARM_FW_REG(3), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
- KVM_REG_ARM_FW_FEAT_BMAP_REG(0), /* KVM_REG_ARM_STD_BMAP */
- KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */
- KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */
- ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
- ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
-	ARM64_SYS_REG(3, 3, 14, 0, 2),	/* CNTVCT_EL0 */
- ARM64_SYS_REG(3, 0, 0, 0, 0), /* MIDR_EL1 */
- ARM64_SYS_REG(3, 0, 0, 0, 6), /* REVIDR_EL1 */
- ARM64_SYS_REG(3, 1, 0, 0, 1), /* CLIDR_EL1 */
- ARM64_SYS_REG(3, 1, 0, 0, 7), /* AIDR_EL1 */
- ARM64_SYS_REG(3, 3, 0, 0, 1), /* CTR_EL0 */
- ARM64_SYS_REG(2, 0, 0, 0, 4),
- ARM64_SYS_REG(2, 0, 0, 0, 5),
- ARM64_SYS_REG(2, 0, 0, 0, 6),
- ARM64_SYS_REG(2, 0, 0, 0, 7),
- ARM64_SYS_REG(2, 0, 0, 1, 4),
- ARM64_SYS_REG(2, 0, 0, 1, 5),
- ARM64_SYS_REG(2, 0, 0, 1, 6),
- ARM64_SYS_REG(2, 0, 0, 1, 7),
- ARM64_SYS_REG(2, 0, 0, 2, 0), /* MDCCINT_EL1 */
- ARM64_SYS_REG(2, 0, 0, 2, 2), /* MDSCR_EL1 */
- ARM64_SYS_REG(2, 0, 0, 2, 4),
- ARM64_SYS_REG(2, 0, 0, 2, 5),
- ARM64_SYS_REG(2, 0, 0, 2, 6),
- ARM64_SYS_REG(2, 0, 0, 2, 7),
- ARM64_SYS_REG(2, 0, 0, 3, 4),
- ARM64_SYS_REG(2, 0, 0, 3, 5),
- ARM64_SYS_REG(2, 0, 0, 3, 6),
- ARM64_SYS_REG(2, 0, 0, 3, 7),
- ARM64_SYS_REG(2, 0, 0, 4, 4),
- ARM64_SYS_REG(2, 0, 0, 4, 5),
- ARM64_SYS_REG(2, 0, 0, 4, 6),
- ARM64_SYS_REG(2, 0, 0, 4, 7),
- ARM64_SYS_REG(2, 0, 0, 5, 4),
- ARM64_SYS_REG(2, 0, 0, 5, 5),
- ARM64_SYS_REG(2, 0, 0, 5, 6),
- ARM64_SYS_REG(2, 0, 0, 5, 7),
- ARM64_SYS_REG(2, 0, 0, 6, 4),
- ARM64_SYS_REG(2, 0, 0, 6, 5),
- ARM64_SYS_REG(2, 0, 0, 6, 6),
- ARM64_SYS_REG(2, 0, 0, 6, 7),
- ARM64_SYS_REG(2, 0, 0, 7, 4),
- ARM64_SYS_REG(2, 0, 0, 7, 5),
- ARM64_SYS_REG(2, 0, 0, 7, 6),
- ARM64_SYS_REG(2, 0, 0, 7, 7),
- ARM64_SYS_REG(2, 0, 0, 8, 4),
- ARM64_SYS_REG(2, 0, 0, 8, 5),
- ARM64_SYS_REG(2, 0, 0, 8, 6),
- ARM64_SYS_REG(2, 0, 0, 8, 7),
- ARM64_SYS_REG(2, 0, 0, 9, 4),
- ARM64_SYS_REG(2, 0, 0, 9, 5),
- ARM64_SYS_REG(2, 0, 0, 9, 6),
- ARM64_SYS_REG(2, 0, 0, 9, 7),
- ARM64_SYS_REG(2, 0, 0, 10, 4),
- ARM64_SYS_REG(2, 0, 0, 10, 5),
- ARM64_SYS_REG(2, 0, 0, 10, 6),
- ARM64_SYS_REG(2, 0, 0, 10, 7),
- ARM64_SYS_REG(2, 0, 0, 11, 4),
- ARM64_SYS_REG(2, 0, 0, 11, 5),
- ARM64_SYS_REG(2, 0, 0, 11, 6),
- ARM64_SYS_REG(2, 0, 0, 11, 7),
- ARM64_SYS_REG(2, 0, 0, 12, 4),
- ARM64_SYS_REG(2, 0, 0, 12, 5),
- ARM64_SYS_REG(2, 0, 0, 12, 6),
- ARM64_SYS_REG(2, 0, 0, 12, 7),
- ARM64_SYS_REG(2, 0, 0, 13, 4),
- ARM64_SYS_REG(2, 0, 0, 13, 5),
- ARM64_SYS_REG(2, 0, 0, 13, 6),
- ARM64_SYS_REG(2, 0, 0, 13, 7),
- ARM64_SYS_REG(2, 0, 0, 14, 4),
- ARM64_SYS_REG(2, 0, 0, 14, 5),
- ARM64_SYS_REG(2, 0, 0, 14, 6),
- ARM64_SYS_REG(2, 0, 0, 14, 7),
- ARM64_SYS_REG(2, 0, 0, 15, 4),
- ARM64_SYS_REG(2, 0, 0, 15, 5),
- ARM64_SYS_REG(2, 0, 0, 15, 6),
- ARM64_SYS_REG(2, 0, 0, 15, 7),
- ARM64_SYS_REG(2, 0, 1, 1, 4), /* OSLSR_EL1 */
- ARM64_SYS_REG(2, 4, 0, 7, 0), /* DBGVCR32_EL2 */
- ARM64_SYS_REG(3, 0, 0, 0, 5), /* MPIDR_EL1 */
- ARM64_SYS_REG(3, 0, 0, 1, 0), /* ID_PFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 1, 1), /* ID_PFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 1, 2), /* ID_DFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 1, 3), /* ID_AFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 1, 4), /* ID_MMFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 1, 5), /* ID_MMFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 1, 6), /* ID_MMFR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 1, 7), /* ID_MMFR3_EL1 */
- ARM64_SYS_REG(3, 0, 0, 2, 0), /* ID_ISAR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 2, 1), /* ID_ISAR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 2, 2), /* ID_ISAR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 2, 3), /* ID_ISAR3_EL1 */
- ARM64_SYS_REG(3, 0, 0, 2, 4), /* ID_ISAR4_EL1 */
- ARM64_SYS_REG(3, 0, 0, 2, 5), /* ID_ISAR5_EL1 */
- ARM64_SYS_REG(3, 0, 0, 2, 6), /* ID_MMFR4_EL1 */
- ARM64_SYS_REG(3, 0, 0, 2, 7), /* ID_ISAR6_EL1 */
- ARM64_SYS_REG(3, 0, 0, 3, 0), /* MVFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 3, 1), /* MVFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 3, 2), /* MVFR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 3, 3),
- ARM64_SYS_REG(3, 0, 0, 3, 4), /* ID_PFR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 3, 5), /* ID_DFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 3, 6), /* ID_MMFR5_EL1 */
- ARM64_SYS_REG(3, 0, 0, 3, 7),
- ARM64_SYS_REG(3, 0, 0, 4, 0), /* ID_AA64PFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 4, 1), /* ID_AA64PFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 4, 2), /* ID_AA64PFR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 4, 3),
- ARM64_SYS_REG(3, 0, 0, 4, 4), /* ID_AA64ZFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 4, 5), /* ID_AA64SMFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 4, 6),
- ARM64_SYS_REG(3, 0, 0, 4, 7),
- ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 5, 1), /* ID_AA64DFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 5, 2),
- ARM64_SYS_REG(3, 0, 0, 5, 3),
- ARM64_SYS_REG(3, 0, 0, 5, 4), /* ID_AA64AFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 5, 5), /* ID_AA64AFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 5, 6),
- ARM64_SYS_REG(3, 0, 0, 5, 7),
- ARM64_SYS_REG(3, 0, 0, 6, 0), /* ID_AA64ISAR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 6, 1), /* ID_AA64ISAR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 6, 2), /* ID_AA64ISAR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 6, 3),
- ARM64_SYS_REG(3, 0, 0, 6, 4),
- ARM64_SYS_REG(3, 0, 0, 6, 5),
- ARM64_SYS_REG(3, 0, 0, 6, 6),
- ARM64_SYS_REG(3, 0, 0, 6, 7),
- ARM64_SYS_REG(3, 0, 0, 7, 0), /* ID_AA64MMFR0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 1), /* ID_AA64MMFR1_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 2), /* ID_AA64MMFR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 4), /* ID_AA64MMFR4_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 5),
- ARM64_SYS_REG(3, 0, 0, 7, 6),
- ARM64_SYS_REG(3, 0, 0, 7, 7),
- ARM64_SYS_REG(3, 0, 1, 0, 0), /* SCTLR_EL1 */
- ARM64_SYS_REG(3, 0, 1, 0, 1), /* ACTLR_EL1 */
- ARM64_SYS_REG(3, 0, 1, 0, 2), /* CPACR_EL1 */
- ARM64_SYS_REG(3, 0, 2, 0, 0), /* TTBR0_EL1 */
- ARM64_SYS_REG(3, 0, 2, 0, 1), /* TTBR1_EL1 */
- ARM64_SYS_REG(3, 0, 2, 0, 2), /* TCR_EL1 */
- ARM64_SYS_REG(3, 0, 2, 0, 3), /* TCR2_EL1 */
- ARM64_SYS_REG(3, 0, 5, 1, 0), /* AFSR0_EL1 */
- ARM64_SYS_REG(3, 0, 5, 1, 1), /* AFSR1_EL1 */
- ARM64_SYS_REG(3, 0, 5, 2, 0), /* ESR_EL1 */
- ARM64_SYS_REG(3, 0, 6, 0, 0), /* FAR_EL1 */
- ARM64_SYS_REG(3, 0, 7, 4, 0), /* PAR_EL1 */
- ARM64_SYS_REG(3, 0, 10, 2, 0), /* MAIR_EL1 */
- ARM64_SYS_REG(3, 0, 10, 2, 2), /* PIRE0_EL1 */
- ARM64_SYS_REG(3, 0, 10, 2, 3), /* PIR_EL1 */
- ARM64_SYS_REG(3, 0, 10, 3, 0), /* AMAIR_EL1 */
- ARM64_SYS_REG(3, 0, 12, 0, 0), /* VBAR_EL1 */
- ARM64_SYS_REG(3, 0, 12, 1, 1), /* DISR_EL1 */
- ARM64_SYS_REG(3, 0, 13, 0, 1), /* CONTEXTIDR_EL1 */
- ARM64_SYS_REG(3, 0, 13, 0, 4), /* TPIDR_EL1 */
- ARM64_SYS_REG(3, 0, 14, 1, 0), /* CNTKCTL_EL1 */
- ARM64_SYS_REG(3, 2, 0, 0, 0), /* CSSELR_EL1 */
- ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */
- ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */
- ARM64_SYS_REG(3, 3, 14, 0, 1), /* CNTPCT_EL0 */
- ARM64_SYS_REG(3, 3, 14, 2, 1), /* CNTP_CTL_EL0 */
- ARM64_SYS_REG(3, 3, 14, 2, 2), /* CNTP_CVAL_EL0 */
- ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
- ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
- ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
-};
-
-static __u64 pmu_regs[] = {
- ARM64_SYS_REG(3, 0, 9, 14, 1), /* PMINTENSET_EL1 */
- ARM64_SYS_REG(3, 0, 9, 14, 2), /* PMINTENCLR_EL1 */
- ARM64_SYS_REG(3, 3, 9, 12, 0), /* PMCR_EL0 */
- ARM64_SYS_REG(3, 3, 9, 12, 1), /* PMCNTENSET_EL0 */
- ARM64_SYS_REG(3, 3, 9, 12, 2), /* PMCNTENCLR_EL0 */
- ARM64_SYS_REG(3, 3, 9, 12, 3), /* PMOVSCLR_EL0 */
- ARM64_SYS_REG(3, 3, 9, 12, 4), /* PMSWINC_EL0 */
- ARM64_SYS_REG(3, 3, 9, 12, 5), /* PMSELR_EL0 */
- ARM64_SYS_REG(3, 3, 9, 13, 0), /* PMCCNTR_EL0 */
- ARM64_SYS_REG(3, 3, 9, 14, 0), /* PMUSERENR_EL0 */
- ARM64_SYS_REG(3, 3, 9, 14, 3), /* PMOVSSET_EL0 */
- ARM64_SYS_REG(3, 3, 14, 8, 0),
- ARM64_SYS_REG(3, 3, 14, 8, 1),
- ARM64_SYS_REG(3, 3, 14, 8, 2),
- ARM64_SYS_REG(3, 3, 14, 8, 3),
- ARM64_SYS_REG(3, 3, 14, 8, 4),
- ARM64_SYS_REG(3, 3, 14, 8, 5),
- ARM64_SYS_REG(3, 3, 14, 8, 6),
- ARM64_SYS_REG(3, 3, 14, 8, 7),
- ARM64_SYS_REG(3, 3, 14, 9, 0),
- ARM64_SYS_REG(3, 3, 14, 9, 1),
- ARM64_SYS_REG(3, 3, 14, 9, 2),
- ARM64_SYS_REG(3, 3, 14, 9, 3),
- ARM64_SYS_REG(3, 3, 14, 9, 4),
- ARM64_SYS_REG(3, 3, 14, 9, 5),
- ARM64_SYS_REG(3, 3, 14, 9, 6),
- ARM64_SYS_REG(3, 3, 14, 9, 7),
- ARM64_SYS_REG(3, 3, 14, 10, 0),
- ARM64_SYS_REG(3, 3, 14, 10, 1),
- ARM64_SYS_REG(3, 3, 14, 10, 2),
- ARM64_SYS_REG(3, 3, 14, 10, 3),
- ARM64_SYS_REG(3, 3, 14, 10, 4),
- ARM64_SYS_REG(3, 3, 14, 10, 5),
- ARM64_SYS_REG(3, 3, 14, 10, 6),
- ARM64_SYS_REG(3, 3, 14, 10, 7),
- ARM64_SYS_REG(3, 3, 14, 11, 0),
- ARM64_SYS_REG(3, 3, 14, 11, 1),
- ARM64_SYS_REG(3, 3, 14, 11, 2),
- ARM64_SYS_REG(3, 3, 14, 11, 3),
- ARM64_SYS_REG(3, 3, 14, 11, 4),
- ARM64_SYS_REG(3, 3, 14, 11, 5),
- ARM64_SYS_REG(3, 3, 14, 11, 6),
- ARM64_SYS_REG(3, 3, 14, 12, 0),
- ARM64_SYS_REG(3, 3, 14, 12, 1),
- ARM64_SYS_REG(3, 3, 14, 12, 2),
- ARM64_SYS_REG(3, 3, 14, 12, 3),
- ARM64_SYS_REG(3, 3, 14, 12, 4),
- ARM64_SYS_REG(3, 3, 14, 12, 5),
- ARM64_SYS_REG(3, 3, 14, 12, 6),
- ARM64_SYS_REG(3, 3, 14, 12, 7),
- ARM64_SYS_REG(3, 3, 14, 13, 0),
- ARM64_SYS_REG(3, 3, 14, 13, 1),
- ARM64_SYS_REG(3, 3, 14, 13, 2),
- ARM64_SYS_REG(3, 3, 14, 13, 3),
- ARM64_SYS_REG(3, 3, 14, 13, 4),
- ARM64_SYS_REG(3, 3, 14, 13, 5),
- ARM64_SYS_REG(3, 3, 14, 13, 6),
- ARM64_SYS_REG(3, 3, 14, 13, 7),
- ARM64_SYS_REG(3, 3, 14, 14, 0),
- ARM64_SYS_REG(3, 3, 14, 14, 1),
- ARM64_SYS_REG(3, 3, 14, 14, 2),
- ARM64_SYS_REG(3, 3, 14, 14, 3),
- ARM64_SYS_REG(3, 3, 14, 14, 4),
- ARM64_SYS_REG(3, 3, 14, 14, 5),
- ARM64_SYS_REG(3, 3, 14, 14, 6),
- ARM64_SYS_REG(3, 3, 14, 14, 7),
- ARM64_SYS_REG(3, 3, 14, 15, 0),
- ARM64_SYS_REG(3, 3, 14, 15, 1),
- ARM64_SYS_REG(3, 3, 14, 15, 2),
- ARM64_SYS_REG(3, 3, 14, 15, 3),
- ARM64_SYS_REG(3, 3, 14, 15, 4),
- ARM64_SYS_REG(3, 3, 14, 15, 5),
- ARM64_SYS_REG(3, 3, 14, 15, 6),
- ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */
-};
-
-static __u64 vregs[] = {
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
- KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
-};
-
-static __u64 sve_regs[] = {
- KVM_REG_ARM64_SVE_VLS,
- KVM_REG_ARM64_SVE_ZREG(0, 0),
- KVM_REG_ARM64_SVE_ZREG(1, 0),
- KVM_REG_ARM64_SVE_ZREG(2, 0),
- KVM_REG_ARM64_SVE_ZREG(3, 0),
- KVM_REG_ARM64_SVE_ZREG(4, 0),
- KVM_REG_ARM64_SVE_ZREG(5, 0),
- KVM_REG_ARM64_SVE_ZREG(6, 0),
- KVM_REG_ARM64_SVE_ZREG(7, 0),
- KVM_REG_ARM64_SVE_ZREG(8, 0),
- KVM_REG_ARM64_SVE_ZREG(9, 0),
- KVM_REG_ARM64_SVE_ZREG(10, 0),
- KVM_REG_ARM64_SVE_ZREG(11, 0),
- KVM_REG_ARM64_SVE_ZREG(12, 0),
- KVM_REG_ARM64_SVE_ZREG(13, 0),
- KVM_REG_ARM64_SVE_ZREG(14, 0),
- KVM_REG_ARM64_SVE_ZREG(15, 0),
- KVM_REG_ARM64_SVE_ZREG(16, 0),
- KVM_REG_ARM64_SVE_ZREG(17, 0),
- KVM_REG_ARM64_SVE_ZREG(18, 0),
- KVM_REG_ARM64_SVE_ZREG(19, 0),
- KVM_REG_ARM64_SVE_ZREG(20, 0),
- KVM_REG_ARM64_SVE_ZREG(21, 0),
- KVM_REG_ARM64_SVE_ZREG(22, 0),
- KVM_REG_ARM64_SVE_ZREG(23, 0),
- KVM_REG_ARM64_SVE_ZREG(24, 0),
- KVM_REG_ARM64_SVE_ZREG(25, 0),
- KVM_REG_ARM64_SVE_ZREG(26, 0),
- KVM_REG_ARM64_SVE_ZREG(27, 0),
- KVM_REG_ARM64_SVE_ZREG(28, 0),
- KVM_REG_ARM64_SVE_ZREG(29, 0),
- KVM_REG_ARM64_SVE_ZREG(30, 0),
- KVM_REG_ARM64_SVE_ZREG(31, 0),
- KVM_REG_ARM64_SVE_PREG(0, 0),
- KVM_REG_ARM64_SVE_PREG(1, 0),
- KVM_REG_ARM64_SVE_PREG(2, 0),
- KVM_REG_ARM64_SVE_PREG(3, 0),
- KVM_REG_ARM64_SVE_PREG(4, 0),
- KVM_REG_ARM64_SVE_PREG(5, 0),
- KVM_REG_ARM64_SVE_PREG(6, 0),
- KVM_REG_ARM64_SVE_PREG(7, 0),
- KVM_REG_ARM64_SVE_PREG(8, 0),
- KVM_REG_ARM64_SVE_PREG(9, 0),
- KVM_REG_ARM64_SVE_PREG(10, 0),
- KVM_REG_ARM64_SVE_PREG(11, 0),
- KVM_REG_ARM64_SVE_PREG(12, 0),
- KVM_REG_ARM64_SVE_PREG(13, 0),
- KVM_REG_ARM64_SVE_PREG(14, 0),
- KVM_REG_ARM64_SVE_PREG(15, 0),
- KVM_REG_ARM64_SVE_FFR(0),
- ARM64_SYS_REG(3, 0, 1, 2, 0), /* ZCR_EL1 */
-};
-
-static __u64 sve_rejects_set[] = {
- KVM_REG_ARM64_SVE_VLS,
-};
-
-static __u64 pauth_addr_regs[] = {
- ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */
- ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */
- ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */
- ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */
- ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */
- ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */
- ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */
- ARM64_SYS_REG(3, 0, 2, 2, 3) /* APDBKEYHI_EL1 */
-};
-
-static __u64 pauth_generic_regs[] = {
- ARM64_SYS_REG(3, 0, 2, 3, 0), /* APGAKEYLO_EL1 */
- ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */
-};
-
-#define BASE_SUBLIST \
- { "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
-#define VREGS_SUBLIST \
- { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
-#define PMU_SUBLIST \
- { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
- .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
-#define SVE_SUBLIST \
- { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
- .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
- .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
-#define PAUTH_SUBLIST \
- { \
- .name = "pauth_address", \
- .capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
- .feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
- .regs = pauth_addr_regs, \
- .regs_n = ARRAY_SIZE(pauth_addr_regs), \
- }, \
- { \
- .name = "pauth_generic", \
- .capability = KVM_CAP_ARM_PTRAUTH_GENERIC, \
- .feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \
- .regs = pauth_generic_regs, \
- .regs_n = ARRAY_SIZE(pauth_generic_regs), \
- }
-
-static struct vcpu_reg_list vregs_config = {
- .sublists = {
- BASE_SUBLIST,
- VREGS_SUBLIST,
- {0},
- },
-};
-static struct vcpu_reg_list vregs_pmu_config = {
- .sublists = {
- BASE_SUBLIST,
- VREGS_SUBLIST,
- PMU_SUBLIST,
- {0},
- },
-};
-static struct vcpu_reg_list sve_config = {
- .sublists = {
- BASE_SUBLIST,
- SVE_SUBLIST,
- {0},
- },
-};
-static struct vcpu_reg_list sve_pmu_config = {
- .sublists = {
- BASE_SUBLIST,
- SVE_SUBLIST,
- PMU_SUBLIST,
- {0},
- },
-};
-static struct vcpu_reg_list pauth_config = {
- .sublists = {
- BASE_SUBLIST,
- VREGS_SUBLIST,
- PAUTH_SUBLIST,
- {0},
- },
-};
-static struct vcpu_reg_list pauth_pmu_config = {
- .sublists = {
- BASE_SUBLIST,
- VREGS_SUBLIST,
- PAUTH_SUBLIST,
- PMU_SUBLIST,
- {0},
- },
-};
-
-struct vcpu_reg_list *vcpu_configs[] = {
- &vregs_config,
- &vregs_pmu_config,
- &sve_config,
- &sve_pmu_config,
- &pauth_config,
- &pauth_pmu_config,
-};
-int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
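For orientation, the configs above are consumed by the shared get-reg-list harness. A minimal sketch of the iteration pattern, for illustration only: the real walker in the common test code also honours .capability, sets .feature on the vCPU, and finalizes it when .finalize is set. The sublist field names are taken from the initializers above; the sublist struct name and pr_info() are assumed from the selftest headers.

	static void walk_configs(void)
	{
		int i;

		for (i = 0; i < vcpu_configs_n; i++) {
			struct vcpu_reg_list *c = vcpu_configs[i];
			struct vcpu_reg_sublist *s;

			/* The {0} sentinel has .regs == NULL. */
			for (s = &c->sublists[0]; s->regs; s++)
				pr_info("%s: %lu regs\n", s->name,
					(unsigned long)s->regs_n);
		}
	}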
diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c
deleted file mode 100644
index 27c10e7a7e01..000000000000
--- a/tools/testing/selftests/kvm/aarch64/hypercalls.c
+++ /dev/null
@@ -1,308 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-/* hypercalls: Check the arm64 pseudo-firmware bitmap register interface.
- *
- * The test validates the basic hypercall functionalities that are exposed
- * via the pseudo-firmware bitmap registers. This includes the registers'
- * read/write behavior before and after the VM has started, and whether the
- * hypercalls are properly masked or unmasked to the guest when disabled or
- * enabled from KVM userspace, respectively.
- */
-#include <errno.h>
-#include <linux/arm-smccc.h>
-#include <asm/kvm.h>
-#include <kvm_util.h>
-
-#include "processor.h"
-
-#define FW_REG_ULIMIT_VAL(max_feat_bit) (GENMASK(max_feat_bit, 0))
-
-/* Last valid bits of the bitmapped firmware registers */
-#define KVM_REG_ARM_STD_BMAP_BIT_MAX 0
-#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX 0
-#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 1
-
-struct kvm_fw_reg_info {
- uint64_t reg; /* Register definition */
- uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */
-};
-
-#define FW_REG_INFO(r) \
- { \
- .reg = r, \
- .max_feat_bit = r##_BIT_MAX, \
- }
-
-static const struct kvm_fw_reg_info fw_reg_info[] = {
- FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
- FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
- FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
-};
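FW_REG_ULIMIT_VAL() above builds a contiguous mask of every feature bit a bitmap register can carry. A small illustration of the values it yields for the BIT_MAX limits defined above, assuming the usual GENMASK(h, l) semantics of setting bits l through h:

	/* STD and STD_HYP expose one feature bit; VENDOR_HYP exposes two. */
	_Static_assert(GENMASK(0, 0) == 0x1, "limit for *_BIT_MAX == 0");
	_Static_assert(GENMASK(1, 0) == 0x3, "limit for *_BIT_MAX == 1");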
-
-enum test_stage {
- TEST_STAGE_REG_IFACE,
- TEST_STAGE_HVC_IFACE_FEAT_DISABLED,
- TEST_STAGE_HVC_IFACE_FEAT_ENABLED,
- TEST_STAGE_HVC_IFACE_FALSE_INFO,
- TEST_STAGE_END,
-};
-
-static int stage = TEST_STAGE_REG_IFACE;
-
-struct test_hvc_info {
- uint32_t func_id;
- uint64_t arg1;
-};
-
-#define TEST_HVC_INFO(f, a1) \
- { \
- .func_id = f, \
- .arg1 = a1, \
- }
-
-static const struct test_hvc_info hvc_info[] = {
- /* KVM_REG_ARM_STD_BMAP */
- TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0),
- TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64),
- TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0),
- TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0),
- TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0),
-
- /* KVM_REG_ARM_STD_HYP_BMAP */
- TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES),
- TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST),
- TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0),
-
- /* KVM_REG_ARM_VENDOR_HYP_BMAP */
- TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID,
- ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
- TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0),
- TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER),
-};
-
-/* Feed false hypercall info to test the KVM behavior */
-static const struct test_hvc_info false_hvc_info[] = {
- /* Feature support check against a different family of hypercalls */
- TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
- TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64),
- TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64),
-};
-
-static void guest_test_hvc(const struct test_hvc_info *hc_info)
-{
- unsigned int i;
- struct arm_smccc_res res;
- unsigned int hvc_info_arr_sz;
-
-	/* Infer the table length from which of the two tables was passed in. */
-	hvc_info_arr_sz =
-		hc_info == hvc_info ? ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info);
-
- for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
- memset(&res, 0, sizeof(res));
- smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);
-
- switch (stage) {
- case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
- case TEST_STAGE_HVC_IFACE_FALSE_INFO:
- __GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED,
- "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
- res.a0, hc_info->func_id, hc_info->arg1, stage);
- break;
- case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
- __GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED,
- "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
- res.a0, hc_info->func_id, hc_info->arg1, stage);
- break;
- default:
- GUEST_FAIL("Unexpected stage = %u", stage);
- }
- }
-}
-
-static void guest_code(void)
-{
- while (stage != TEST_STAGE_END) {
- switch (stage) {
- case TEST_STAGE_REG_IFACE:
- break;
- case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
- case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
- guest_test_hvc(hvc_info);
- break;
- case TEST_STAGE_HVC_IFACE_FALSE_INFO:
- guest_test_hvc(false_hvc_info);
- break;
- default:
- GUEST_FAIL("Unexpected stage = %u", stage);
- }
-
- GUEST_SYNC(stage);
- }
-
- GUEST_DONE();
-}
-
-struct st_time {
- uint32_t rev;
- uint32_t attr;
- uint64_t st_time;
-};
-
-#define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63)
-#define ST_GPA_BASE (1 << 30)
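STEAL_TIME_SIZE rounds the structure size up to a 64-byte boundary with the usual (x + 63) & ~63 idiom. With the fields above, sizeof(struct st_time) is 16, so the region ends up exactly one cache line wide:

	/* (16 + 63) & ~63 == 64: round up to the next multiple of 64. */
	_Static_assert(((sizeof(struct st_time) + 63) & ~63) == 64,
		       "STEAL_TIME_SIZE rounds up to a 64-byte boundary");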
-
-static void steal_time_init(struct kvm_vcpu *vcpu)
-{
- uint64_t st_ipa = (ulong)ST_GPA_BASE;
- unsigned int gpages;
-
- gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
- vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
-
- vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
- KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
-}
-
-static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
-{
- uint64_t val;
- unsigned int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
- const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
-
-		/* The first 'read' should return the upper limit of the supported features */
- vcpu_get_reg(vcpu, reg_info->reg, &val);
- TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
- "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
- reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
-
- /* Test a 'write' by disabling all the features of the register map */
- ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
- TEST_ASSERT(ret == 0,
- "Failed to clear all the features of reg: 0x%lx; ret: %d",
- reg_info->reg, errno);
-
- vcpu_get_reg(vcpu, reg_info->reg, &val);
- TEST_ASSERT(val == 0,
- "Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);
-
- /*
- * Test enabling a feature that's not supported.
- * Avoid this check if all the bits are occupied.
- */
- if (reg_info->max_feat_bit < 63) {
- ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
- TEST_ASSERT(ret != 0 && errno == EINVAL,
- "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx",
- errno, reg_info->reg);
- }
- }
-}
-
-static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
-{
- uint64_t val;
- unsigned int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
- const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
-
- /*
- * Before starting the VM, the test clears all the bits.
- * Check if that's still the case.
- */
- vcpu_get_reg(vcpu, reg_info->reg, &val);
- TEST_ASSERT(val == 0,
- "Expected all the features to be cleared for reg: 0x%lx",
- reg_info->reg);
-
- /*
- * Since the VM has run at least once, KVM shouldn't allow modification of
- * the registers and should return EBUSY. Set the registers and check for
- * the expected errno.
- */
- ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
- TEST_ASSERT(ret != 0 && errno == EBUSY,
- "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx",
- errno, reg_info->reg);
- }
-}
-
-static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)
-{
- struct kvm_vm *vm;
-
- vm = vm_create_with_one_vcpu(vcpu, guest_code);
-
- steal_time_init(*vcpu);
-
- return vm;
-}
-
-static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
-{
- int prev_stage = stage;
-
- pr_debug("Stage: %d\n", prev_stage);
-
- /* Sync the stage early, the VM might be freed below. */
- stage++;
- sync_global_to_guest(*vm, stage);
-
- switch (prev_stage) {
- case TEST_STAGE_REG_IFACE:
- test_fw_regs_after_vm_start(*vcpu);
- break;
- case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
- /* Start a new VM so that all the features are now enabled by default */
- kvm_vm_free(*vm);
- *vm = test_vm_create(vcpu);
- break;
- case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
- case TEST_STAGE_HVC_IFACE_FALSE_INFO:
- break;
- default:
- TEST_FAIL("Unknown test stage: %d", prev_stage);
- }
-}
-
-static void test_run(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- struct ucall uc;
- bool guest_done = false;
-
- vm = test_vm_create(&vcpu);
-
- test_fw_regs_before_vm_start(vcpu);
-
- while (!guest_done) {
- vcpu_run(vcpu);
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_SYNC:
- test_guest_stage(&vm, &vcpu);
- break;
- case UCALL_DONE:
- guest_done = true;
- break;
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- default:
- TEST_FAIL("Unexpected guest exit");
- }
- }
-
- kvm_vm_free(vm);
-}
-
-int main(void)
-{
- test_run();
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
deleted file mode 100644
index 53fddad57cbb..000000000000
--- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c
+++ /dev/null
@@ -1,1136 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * page_fault_test.c - Test stage 2 faults.
- *
- * This test tries different combinations of guest accesses (e.g., write,
- * S1PTW), backing source type (e.g., anon) and types of faults (e.g., read on
- * hugetlbfs with a hole). It checks that the expected handling method is
- * called (e.g., uffd faults with the right address and write/read flag).
- */
-#define _GNU_SOURCE
-#include <linux/bitmap.h>
-#include <fcntl.h>
-#include <test_util.h>
-#include <kvm_util.h>
-#include <processor.h>
-#include <asm/sysreg.h>
-#include <linux/bitfield.h>
-#include "guest_modes.h"
-#include "userfaultfd_util.h"
-
-/* Guest virtual addresses that point to the test page and its PTE. */
-#define TEST_GVA 0xc0000000
-#define TEST_EXEC_GVA (TEST_GVA + 0x8)
-#define TEST_PTE_GVA 0xb0000000
-#define TEST_DATA 0x0123456789ABCDEF
-
-static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;
-
-#define CMD_NONE (0)
-#define CMD_SKIP_TEST (1ULL << 1)
-#define CMD_HOLE_PT (1ULL << 2)
-#define CMD_HOLE_DATA (1ULL << 3)
-#define CMD_CHECK_WRITE_IN_DIRTY_LOG (1ULL << 4)
-#define CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG (1ULL << 5)
-#define CMD_CHECK_NO_WRITE_IN_DIRTY_LOG (1ULL << 6)
-#define CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG (1ULL << 7)
-#define CMD_SET_PTE_AF (1ULL << 8)
-
-#define PREPARE_FN_NR 10
-#define CHECK_FN_NR 10
-
-static struct event_cnt {
- int mmio_exits;
- int fail_vcpu_runs;
- int uffd_faults;
- /* uffd_faults is incremented from multiple threads. */
- pthread_mutex_t uffd_faults_mutex;
-} events;
-
-struct test_desc {
- const char *name;
- uint64_t mem_mark_cmd;
- /* Skip the test if any prepare function returns false */
- bool (*guest_prepare[PREPARE_FN_NR])(void);
- void (*guest_test)(void);
- void (*guest_test_check[CHECK_FN_NR])(void);
- uffd_handler_t uffd_pt_handler;
- uffd_handler_t uffd_data_handler;
- void (*dabt_handler)(struct ex_regs *regs);
- void (*iabt_handler)(struct ex_regs *regs);
- void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
- void (*fail_vcpu_run_handler)(int ret);
- uint32_t pt_memslot_flags;
- uint32_t data_memslot_flags;
- bool skip;
- struct event_cnt expected_events;
-};
-
-struct test_params {
- enum vm_mem_backing_src_type src_type;
- struct test_desc *test_desc;
-};
-
-/* Invalidate the stage 1 TLB entry for @vaddr; assumes a 4K granule (VA >> 12). */
-static inline void flush_tlb_page(uint64_t vaddr)
-{
- uint64_t page = vaddr >> 12;
-
- dsb(ishst);
- asm volatile("tlbi vaae1is, %0" :: "r" (page));
- dsb(ish);
- isb();
-}
-
-static void guest_write64(void)
-{
- uint64_t val;
-
- WRITE_ONCE(*guest_test_memory, TEST_DATA);
- val = READ_ONCE(*guest_test_memory);
- GUEST_ASSERT_EQ(val, TEST_DATA);
-}
-
-/* Check the system for atomic instructions. */
-static bool guest_check_lse(void)
-{
- uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
- uint64_t atomic;
-
- atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0);
- return atomic >= 2;
-}
-
-static bool guest_check_dc_zva(void)
-{
- uint64_t dczid = read_sysreg(dczid_el0);
- uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid);
-
- return dzp == 0;
-}
-
-/* Compare and swap instruction. */
-static void guest_cas(void)
-{
- uint64_t val;
-
- GUEST_ASSERT(guest_check_lse());
- asm volatile(".arch_extension lse\n"
- "casal %0, %1, [%2]\n"
- :: "r" (0ul), "r" (TEST_DATA), "r" (guest_test_memory));
- val = READ_ONCE(*guest_test_memory);
- GUEST_ASSERT_EQ(val, TEST_DATA);
-}
-
-static void guest_read64(void)
-{
- uint64_t val;
-
- val = READ_ONCE(*guest_test_memory);
- GUEST_ASSERT_EQ(val, 0);
-}
-
-/* Address translation instruction */
-static void guest_at(void)
-{
- uint64_t par;
-
- asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
- isb();
- par = read_sysreg(par_el1);
-
-	/* PAR_EL1.F (bit 0) is set when the translation failed. */
- GUEST_ASSERT_EQ(par & 1, 0);
-}
-
-/*
- * The size of the block written by "dc zva" is guaranteed to be between
- * (4 << 0) and (4 << 9) bytes (DCZID_EL0.BS is the log2 of the size in
- * words), which is safe in our case as we need the write to happen for at
- * least a word, and not more than a page.
- */
-static void guest_dc_zva(void)
-{
- uint16_t val;
-
- asm volatile("dc zva, %0" :: "r" (guest_test_memory));
- dsb(ish);
- val = READ_ONCE(*guest_test_memory);
- GUEST_ASSERT_EQ(val, 0);
-}
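For reference, the guest could compute the actual ZVA block size from DCZID_EL0. A hedged sketch; the DCZID_EL0_BS field mask name is assumed to exist in the selftest sysreg definitions, alongside the DCZID_EL0_DZP one used above:

	/* Block size in bytes: 4 << DCZID_EL0.BS (BS = log2 of the size in words). */
	static uint64_t zva_block_size(void)
	{
		uint64_t dczid = read_sysreg(dczid_el0);

		return 4ul << FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_BS), dczid);
	}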
-
-/*
- * Pre-indexing loads and stores don't have a valid syndrome (ESR_EL2.ISV==0).
- * That matters because KVM must take special care with them: they should
- * still count as accesses for dirty logging and userfaultfd, but they must
- * be handled differently for MMIO.
- */
-static void guest_ld_preidx(void)
-{
- uint64_t val;
- uint64_t addr = TEST_GVA - 8;
-
- /*
-	 * This ends up accessing "(TEST_GVA - 8) + 8" == TEST_GVA, where the
-	 * base address "TEST_GVA - 8" is in a gap between memslots not backed
-	 * by anything.
- */
- asm volatile("ldr %0, [%1, #8]!"
- : "=r" (val), "+r" (addr));
- GUEST_ASSERT_EQ(val, 0);
- GUEST_ASSERT_EQ(addr, TEST_GVA);
-}
-
-static void guest_st_preidx(void)
-{
- uint64_t val = TEST_DATA;
- uint64_t addr = TEST_GVA - 8;
-
- asm volatile("str %0, [%1, #8]!"
- : "+r" (val), "+r" (addr));
-
- GUEST_ASSERT_EQ(addr, TEST_GVA);
- val = READ_ONCE(*guest_test_memory);
-}
-
-static bool guest_set_ha(void)
-{
- uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1);
- uint64_t hadbs, tcr;
-
- /* Skip if HA is not supported. */
- hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1);
- if (hadbs == 0)
- return false;
-
- tcr = read_sysreg(tcr_el1) | TCR_EL1_HA;
- write_sysreg(tcr, tcr_el1);
- isb();
-
- return true;
-}
-
-static bool guest_clear_pte_af(void)
-{
- *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF;
- flush_tlb_page(TEST_GVA);
-
- return true;
-}
-
-static void guest_check_pte_af(void)
-{
- dsb(ish);
- GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
-}
-
-static void guest_check_write_in_dirty_log(void)
-{
- GUEST_SYNC(CMD_CHECK_WRITE_IN_DIRTY_LOG);
-}
-
-static void guest_check_no_write_in_dirty_log(void)
-{
- GUEST_SYNC(CMD_CHECK_NO_WRITE_IN_DIRTY_LOG);
-}
-
-static void guest_check_s1ptw_wr_in_dirty_log(void)
-{
- GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG);
-}
-
-static void guest_check_no_s1ptw_wr_in_dirty_log(void)
-{
- GUEST_SYNC(CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG);
-}
-
-static void guest_exec(void)
-{
- int (*code)(void) = (int (*)(void))TEST_EXEC_GVA;
- int ret;
-
- ret = code();
- GUEST_ASSERT_EQ(ret, 0x77);
-}
-
-static bool guest_prepare(struct test_desc *test)
-{
- bool (*prepare_fn)(void);
- int i;
-
- for (i = 0; i < PREPARE_FN_NR; i++) {
- prepare_fn = test->guest_prepare[i];
- if (prepare_fn && !prepare_fn())
- return false;
- }
-
- return true;
-}
-
-static void guest_test_check(struct test_desc *test)
-{
- void (*check_fn)(void);
- int i;
-
- for (i = 0; i < CHECK_FN_NR; i++) {
- check_fn = test->guest_test_check[i];
- if (check_fn)
- check_fn();
- }
-}
-
-static void guest_code(struct test_desc *test)
-{
- if (!guest_prepare(test))
- GUEST_SYNC(CMD_SKIP_TEST);
-
- GUEST_SYNC(test->mem_mark_cmd);
-
- if (test->guest_test)
- test->guest_test();
-
- guest_test_check(test);
- GUEST_DONE();
-}
-
-static void no_dabt_handler(struct ex_regs *regs)
-{
- GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx", read_sysreg(far_el1));
-}
-
-static void no_iabt_handler(struct ex_regs *regs)
-{
- GUEST_FAIL("Unexpected iabt, pc = 0x%lx", regs->pc);
-}
-
-static struct uffd_args {
- char *copy;
- void *hva;
- uint64_t paging_size;
-} pt_args, data_args;
-
-/* Returns 0 if the fault was handled, or -1 if the UFFDIO_COPY failed. */
-static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
- struct uffd_args *args)
-{
- uint64_t addr = msg->arg.pagefault.address;
- uint64_t flags = msg->arg.pagefault.flags;
- struct uffdio_copy copy;
- int ret;
-
- TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
- "The only expected UFFD mode is MISSING");
- TEST_ASSERT_EQ(addr, (uint64_t)args->hva);
-
- pr_debug("uffd fault: addr=%p write=%d\n",
- (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));
-
- copy.src = (uint64_t)args->copy;
- copy.dst = addr;
- copy.len = args->paging_size;
- copy.mode = 0;
-
- ret = ioctl(uffd, UFFDIO_COPY, &copy);
- if (ret == -1) {
- pr_info("Failed UFFDIO_COPY in 0x%lx with errno: %d\n",
- addr, errno);
- return ret;
- }
-
- pthread_mutex_lock(&events.uffd_faults_mutex);
- events.uffd_faults += 1;
- pthread_mutex_unlock(&events.uffd_faults_mutex);
- return 0;
-}
-
-static int uffd_pt_handler(int mode, int uffd, struct uffd_msg *msg)
-{
- return uffd_generic_handler(mode, uffd, msg, &pt_args);
-}
-
-static int uffd_data_handler(int mode, int uffd, struct uffd_msg *msg)
-{
- return uffd_generic_handler(mode, uffd, msg, &data_args);
-}
-
-static void setup_uffd_args(struct userspace_mem_region *region,
- struct uffd_args *args)
-{
- args->hva = (void *)region->region.userspace_addr;
- args->paging_size = region->region.memory_size;
-
- args->copy = malloc(args->paging_size);
- TEST_ASSERT(args->copy, "Failed to allocate data copy.");
- memcpy(args->copy, args->hva, args->paging_size);
-}
-
-static void setup_uffd(struct kvm_vm *vm, struct test_params *p,
- struct uffd_desc **pt_uffd, struct uffd_desc **data_uffd)
-{
- struct test_desc *test = p->test_desc;
- int uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
-
- setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_PT), &pt_args);
- setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_TEST_DATA), &data_args);
-
- *pt_uffd = NULL;
- if (test->uffd_pt_handler)
- *pt_uffd = uffd_setup_demand_paging(uffd_mode, 0,
- pt_args.hva,
- pt_args.paging_size,
- test->uffd_pt_handler);
-
- *data_uffd = NULL;
- if (test->uffd_data_handler)
- *data_uffd = uffd_setup_demand_paging(uffd_mode, 0,
- data_args.hva,
- data_args.paging_size,
- test->uffd_data_handler);
-}
-
-static void free_uffd(struct test_desc *test, struct uffd_desc *pt_uffd,
- struct uffd_desc *data_uffd)
-{
- if (test->uffd_pt_handler)
- uffd_stop_demand_paging(pt_uffd);
- if (test->uffd_data_handler)
- uffd_stop_demand_paging(data_uffd);
-
- free(pt_args.copy);
- free(data_args.copy);
-}
-
-static int uffd_no_handler(int mode, int uffd, struct uffd_msg *msg)
-{
- TEST_FAIL("There was no UFFD fault expected.");
- return -1;
-}
-
-/* Punch a hole in the backing store; failures are fatal test assertions. */
-static bool punch_hole_in_backing_store(struct kvm_vm *vm,
- struct userspace_mem_region *region)
-{
- void *hva = (void *)region->region.userspace_addr;
- uint64_t paging_size = region->region.memory_size;
- int ret, fd = region->fd;
-
- if (fd != -1) {
- ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- 0, paging_size);
- TEST_ASSERT(ret == 0, "fallocate failed");
- } else {
- ret = madvise(hva, paging_size, MADV_DONTNEED);
- TEST_ASSERT(ret == 0, "madvise failed");
- }
-
- return true;
-}
-
-static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
-{
- struct userspace_mem_region *region;
- void *hva;
-
- region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
- hva = (void *)region->region.userspace_addr;
-
- TEST_ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
-
- memcpy(hva, run->mmio.data, run->mmio.len);
- events.mmio_exits += 1;
-}
-
-static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
-{
- uint64_t data;
-
- memcpy(&data, run->mmio.data, sizeof(data));
- pr_debug("addr=%lld len=%d w=%d data=%lx\n",
- run->mmio.phys_addr, run->mmio.len,
- run->mmio.is_write, data);
- TEST_FAIL("There was no MMIO exit expected.");
-}
-
-static bool check_write_in_dirty_log(struct kvm_vm *vm,
- struct userspace_mem_region *region,
- uint64_t host_pg_nr)
-{
- unsigned long *bmap;
- bool first_page_dirty;
- uint64_t size = region->region.memory_size;
-
-	/* getpagesize() is not always equal to vm->page_size */
- bmap = bitmap_zalloc(size / getpagesize());
- kvm_vm_get_dirty_log(vm, region->region.slot, bmap);
- first_page_dirty = test_bit(host_pg_nr, bmap);
- free(bmap);
- return first_page_dirty;
-}
-
-/* Returns true to continue the test, and false if it should be skipped. */
-static bool handle_cmd(struct kvm_vm *vm, int cmd)
-{
- struct userspace_mem_region *data_region, *pt_region;
- bool continue_test = true;
- uint64_t pte_gpa, pte_pg;
-
- data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
- pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
- pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
- pte_pg = (pte_gpa - pt_region->region.guest_phys_addr) / getpagesize();
-
- if (cmd == CMD_SKIP_TEST)
- continue_test = false;
-
- if (cmd & CMD_HOLE_PT)
- continue_test = punch_hole_in_backing_store(vm, pt_region);
- if (cmd & CMD_HOLE_DATA)
- continue_test = punch_hole_in_backing_store(vm, data_region);
- if (cmd & CMD_CHECK_WRITE_IN_DIRTY_LOG)
- TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0),
- "Missing write in dirty log");
- if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG)
- TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, pte_pg),
- "Missing s1ptw write in dirty log");
- if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG)
- TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0),
- "Unexpected write in dirty log");
- if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
- TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, pte_pg),
- "Unexpected s1ptw write in dirty log");
-
- return continue_test;
-}
-
-void fail_vcpu_run_no_handler(int ret)
-{
- TEST_FAIL("Unexpected vcpu run failure");
-}
-
-void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
-{
- TEST_ASSERT(errno == ENOSYS,
- "The mmio handler should have returned not implemented.");
- events.fail_vcpu_runs += 1;
-}
-
-typedef uint32_t aarch64_insn_t;
-extern aarch64_insn_t __exec_test[2];
-
-/*
- * Emit two instructions (mov x0, #0x77; ret) under the __exec_test label,
- * so the host can memcpy() them into the test-data memslot for guest_exec().
- */
-noinline void __return_0x77(void)
-{
- asm volatile("__exec_test: mov x0, #0x77\n"
- "ret\n");
-}
-
-/*
- * Note that this function runs on the host before the test VM starts: there's
- * no need to sync the D$ and I$ caches.
- */
-static void load_exec_code_for_test(struct kvm_vm *vm)
-{
- uint64_t *code;
- struct userspace_mem_region *region;
- void *hva;
-
- region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
- hva = (void *)region->region.userspace_addr;
-
- assert(TEST_EXEC_GVA > TEST_GVA);
- code = hva + TEST_EXEC_GVA - TEST_GVA;
- memcpy(code, __exec_test, sizeof(__exec_test));
-}
-
-static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- struct test_desc *test)
-{
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vcpu);
-
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
- ESR_EC_DABT, no_dabt_handler);
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
- ESR_EC_IABT, no_iabt_handler);
-}
-
-static void setup_gva_maps(struct kvm_vm *vm)
-{
- struct userspace_mem_region *region;
- uint64_t pte_gpa;
-
- region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
- /* Map TEST_GVA first. This will install a new PTE. */
- virt_pg_map(vm, TEST_GVA, region->region.guest_phys_addr);
- /* Then map TEST_PTE_GVA to the above PTE. */
- pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
- virt_pg_map(vm, TEST_PTE_GVA, pte_gpa);
-}
-
-enum pf_test_memslots {
- CODE_AND_DATA_MEMSLOT,
- PAGE_TABLE_MEMSLOT,
- TEST_DATA_MEMSLOT,
-};
-
-/*
- * Create a memslot for code and data at pfn=0, and test-data and PT ones
- * at max_gfn.
- */
-static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
-{
- uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type);
- uint64_t guest_page_size = vm->page_size;
- uint64_t max_gfn = vm_compute_max_gfn(vm);
- /* Enough for 2M of code when using 4K guest pages. */
- uint64_t code_npages = 512;
- uint64_t pt_size, data_size, data_gpa;
-
- /*
-	 * This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages (13 pages in
-	 * total) when using VM_MODE_P48V48_4K; note that the .text alone takes
-	 * ~1.6MB. VM_MODE_P48V48_4K is the mode with the most PT pages, so use
-	 * twice that just in case.
- */
- pt_size = 26 * guest_page_size;
-
- /* memslot sizes and gpa's must be aligned to the backing page size */
- pt_size = align_up(pt_size, backing_src_pagesz);
- data_size = align_up(guest_page_size, backing_src_pagesz);
- data_gpa = (max_gfn * guest_page_size) - data_size;
- data_gpa = align_down(data_gpa, backing_src_pagesz);
-
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0,
- CODE_AND_DATA_MEMSLOT, code_npages, 0);
- vm->memslots[MEM_REGION_CODE] = CODE_AND_DATA_MEMSLOT;
- vm->memslots[MEM_REGION_DATA] = CODE_AND_DATA_MEMSLOT;
-
- vm_userspace_mem_region_add(vm, p->src_type, data_gpa - pt_size,
- PAGE_TABLE_MEMSLOT, pt_size / guest_page_size,
- p->test_desc->pt_memslot_flags);
- vm->memslots[MEM_REGION_PT] = PAGE_TABLE_MEMSLOT;
-
- vm_userspace_mem_region_add(vm, p->src_type, data_gpa, TEST_DATA_MEMSLOT,
- data_size / guest_page_size,
- p->test_desc->data_memslot_flags);
- vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
-}
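align_up() and align_down() above come from the selftest utilities; for reference, a sketch of the conventional power-of-two forms they are assumed to implement (valid only when the alignment is a power of two, as backing page sizes are):

	static inline uint64_t align_up(uint64_t x, uint64_t a)
	{
		return (x + a - 1) & ~(a - 1);	/* round up to a multiple of a */
	}

	static inline uint64_t align_down(uint64_t x, uint64_t a)
	{
		return x & ~(a - 1);		/* round down to a multiple of a */
	}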
-
-static void setup_ucall(struct kvm_vm *vm)
-{
- struct userspace_mem_region *region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
-
- ucall_init(vm, region->region.guest_phys_addr + region->region.memory_size);
-}
-
-static void setup_default_handlers(struct test_desc *test)
-{
- if (!test->mmio_handler)
- test->mmio_handler = mmio_no_handler;
-
- if (!test->fail_vcpu_run_handler)
- test->fail_vcpu_run_handler = fail_vcpu_run_no_handler;
-}
-
-static void check_event_counts(struct test_desc *test)
-{
- TEST_ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
- TEST_ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
- TEST_ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
-}
-
-static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
-{
- struct test_desc *test = p->test_desc;
-
- pr_debug("Test: %s\n", test->name);
- pr_debug("Testing guest mode: %s\n", vm_guest_mode_string(mode));
- pr_debug("Testing memory backing src type: %s\n",
- vm_mem_backing_src_alias(p->src_type)->name);
-}
-
-static void reset_event_counts(void)
-{
- memset(&events, 0, sizeof(events));
-}
-
-/*
- * This function either succeeds, skips the test (after setting test->skip), or
- * fails with a TEST_FAIL that aborts all tests.
- */
-static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- struct test_desc *test)
-{
- struct kvm_run *run;
- struct ucall uc;
- int ret;
-
- run = vcpu->run;
-
- for (;;) {
- ret = _vcpu_run(vcpu);
- if (ret) {
- test->fail_vcpu_run_handler(ret);
- goto done;
- }
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_SYNC:
- if (!handle_cmd(vm, uc.args[1])) {
- test->skip = true;
- goto done;
- }
- break;
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- case UCALL_DONE:
- goto done;
- case UCALL_NONE:
- if (run->exit_reason == KVM_EXIT_MMIO)
- test->mmio_handler(vm, run);
- break;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
- }
-
-done:
- pr_debug(test->skip ? "Skipped.\n" : "Done.\n");
-}
-
-static void run_test(enum vm_guest_mode mode, void *arg)
-{
- struct test_params *p = (struct test_params *)arg;
- struct test_desc *test = p->test_desc;
- struct kvm_vm *vm;
- struct kvm_vcpu *vcpu;
- struct uffd_desc *pt_uffd, *data_uffd;
-
- print_test_banner(mode, p);
-
- vm = ____vm_create(VM_SHAPE(mode));
- setup_memslots(vm, p);
- kvm_vm_elf_load(vm, program_invocation_name);
- setup_ucall(vm);
- vcpu = vm_vcpu_add(vm, 0, guest_code);
-
- setup_gva_maps(vm);
-
- reset_event_counts();
-
- /*
- * Set some code in the data memslot for the guest to execute (only
- * applicable to the EXEC tests). This has to be done before
- * setup_uffd() as that function copies the memslot data for the uffd
- * handler.
- */
- load_exec_code_for_test(vm);
- setup_uffd(vm, p, &pt_uffd, &data_uffd);
- setup_abort_handlers(vm, vcpu, test);
- setup_default_handlers(test);
- vcpu_args_set(vcpu, 1, test);
-
- vcpu_run_loop(vm, vcpu, test);
-
- kvm_vm_free(vm);
- free_uffd(test, pt_uffd, data_uffd);
-
- /*
- * Make sure we check the events after the uffd threads have exited,
- * which means they updated their respective event counters.
- */
- if (!test->skip)
- check_event_counts(test);
-}
-
-static void help(char *name)
-{
- puts("");
- printf("usage: %s [-h] [-s mem-type]\n", name);
- puts("");
- guest_modes_help();
- backing_src_help("-s");
- puts("");
-}
-
-#define SNAME(s) #s
-#define SCAT2(a, b) SNAME(a ## _ ## b)
-#define SCAT3(a, b, c) SCAT2(a, SCAT2(b, c))
-#define SCAT4(a, b, c, d) SCAT2(a, SCAT3(b, c, d))
-
-#define _CHECK(_test) _CHECK_##_test
-#define _PREPARE(_test) _PREPARE_##_test
-#define _PREPARE_guest_read64 NULL
-#define _PREPARE_guest_ld_preidx NULL
-#define _PREPARE_guest_write64 NULL
-#define _PREPARE_guest_st_preidx NULL
-#define _PREPARE_guest_exec NULL
-#define _PREPARE_guest_at NULL
-#define _PREPARE_guest_dc_zva guest_check_dc_zva
-#define _PREPARE_guest_cas guest_check_lse
-
-/* With or without access flag checks */
-#define _PREPARE_with_af guest_set_ha, guest_clear_pte_af
-#define _PREPARE_no_af NULL
-#define _CHECK_with_af guest_check_pte_af
-#define _CHECK_no_af NULL
-
-/* Performs an access and checks that no faults were triggered. */
-#define TEST_ACCESS(_access, _with_af, _mark_cmd) \
-{ \
- .name = SCAT3(_access, _with_af, #_mark_cmd), \
- .guest_prepare = { _PREPARE(_with_af), \
- _PREPARE(_access) }, \
- .mem_mark_cmd = _mark_cmd, \
- .guest_test = _access, \
- .guest_test_check = { _CHECK(_with_af) }, \
- .expected_events = { 0 }, \
-}
-
-#define TEST_UFFD(_access, _with_af, _mark_cmd, \
- _uffd_data_handler, _uffd_pt_handler, _uffd_faults) \
-{ \
- .name = SCAT4(uffd, _access, _with_af, #_mark_cmd), \
- .guest_prepare = { _PREPARE(_with_af), \
- _PREPARE(_access) }, \
- .guest_test = _access, \
- .mem_mark_cmd = _mark_cmd, \
- .guest_test_check = { _CHECK(_with_af) }, \
- .uffd_data_handler = _uffd_data_handler, \
- .uffd_pt_handler = _uffd_pt_handler, \
- .expected_events = { .uffd_faults = _uffd_faults, }, \
-}
-
-#define TEST_DIRTY_LOG(_access, _with_af, _test_check, _pt_check) \
-{ \
- .name = SCAT3(dirty_log, _access, _with_af), \
- .data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
- .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
- .guest_prepare = { _PREPARE(_with_af), \
- _PREPARE(_access) }, \
- .guest_test = _access, \
- .guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
- .expected_events = { 0 }, \
-}
-
-#define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler, \
- _uffd_faults, _test_check, _pt_check) \
-{ \
- .name = SCAT3(uffd_and_dirty_log, _access, _with_af), \
- .data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
- .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
- .guest_prepare = { _PREPARE(_with_af), \
- _PREPARE(_access) }, \
- .guest_test = _access, \
- .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
- .guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
- .uffd_data_handler = _uffd_data_handler, \
- .uffd_pt_handler = uffd_pt_handler, \
- .expected_events = { .uffd_faults = _uffd_faults, }, \
-}
-
-#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits) \
-{ \
- .name = SCAT2(ro_memslot, _access), \
- .data_memslot_flags = KVM_MEM_READONLY, \
- .pt_memslot_flags = KVM_MEM_READONLY, \
- .guest_prepare = { _PREPARE(_access) }, \
- .guest_test = _access, \
- .mmio_handler = _mmio_handler, \
- .expected_events = { .mmio_exits = _mmio_exits }, \
-}
-
-#define TEST_RO_MEMSLOT_NO_SYNDROME(_access) \
-{ \
- .name = SCAT2(ro_memslot_no_syndrome, _access), \
- .data_memslot_flags = KVM_MEM_READONLY, \
- .pt_memslot_flags = KVM_MEM_READONLY, \
- .guest_prepare = { _PREPARE(_access) }, \
- .guest_test = _access, \
- .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
- .expected_events = { .fail_vcpu_runs = 1 }, \
-}
-
-#define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits, \
- _test_check) \
-{ \
-	.name = SCAT2(ro_memslot_and_dlog, _access),			\
- .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
- .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
- .guest_prepare = { _PREPARE(_access) }, \
- .guest_test = _access, \
- .guest_test_check = { _test_check }, \
- .mmio_handler = _mmio_handler, \
- .expected_events = { .mmio_exits = _mmio_exits}, \
-}
-
-#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(_access, _test_check) \
-{ \
- .name = SCAT2(ro_memslot_no_syn_and_dlog, _access), \
- .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
- .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
- .guest_prepare = { _PREPARE(_access) }, \
- .guest_test = _access, \
- .guest_test_check = { _test_check }, \
- .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
- .expected_events = { .fail_vcpu_runs = 1 }, \
-}
-
-#define TEST_RO_MEMSLOT_AND_UFFD(_access, _mmio_handler, _mmio_exits, \
- _uffd_data_handler, _uffd_faults) \
-{ \
- .name = SCAT2(ro_memslot_uffd, _access), \
- .data_memslot_flags = KVM_MEM_READONLY, \
- .pt_memslot_flags = KVM_MEM_READONLY, \
- .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
- .guest_prepare = { _PREPARE(_access) }, \
- .guest_test = _access, \
- .uffd_data_handler = _uffd_data_handler, \
- .uffd_pt_handler = uffd_pt_handler, \
- .mmio_handler = _mmio_handler, \
- .expected_events = { .mmio_exits = _mmio_exits, \
- .uffd_faults = _uffd_faults }, \
-}
-
-#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(_access, _uffd_data_handler, \
- _uffd_faults) \
-{ \
-	.name = SCAT2(ro_memslot_no_syndrome_uffd, _access),		\
- .data_memslot_flags = KVM_MEM_READONLY, \
- .pt_memslot_flags = KVM_MEM_READONLY, \
- .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
- .guest_prepare = { _PREPARE(_access) }, \
- .guest_test = _access, \
- .uffd_data_handler = _uffd_data_handler, \
- .uffd_pt_handler = uffd_pt_handler, \
- .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
- .expected_events = { .fail_vcpu_runs = 1, \
- .uffd_faults = _uffd_faults }, \
-}
-
-static struct test_desc tests[] = {
-
- /* Check that HW is setting the Access Flag (AF) (sanity checks). */
- TEST_ACCESS(guest_read64, with_af, CMD_NONE),
- TEST_ACCESS(guest_ld_preidx, with_af, CMD_NONE),
- TEST_ACCESS(guest_cas, with_af, CMD_NONE),
- TEST_ACCESS(guest_write64, with_af, CMD_NONE),
- TEST_ACCESS(guest_st_preidx, with_af, CMD_NONE),
- TEST_ACCESS(guest_dc_zva, with_af, CMD_NONE),
- TEST_ACCESS(guest_exec, with_af, CMD_NONE),
-
- /*
- * Punch a hole in the data backing store, and then try multiple
-	 * accesses: reads should return zeroes, and writes should
-	 * re-populate the page. Moreover, the test also checks that no
- * exception was generated in the guest. Note that this
- * reading/writing behavior is the same as reading/writing a
- * punched page (with fallocate(FALLOC_FL_PUNCH_HOLE)) from
- * userspace.
- */
- TEST_ACCESS(guest_read64, no_af, CMD_HOLE_DATA),
- TEST_ACCESS(guest_cas, no_af, CMD_HOLE_DATA),
- TEST_ACCESS(guest_ld_preidx, no_af, CMD_HOLE_DATA),
- TEST_ACCESS(guest_write64, no_af, CMD_HOLE_DATA),
- TEST_ACCESS(guest_st_preidx, no_af, CMD_HOLE_DATA),
- TEST_ACCESS(guest_at, no_af, CMD_HOLE_DATA),
- TEST_ACCESS(guest_dc_zva, no_af, CMD_HOLE_DATA),
-
- /*
- * Punch holes in the data and PT backing stores and mark them for
- * userfaultfd handling. This should result in 2 faults: the access
- * on the data backing store, and its respective S1 page table walk
- * (S1PTW).
- */
- TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_handler, uffd_pt_handler, 2),
- TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_handler, uffd_pt_handler, 2),
- TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_handler, uffd_pt_handler, 2),
- /*
- * Can't test guest_at with_af as it's IMPDEF whether the AF is set.
- * The S1PTW fault should still be marked as a write.
- */
- TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_no_handler, uffd_pt_handler, 1),
- TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_handler, uffd_pt_handler, 2),
- TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_handler, uffd_pt_handler, 2),
- TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_handler, uffd_pt_handler, 2),
- TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_handler, uffd_pt_handler, 2),
- TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_handler, uffd_pt_handler, 2),
-
- /*
- * Try accesses when the data and PT memory regions are both
- * tracked for dirty logging.
- */
- TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log,
- guest_check_no_s1ptw_wr_in_dirty_log),
- TEST_DIRTY_LOG(guest_ld_preidx, with_af,
- guest_check_no_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log,
- guest_check_no_s1ptw_wr_in_dirty_log),
- TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
-
- /*
- * Access when the data and PT memory regions are both marked for
- * dirty logging and UFFD at the same time. The expected result is
- * that writes should mark the dirty log and trigger a userfaultfd
- * write fault. Reads/execs should result in a read userfaultfd
- * fault, and nothing in the dirty log. Any S1PTW should result in
- * a write in the dirty log and a userfaultfd write.
- */
- TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af,
- uffd_data_handler, 2,
- guest_check_no_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af,
- uffd_data_handler, 2,
- guest_check_no_write_in_dirty_log,
- guest_check_no_s1ptw_wr_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af,
- uffd_data_handler,
- 2, guest_check_no_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, uffd_no_handler, 1,
- guest_check_no_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af,
- uffd_data_handler, 2,
- guest_check_no_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af,
- uffd_data_handler,
- 2, guest_check_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af,
- uffd_data_handler, 2,
- guest_check_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af,
- uffd_data_handler,
- 2, guest_check_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
- uffd_data_handler, 2,
- guest_check_write_in_dirty_log,
- guest_check_s1ptw_wr_in_dirty_log),
- /*
- * Access when both the PT and data regions are marked read-only
- * (with KVM_MEM_READONLY). Writes with a syndrome result in an
- * MMIO exit, writes with no syndrome (e.g., CAS) result in a
-	 * failed vcpu run, and reads/execs with and without syndromes do
- * not fault.
- */
- TEST_RO_MEMSLOT(guest_read64, 0, 0),
- TEST_RO_MEMSLOT(guest_ld_preidx, 0, 0),
- TEST_RO_MEMSLOT(guest_at, 0, 0),
- TEST_RO_MEMSLOT(guest_exec, 0, 0),
- TEST_RO_MEMSLOT(guest_write64, mmio_on_test_gpa_handler, 1),
- TEST_RO_MEMSLOT_NO_SYNDROME(guest_dc_zva),
- TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
- TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
-
- /*
- * The PT and data regions are both read-only and marked
- * for dirty logging at the same time. The expected result is that
- * for writes there should be no write in the dirty log. The
- * readonly handling is the same as if the memslot was not marked
- * for dirty logging: writes with a syndrome result in an MMIO
- * exit, and writes with no syndrome result in a failed vcpu run.
- */
- TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_read64, 0, 0,
- guest_check_no_write_in_dirty_log),
- TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_ld_preidx, 0, 0,
- guest_check_no_write_in_dirty_log),
- TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_at, 0, 0,
- guest_check_no_write_in_dirty_log),
- TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_exec, 0, 0,
- guest_check_no_write_in_dirty_log),
- TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_write64, mmio_on_test_gpa_handler,
- 1, guest_check_no_write_in_dirty_log),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_dc_zva,
- guest_check_no_write_in_dirty_log),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_cas,
- guest_check_no_write_in_dirty_log),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_st_preidx,
- guest_check_no_write_in_dirty_log),
-
- /*
- * The PT and data regions are both read-only and punched with
- * holes tracked with userfaultfd. The expected result is the
- * union of both userfaultfd and read-only behaviors. For example,
- * write accesses result in a userfaultfd write fault and an MMIO
- * exit. Writes with no syndrome result in a failed vcpu run and
- * no userfaultfd write fault. Reads result in userfaultfd getting
- * triggered.
- */
- TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, uffd_data_handler, 2),
- TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, uffd_data_handler, 2),
- TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, uffd_no_handler, 1),
- TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, uffd_data_handler, 2),
- TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
- uffd_data_handler, 2),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, uffd_data_handler, 2),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, uffd_no_handler, 1),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, uffd_no_handler, 1),
-
- { 0 }
-};
-
-static void for_each_test_and_guest_mode(enum vm_mem_backing_src_type src_type)
-{
- struct test_desc *t;
-
- for (t = &tests[0]; t->name; t++) {
- if (t->skip)
- continue;
-
- struct test_params p = {
- .src_type = src_type,
- .test_desc = t,
- };
-
- for_each_guest_mode(run_test, &p);
- }
-}
-
-int main(int argc, char *argv[])
-{
- enum vm_mem_backing_src_type src_type;
- int opt;
-
- src_type = DEFAULT_VM_MEM_SRC;
-
- while ((opt = getopt(argc, argv, "hm:s:")) != -1) {
- switch (opt) {
- case 'm':
- guest_modes_cmdline(optarg);
- break;
- case 's':
- src_type = parse_backing_src_type(optarg);
- break;
- case 'h':
- default:
- help(argv[0]);
- exit(0);
- }
- }
-
- for_each_test_and_guest_mode(src_type);
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/aarch64/psci_test.c b/tools/testing/selftests/kvm/aarch64/psci_test.c
deleted file mode 100644
index 9b004905d1d3..000000000000
--- a/tools/testing/selftests/kvm/aarch64/psci_test.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * psci_test - Tests relating to KVM's PSCI implementation.
- *
- * Copyright (c) 2021 Google LLC.
- *
- * This test includes:
- * - A regression test for a race between KVM servicing the PSCI CPU_ON call
- * and userspace reading the targeted vCPU's registers.
- * - A test for KVM's handling of PSCI SYSTEM_SUSPEND and the associated
- * KVM_SYSTEM_EVENT_SUSPEND UAPI.
- */
-
-#define _GNU_SOURCE
-
-#include <linux/psci.h>
-
-#include "kvm_util.h"
-#include "processor.h"
-#include "test_util.h"
-
-#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
-#define CPU_ON_CONTEXT_ID 0xdeadc0deul
-
-static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
- uint64_t context_id)
-{
- struct arm_smccc_res res;
-
- smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
- 0, 0, 0, 0, &res);
-
- return res.a0;
-}
-
-static uint64_t psci_affinity_info(uint64_t target_affinity,
- uint64_t lowest_affinity_level)
-{
- struct arm_smccc_res res;
-
- smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
- 0, 0, 0, 0, 0, &res);
-
- return res.a0;
-}
-
-static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
-{
- struct arm_smccc_res res;
-
- smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
- 0, 0, 0, 0, 0, &res);
-
- return res.a0;
-}
-
-static uint64_t psci_features(uint32_t func_id)
-{
- struct arm_smccc_res res;
-
- smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);
-
- return res.a0;
-}
-
-static void vcpu_power_off(struct kvm_vcpu *vcpu)
-{
- struct kvm_mp_state mp_state = {
- .mp_state = KVM_MP_STATE_STOPPED,
- };
-
- vcpu_mp_state_set(vcpu, &mp_state);
-}
-
-static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
- struct kvm_vcpu **target)
-{
- struct kvm_vcpu_init init;
- struct kvm_vm *vm;
-
- vm = vm_create(2);
-
- vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
- init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
-
- *source = aarch64_vcpu_add(vm, 0, &init, guest_code);
- *target = aarch64_vcpu_add(vm, 1, &init, guest_code);
-
- return vm;
-}
-
-static void enter_guest(struct kvm_vcpu *vcpu)
-{
- struct ucall uc;
-
- vcpu_run(vcpu);
- if (get_ucall(vcpu, &uc) == UCALL_ABORT)
- REPORT_GUEST_ASSERT(uc);
-}
-
-static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
-{
- uint64_t obs_pc, obs_x0;
-
- vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
- vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
-
- TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
- "unexpected target cpu pc: %lx (expected: %lx)",
- obs_pc, CPU_ON_ENTRY_ADDR);
- TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
- "unexpected target context id: %lx (expected: %lx)",
- obs_x0, CPU_ON_CONTEXT_ID);
-}
-
-static void guest_test_cpu_on(uint64_t target_cpu)
-{
- uint64_t target_state;
-
- GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));
-
- do {
- target_state = psci_affinity_info(target_cpu, 0);
-
- GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
- (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF));
- } while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON);
-
- GUEST_DONE();
-}
-
-static void host_test_cpu_on(void)
-{
- struct kvm_vcpu *source, *target;
- uint64_t target_mpidr;
- struct kvm_vm *vm;
- struct ucall uc;
-
- vm = setup_vm(guest_test_cpu_on, &source, &target);
-
-	/* Make sure the target is already off when executing the test. */
- vcpu_power_off(target);
-
- vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
- vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
- enter_guest(source);
-
- if (get_ucall(source, &uc) != UCALL_DONE)
- TEST_FAIL("Unhandled ucall: %lu", uc.cmd);
-
- assert_vcpu_reset(target);
- kvm_vm_free(vm);
-}
-
-static void guest_test_system_suspend(void)
-{
- uint64_t ret;
-
- /* assert that SYSTEM_SUSPEND is discoverable */
- GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
- GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND));
-
- ret = psci_system_suspend(CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID);
- GUEST_SYNC(ret);
-}
-
-static void host_test_system_suspend(void)
-{
- struct kvm_vcpu *source, *target;
- struct kvm_run *run;
- struct kvm_vm *vm;
-
- vm = setup_vm(guest_test_system_suspend, &source, &target);
- vm_enable_cap(vm, KVM_CAP_ARM_SYSTEM_SUSPEND, 0);
-
- vcpu_power_off(target);
- run = source->run;
-
- enter_guest(source);
-
- TEST_ASSERT_KVM_EXIT_REASON(source, KVM_EXIT_SYSTEM_EVENT);
- TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
- "Unhandled system event: %u (expected: %u)",
- run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);
-
- kvm_vm_free(vm);
-}
-
-int main(void)
-{
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));
-
- host_test_cpu_on();
- host_test_system_suspend();
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
deleted file mode 100644
index bac05210b539..000000000000
--- a/tools/testing/selftests/kvm/aarch64/set_id_regs.c
+++ /dev/null
@@ -1,481 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * set_id_regs - Test for setting ID registers from userspace.
- *
- * Copyright (c) 2023 Google LLC.
- *
- * Test that KVM supports setting ID registers from userspace and handles the
- * feature set correctly.
- */
-
-#include <stdint.h>
-#include "kvm_util.h"
-#include "processor.h"
-#include "test_util.h"
-#include <linux/bitfield.h>
-
-enum ftr_type {
- FTR_EXACT, /* Use a predefined safe value */
- FTR_LOWER_SAFE, /* Smaller value is safe */
- FTR_HIGHER_SAFE, /* Bigger value is safe */
- FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
- FTR_END, /* Mark the last ftr bits */
-};
-
-#define FTR_SIGNED true /* Value should be treated as signed */
-#define FTR_UNSIGNED false /* Value should be treated as unsigned */
-
-struct reg_ftr_bits {
- char *name;
- bool sign;
- enum ftr_type type;
- uint8_t shift;
- uint64_t mask;
- int64_t safe_val;
-};
-
-struct test_feature_reg {
- uint32_t reg;
- const struct reg_ftr_bits *ftr_bits;
-};
-
-#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL) \
- { \
- .name = #NAME, \
- .sign = SIGNED, \
- .type = TYPE, \
- .shift = SHIFT, \
- .mask = MASK, \
- .safe_val = SAFE_VAL, \
- }
-
-#define REG_FTR_BITS(type, reg, field, safe_val) \
- __REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
- reg##_##field##_MASK, safe_val)
-
-#define S_REG_FTR_BITS(type, reg, field, safe_val) \
- __REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
- reg##_##field##_MASK, safe_val)
-
-#define REG_FTR_END \
- { \
- .type = FTR_END, \
- }
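For readability, here is what one generated table entry looks like after
preprocessing; e.g. REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0)
from the tables below expands to:

	{
		.name = "ID_AA64ISAR0_EL1_AES",
		.sign = FTR_UNSIGNED,
		.type = FTR_LOWER_SAFE,
		.shift = ID_AA64ISAR0_EL1_AES_SHIFT,
		.mask = ID_AA64ISAR0_EL1_AES_MASK,
		.safe_val = 0,
	}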
-
-static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
- S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
- S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
- S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
- S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
- REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
- REG_FTR_END,
-};
-
-static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
- REG_FTR_END,
-};
-
-#define TEST_REG(id, table) \
- { \
- .reg = id, \
- .ftr_bits = &((table)[0]), \
- }
-
-static struct test_feature_reg test_regs[] = {
- TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
- TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
- TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
- TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
- TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
- TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
- TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
- TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
- TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
- TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
-};
-
-#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);
-
-static void guest_code(void)
-{
- GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
- GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
- GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
- GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
- GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
- GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
- GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
- GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
- GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
- GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
-
- GUEST_DONE();
-}
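Each GUEST_REG_SYNC(id) above expands to a single ucall carrying the register
encoding and its live value, which test_guest_reg_read() below cross-checks
against KVM_GET_ONE_REG; for instance:

	/* GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1) expands to: */
	GUEST_SYNC_ARGS(0, SYS_ID_AA64PFR0_EL1,
			read_sysreg_s(SYS_ID_AA64PFR0_EL1), 0, 0);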
-
-/* Return a safe value for a given ftr_bits and ftr value */
-uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
-{
- uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
-
-	if (ftr_bits->sign == FTR_UNSIGNED) {
- switch (ftr_bits->type) {
- case FTR_EXACT:
- ftr = ftr_bits->safe_val;
- break;
- case FTR_LOWER_SAFE:
- if (ftr > 0)
- ftr--;
- break;
- case FTR_HIGHER_SAFE:
- if (ftr < ftr_max)
- ftr++;
- break;
- case FTR_HIGHER_OR_ZERO_SAFE:
- if (ftr == ftr_max)
- ftr = 0;
- else if (ftr != 0)
- ftr++;
- break;
- default:
- break;
- }
- } else if (ftr != ftr_max) {
- switch (ftr_bits->type) {
- case FTR_EXACT:
- ftr = ftr_bits->safe_val;
- break;
- case FTR_LOWER_SAFE:
- if (ftr > 0)
- ftr--;
- break;
- case FTR_HIGHER_SAFE:
- if (ftr < ftr_max - 1)
- ftr++;
- break;
- case FTR_HIGHER_OR_ZERO_SAFE:
- if (ftr != 0 && ftr != ftr_max - 1)
- ftr++;
- break;
- default:
- break;
- }
- }
-
- return ftr;
-}
-
-/* Return an invalid value for a given ftr_bits and ftr value */
-uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
-{
- uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
-
-	if (ftr_bits->sign == FTR_UNSIGNED) {
- switch (ftr_bits->type) {
- case FTR_EXACT:
- ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
- break;
- case FTR_LOWER_SAFE:
- ftr++;
- break;
- case FTR_HIGHER_SAFE:
- ftr--;
- break;
- case FTR_HIGHER_OR_ZERO_SAFE:
- if (ftr == 0)
- ftr = ftr_max;
- else
- ftr--;
- break;
- default:
- break;
- }
- } else if (ftr != ftr_max) {
- switch (ftr_bits->type) {
- case FTR_EXACT:
- ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
- break;
- case FTR_LOWER_SAFE:
- ftr++;
- break;
- case FTR_HIGHER_SAFE:
- ftr--;
- break;
- case FTR_HIGHER_OR_ZERO_SAFE:
- if (ftr == 0)
- ftr = ftr_max - 1;
- else
- ftr--;
- break;
- default:
- break;
- }
- } else {
- ftr = 0;
- }
-
- return ftr;
-}
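As a worked example with hypothetical values: for an unsigned FTR_LOWER_SAFE
field whose current value is 2, any lower value must be accepted and any
higher one rejected:

	const struct reg_ftr_bits aes =
		REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0);

	uint64_t safe = get_safe_value(&aes, 2);	/* 2 - 1 == 1 */
	uint64_t bad = get_invalid_value(&aes, 2);	/* 2 + 1 == 3 */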
-
-static void test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
- const struct reg_ftr_bits *ftr_bits)
-{
- uint8_t shift = ftr_bits->shift;
- uint64_t mask = ftr_bits->mask;
- uint64_t val, new_val, ftr;
-
- vcpu_get_reg(vcpu, reg, &val);
- ftr = (val & mask) >> shift;
-
- ftr = get_safe_value(ftr_bits, ftr);
-
- ftr <<= shift;
- val &= ~mask;
- val |= ftr;
-
- vcpu_set_reg(vcpu, reg, val);
- vcpu_get_reg(vcpu, reg, &new_val);
- TEST_ASSERT_EQ(new_val, val);
-}
-
-static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
- const struct reg_ftr_bits *ftr_bits)
-{
- uint8_t shift = ftr_bits->shift;
- uint64_t mask = ftr_bits->mask;
- uint64_t val, old_val, ftr;
- int r;
-
- vcpu_get_reg(vcpu, reg, &val);
- ftr = (val & mask) >> shift;
-
- ftr = get_invalid_value(ftr_bits, ftr);
-
- old_val = val;
- ftr <<= shift;
- val &= ~mask;
- val |= ftr;
-
- r = __vcpu_set_reg(vcpu, reg, val);
- TEST_ASSERT(r < 0 && errno == EINVAL,
- "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
-
- vcpu_get_reg(vcpu, reg, &val);
- TEST_ASSERT_EQ(val, old_val);
-}
-
-static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
-{
- uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
- struct reg_mask_range range = {
- .addr = (__u64)masks,
- };
- int ret;
-
- /* KVM should return error when reserved field is not zero */
- range.reserved[0] = 1;
- ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
- TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
-
- /* Get writable masks for feature ID registers */
- memset(range.reserved, 0, sizeof(range.reserved));
- vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
-
- for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
- const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
- uint32_t reg_id = test_regs[i].reg;
- uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
- int idx;
-
- /* Get the index to masks array for the idreg */
- idx = KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(reg_id), sys_reg_Op1(reg_id),
- sys_reg_CRn(reg_id), sys_reg_CRm(reg_id),
- sys_reg_Op2(reg_id));
-
- for (int j = 0; ftr_bits[j].type != FTR_END; j++) {
-			/* Skip aarch32 regs on an aarch64-only system, since they are RAZ/WI. */
- if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
- ksft_test_result_skip("%s on AARCH64 only system\n",
- ftr_bits[j].name);
- continue;
- }
-
- /* Make sure the feature field is writable */
- TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
-
- test_reg_set_fail(vcpu, reg, &ftr_bits[j]);
- test_reg_set_success(vcpu, reg, &ftr_bits[j]);
-
- ksft_test_result_pass("%s\n", ftr_bits[j].name);
- }
- }
-}
-
-static void test_guest_reg_read(struct kvm_vcpu *vcpu)
-{
- bool done = false;
- struct ucall uc;
- uint64_t val;
-
- while (!done) {
- vcpu_run(vcpu);
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- case UCALL_SYNC:
- /* Make sure the written values are seen by guest */
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(uc.args[2]), &val);
- TEST_ASSERT_EQ(val, uc.args[3]);
- break;
- case UCALL_DONE:
- done = true;
- break;
- default:
- TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
- }
- }
-}
-
-int main(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- bool aarch64_only;
- uint64_t val, el0;
- int ftr_cnt;
-
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
-
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-
- /* Check for AARCH64 only system */
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
- el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
- aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
-
- ksft_print_header();
-
- ftr_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
- ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
- ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
- ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
- ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
- ARRAY_SIZE(test_regs);
-
- ksft_set_plan(ftr_cnt);
-
- test_user_set_reg(vcpu, aarch64_only);
- test_guest_reg_read(vcpu);
-
- kvm_vm_free(vm);
-
- ksft_finished();
-}
diff --git a/tools/testing/selftests/kvm/aarch64/smccc_filter.c b/tools/testing/selftests/kvm/aarch64/smccc_filter.c
deleted file mode 100644
index 2d189f3da228..000000000000
--- a/tools/testing/selftests/kvm/aarch64/smccc_filter.c
+++ /dev/null
@@ -1,268 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * smccc_filter - Tests for the SMCCC filter UAPI.
- *
- * Copyright (c) 2023 Google LLC
- *
- * This test includes:
- * - Tests that the UAPI constraints are upheld by KVM. For example, userspace
- * is prevented from filtering the architecture range of SMCCC calls.
- * - Test that the filter actions (DENIED, FWD_TO_USER) work as intended.
- */
-
-#include <linux/arm-smccc.h>
-#include <linux/psci.h>
-#include <stdint.h>
-
-#include "processor.h"
-#include "test_util.h"
-
-enum smccc_conduit {
- HVC_INSN,
- SMC_INSN,
-};
-
-#define for_each_conduit(conduit) \
- for (conduit = HVC_INSN; conduit <= SMC_INSN; conduit++)
-
-static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
-{
- struct arm_smccc_res res;
-
- if (conduit == SMC_INSN)
- smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
- else
- smccc_hvc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
-
- GUEST_SYNC(res.a0);
-}
-
-static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
- enum kvm_smccc_filter_action action)
-{
- struct kvm_smccc_filter filter = {
- .base = start,
- .nr_functions = nr_functions,
- .action = action,
- };
-
- return __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
- KVM_ARM_VM_SMCCC_FILTER, &filter);
-}
-
-static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
- enum kvm_smccc_filter_action action)
-{
- int ret = __set_smccc_filter(vm, start, nr_functions, action);
-
- TEST_ASSERT(!ret, "failed to configure SMCCC filter: %d", ret);
-}
-
-static struct kvm_vm *setup_vm(struct kvm_vcpu **vcpu)
-{
- struct kvm_vcpu_init init;
- struct kvm_vm *vm;
-
- vm = vm_create(1);
- vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
-
- /*
- * Enable in-kernel emulation of PSCI to ensure that calls are denied
- * due to the SMCCC filter, not because of KVM.
- */
- init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
-
- *vcpu = aarch64_vcpu_add(vm, 0, &init, guest_main);
- return vm;
-}
-
-static void test_pad_must_be_zero(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = setup_vm(&vcpu);
- struct kvm_smccc_filter filter = {
- .base = PSCI_0_2_FN_PSCI_VERSION,
- .nr_functions = 1,
- .action = KVM_SMCCC_FILTER_DENY,
- .pad = { -1 },
- };
- int r;
-
- r = __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
- KVM_ARM_VM_SMCCC_FILTER, &filter);
- TEST_ASSERT(r < 0 && errno == EINVAL,
- "Setting filter with nonzero padding should return EINVAL");
-}
-
-/* Ensure that userspace cannot filter the Arm Architecture SMCCC range */
-static void test_filter_reserved_range(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = setup_vm(&vcpu);
- uint32_t smc64_fn;
- int r;
-
- r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1,
- 1, KVM_SMCCC_FILTER_DENY);
- TEST_ASSERT(r < 0 && errno == EEXIST,
- "Attempt to filter reserved range should return EEXIST");
-
- smc64_fn = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
- 0, 0);
-
- r = __set_smccc_filter(vm, smc64_fn, 1, KVM_SMCCC_FILTER_DENY);
- TEST_ASSERT(r < 0 && errno == EEXIST,
- "Attempt to filter reserved range should return EEXIST");
-
- kvm_vm_free(vm);
-}
-
-static void test_invalid_nr_functions(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = setup_vm(&vcpu);
- int r;
-
- r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 0, KVM_SMCCC_FILTER_DENY);
- TEST_ASSERT(r < 0 && errno == EINVAL,
- "Attempt to filter 0 functions should return EINVAL");
-
- kvm_vm_free(vm);
-}
-
-static void test_overflow_nr_functions(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = setup_vm(&vcpu);
- int r;
-
- r = __set_smccc_filter(vm, ~0, ~0, KVM_SMCCC_FILTER_DENY);
- TEST_ASSERT(r < 0 && errno == EINVAL,
- "Attempt to overflow filter range should return EINVAL");
-
- kvm_vm_free(vm);
-}
-
-static void test_reserved_action(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = setup_vm(&vcpu);
- int r;
-
- r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, -1);
- TEST_ASSERT(r < 0 && errno == EINVAL,
- "Attempt to use reserved filter action should return EINVAL");
-
- kvm_vm_free(vm);
-}
-
-/* Test that overlapping configurations of the SMCCC filter are rejected */
-static void test_filter_overlap(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = setup_vm(&vcpu);
- int r;
-
- set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, KVM_SMCCC_FILTER_DENY);
-
- r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, KVM_SMCCC_FILTER_DENY);
- TEST_ASSERT(r < 0 && errno == EEXIST,
- "Attempt to filter already configured range should return EEXIST");
-
- kvm_vm_free(vm);
-}
-
-static void expect_call_denied(struct kvm_vcpu *vcpu)
-{
- struct ucall uc;
-
- if (get_ucall(vcpu, &uc) != UCALL_SYNC)
- TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
-
- TEST_ASSERT(uc.args[1] == SMCCC_RET_NOT_SUPPORTED,
- "Unexpected SMCCC return code: %lu", uc.args[1]);
-}
-
-/* Denied SMCCC calls have a return code of SMCCC_RET_NOT_SUPPORTED */
-static void test_filter_denied(void)
-{
- enum smccc_conduit conduit;
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
-
- for_each_conduit(conduit) {
- vm = setup_vm(&vcpu);
-
- set_smccc_filter(vm, PSCI_0_2_FN_PSCI_VERSION, 1, KVM_SMCCC_FILTER_DENY);
- vcpu_args_set(vcpu, 2, PSCI_0_2_FN_PSCI_VERSION, conduit);
-
- vcpu_run(vcpu);
- expect_call_denied(vcpu);
-
- kvm_vm_free(vm);
- }
-}
-
-static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id,
- enum smccc_conduit conduit)
-{
- struct kvm_run *run = vcpu->run;
-
- TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERCALL,
- "Unexpected exit reason: %u", run->exit_reason);
- TEST_ASSERT(run->hypercall.nr == func_id,
- "Unexpected SMCCC function: %llu", run->hypercall.nr);
-
- if (conduit == SMC_INSN)
- TEST_ASSERT(run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC,
- "KVM_HYPERCALL_EXIT_SMC is not set");
- else
- TEST_ASSERT(!(run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC),
- "KVM_HYPERCALL_EXIT_SMC is set");
-}
-
-/* SMCCC calls forwarded to userspace cause KVM_EXIT_HYPERCALL exits */
-static void test_filter_fwd_to_user(void)
-{
- enum smccc_conduit conduit;
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
-
- for_each_conduit(conduit) {
- vm = setup_vm(&vcpu);
-
- set_smccc_filter(vm, PSCI_0_2_FN_PSCI_VERSION, 1, KVM_SMCCC_FILTER_FWD_TO_USER);
- vcpu_args_set(vcpu, 2, PSCI_0_2_FN_PSCI_VERSION, conduit);
-
- vcpu_run(vcpu);
- expect_call_fwd_to_user(vcpu, PSCI_0_2_FN_PSCI_VERSION, conduit);
-
- kvm_vm_free(vm);
- }
-}
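Note that KVM does not complete a forwarded call on the guest's behalf; a real
VMM would be expected to emulate the call and write the SMCCC return value
back before resuming. A hedged sketch of the minimal completion, reusing
helpers seen elsewhere in these selftests:

	/* Sketch only: refuse the call by placing the return code in x0. */
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[0]),
		     SMCCC_RET_NOT_SUPPORTED);
	vcpu_run(vcpu);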
-
-static bool kvm_supports_smccc_filter(void)
-{
- struct kvm_vm *vm = vm_create_barebones();
- int r;
-
- r = __kvm_has_device_attr(vm->fd, KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER);
-
- kvm_vm_free(vm);
- return !r;
-}
-
-int main(void)
-{
- TEST_REQUIRE(kvm_supports_smccc_filter());
-
- test_pad_must_be_zero();
- test_invalid_nr_functions();
- test_overflow_nr_functions();
- test_reserved_action();
- test_filter_reserved_range();
- test_filter_overlap();
- test_filter_denied();
- test_filter_fwd_to_user();
-}
diff --git a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
deleted file mode 100644
index 80b74c6f152b..000000000000
--- a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * vcpu_width_config - Test KVM_ARM_VCPU_INIT() with KVM_ARM_VCPU_EL1_32BIT.
- *
- * Copyright (c) 2022 Google LLC.
- *
- * This test ensures that non-mixed-width vCPUs (all 64bit vCPUs or all
- * 32bit vCPUs) can be configured, and that mixed-width vCPUs cannot be.
- */
-
-#include "kvm_util.h"
-#include "processor.h"
-#include "test_util.h"
-
-
-/*
- * Add a vCPU, run KVM_ARM_VCPU_INIT with @init0, and then
- * add another vCPU, and run KVM_ARM_VCPU_INIT with @init1.
- */
-static int add_init_2vcpus(struct kvm_vcpu_init *init0,
- struct kvm_vcpu_init *init1)
-{
- struct kvm_vcpu *vcpu0, *vcpu1;
- struct kvm_vm *vm;
- int ret;
-
- vm = vm_create_barebones();
-
- vcpu0 = __vm_vcpu_add(vm, 0);
- ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
- if (ret)
- goto free_exit;
-
- vcpu1 = __vm_vcpu_add(vm, 1);
- ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
-
-free_exit:
- kvm_vm_free(vm);
- return ret;
-}
-
-/*
- * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init0,
- * and run KVM_ARM_VCPU_INIT for another vCPU with @init1.
- */
-static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init0,
- struct kvm_vcpu_init *init1)
-{
- struct kvm_vcpu *vcpu0, *vcpu1;
- struct kvm_vm *vm;
- int ret;
-
- vm = vm_create_barebones();
-
- vcpu0 = __vm_vcpu_add(vm, 0);
- vcpu1 = __vm_vcpu_add(vm, 1);
-
- ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
- if (ret)
- goto free_exit;
-
- ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
-
-free_exit:
- kvm_vm_free(vm);
- return ret;
-}
-
-/*
- * Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be
- * configured, and two mixed-width vCPUs cannot be configured.
- * For each of those three cases, the vCPUs are configured in two different
- * orders: one runs KVM_CREATE_VCPU for both vCPUs and then KVM_ARM_VCPU_INIT
- * for each of them; the other runs KVM_CREATE_VCPU and KVM_ARM_VCPU_INIT for
- * one vCPU before running both commands for the other.
- */
-int main(void)
-{
- struct kvm_vcpu_init init0, init1;
- struct kvm_vm *vm;
- int ret;
-
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL1_32BIT));
-
- /* Get the preferred target type and copy that to init1 for later use */
- vm = vm_create_barebones();
- vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init0);
- kvm_vm_free(vm);
- init1 = init0;
-
- /* Test with 64bit vCPUs */
- ret = add_init_2vcpus(&init0, &init0);
- TEST_ASSERT(ret == 0,
- "Configuring 64bit EL1 vCPUs failed unexpectedly");
- ret = add_2vcpus_init_2vcpus(&init0, &init0);
- TEST_ASSERT(ret == 0,
- "Configuring 64bit EL1 vCPUs failed unexpectedly");
-
- /* Test with 32bit vCPUs */
- init0.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
- ret = add_init_2vcpus(&init0, &init0);
- TEST_ASSERT(ret == 0,
- "Configuring 32bit EL1 vCPUs failed unexpectedly");
- ret = add_2vcpus_init_2vcpus(&init0, &init0);
- TEST_ASSERT(ret == 0,
- "Configuring 32bit EL1 vCPUs failed unexpectedly");
-
- /* Test with mixed-width vCPUs */
- init0.features[0] = 0;
- init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
- ret = add_init_2vcpus(&init0, &init1);
- TEST_ASSERT(ret != 0,
- "Configuring mixed-width vCPUs worked unexpectedly");
- ret = add_2vcpus_init_2vcpus(&init0, &init1);
- TEST_ASSERT(ret != 0,
- "Configuring mixed-width vCPUs worked unexpectedly");
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
deleted file mode 100644
index eef816b80993..000000000000
--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
+++ /dev/null
@@ -1,716 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * vgic init sequence tests
- *
- * Copyright (C) 2020, Red Hat, Inc.
- */
-#define _GNU_SOURCE
-#include <linux/kernel.h>
-#include <sys/syscall.h>
-#include <asm/kvm.h>
-#include <asm/kvm_para.h>
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-#include "vgic.h"
-
-#define NR_VCPUS 4
-
-#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset)
-
-#define GICR_TYPER 0x8
-
-#define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2)
-#define VGIC_DEV_IS_V3(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V3)
-
-struct vm_gic {
- struct kvm_vm *vm;
- int gic_fd;
- uint32_t gic_dev_type;
-};
-
-static uint64_t max_phys_size;
-
-/*
- * Helpers to access a redistributor register and verify the ioctl() failed or
- * succeeded as expected, and provided the correct value on success.
- */
-static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset,
- int want, const char *msg)
-{
- uint32_t ignored_val;
- int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
- REG_OFFSET(vcpu, offset), &ignored_val);
-
- TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want);
-}
-
-static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want,
- const char *msg)
-{
- uint32_t val;
-
- kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
- REG_OFFSET(vcpu, offset), &val);
- TEST_ASSERT(val == want, "%s; want '0x%x', got '0x%x'", msg, want, val);
-}
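REG_OFFSET() packs the vcpu index into the upper 32 bits of the attribute and
the register offset into the lower 32, so for example:

	_Static_assert(REG_OFFSET(3, GICR_TYPER) == 0x300000008ULL,
		       "vcpu 3, redistributor register at offset 0x8");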
-
-/* dummy guest code */
-static void guest_code(void)
-{
- GUEST_SYNC(0);
- GUEST_SYNC(1);
- GUEST_SYNC(2);
- GUEST_DONE();
-}
-
-/* we don't want to assert on run execution, hence that helper */
-static int run_vcpu(struct kvm_vcpu *vcpu)
-{
- return __vcpu_run(vcpu) ? -errno : 0;
-}
-
-static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
- uint32_t nr_vcpus,
- struct kvm_vcpu *vcpus[])
-{
- struct vm_gic v;
-
- v.gic_dev_type = gic_dev_type;
- v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
- v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
-
- return v;
-}
-
-static void vm_gic_destroy(struct vm_gic *v)
-{
- close(v->gic_fd);
- kvm_vm_free(v->vm);
-}
-
-struct vgic_region_attr {
- uint64_t attr;
- uint64_t size;
- uint64_t alignment;
-};
-
-struct vgic_region_attr gic_v3_dist_region = {
- .attr = KVM_VGIC_V3_ADDR_TYPE_DIST,
- .size = 0x10000,
- .alignment = 0x10000,
-};
-
-struct vgic_region_attr gic_v3_redist_region = {
- .attr = KVM_VGIC_V3_ADDR_TYPE_REDIST,
- .size = NR_VCPUS * 0x20000,
- .alignment = 0x10000,
-};
-
-struct vgic_region_attr gic_v2_dist_region = {
- .attr = KVM_VGIC_V2_ADDR_TYPE_DIST,
- .size = 0x1000,
- .alignment = 0x1000,
-};
-
-struct vgic_region_attr gic_v2_cpu_region = {
- .attr = KVM_VGIC_V2_ADDR_TYPE_CPU,
- .size = 0x2000,
- .alignment = 0x1000,
-};
-
-/*
- * Helper routine that performs general KVM device tests. Eventually the
- * ARM_VGIC (GICv2 or GICv3) device gets created with overlapping DIST/REDIST
- * regions (or DIST/CPUIF for GICv2). The assumption is that 4 vcpus will be
- * used, hence the overlap. In the GICv3 case, a REDIST region is set @0x0 and
- * a DIST region @0x70000. The GICv2 case sets a CPUIF @0x0 and a DIST region
- * @0x1000.
- */
-static void subtest_dist_rdist(struct vm_gic *v)
-{
- int ret;
- uint64_t addr;
-	struct vgic_region_attr rdist; /* CPU interface in GICv2 */
- struct vgic_region_attr dist;
-
- rdist = VGIC_DEV_IS_V3(v->gic_dev_type) ? gic_v3_redist_region
- : gic_v2_cpu_region;
- dist = VGIC_DEV_IS_V3(v->gic_dev_type) ? gic_v3_dist_region
- : gic_v2_dist_region;
-
- /* Check existing group/attributes */
- kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, dist.attr);
-
- kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, rdist.attr);
-
-	/* check a non-existing attribute */
- ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, -1);
- TEST_ASSERT(ret && errno == ENXIO, "attribute not supported");
-
- /* misaligned DIST and REDIST address settings */
- addr = dist.alignment / 0x10;
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- dist.attr, &addr);
- TEST_ASSERT(ret && errno == EINVAL, "GIC dist base not aligned");
-
- addr = rdist.alignment / 0x10;
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr);
- TEST_ASSERT(ret && errno == EINVAL, "GIC redist/cpu base not aligned");
-
- /* out of range address */
- addr = max_phys_size;
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- dist.attr, &addr);
- TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit");
-
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr);
- TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit");
-
- /* Space for half a rdist (a rdist is: 2 * rdist.alignment). */
- addr = max_phys_size - dist.alignment;
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr);
- TEST_ASSERT(ret && errno == E2BIG,
- "half of the redist is beyond IPA limit");
-
- /* set REDIST base address @0x0*/
- addr = 0x00000;
- kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr);
-
- /* Attempt to create a second legacy redistributor region */
- addr = 0xE0000;
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr);
- TEST_ASSERT(ret && errno == EEXIST, "GIC redist base set again");
-
- ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST);
- if (!ret) {
- /* Attempt to mix legacy and new redistributor regions */
- addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == EINVAL,
- "attempt to mix GICv3 REDIST and REDIST_REGION");
- }
-
- /*
- * Set overlapping DIST / REDIST, cannot be detected here. Will be detected
- * on first vcpu run instead.
- */
- addr = rdist.size - rdist.alignment;
- kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- dist.attr, &addr);
-}
-
-/* Test the new REDIST region API */
-static void subtest_v3_redist_regions(struct vm_gic *v)
-{
- uint64_t addr, expected_addr;
- int ret;
-
- ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST);
- TEST_ASSERT(!ret, "Multiple redist regions advertised");
-
- addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 2, 0);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with flags != 0");
-
- addr = REDIST_REGION_ATTR_ADDR(0, 0x100000, 0, 0);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-	TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with count == 0");
-
- addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == EINVAL,
- "attempt to register the first rdist region with index != 0");
-
- addr = REDIST_REGION_ATTR_ADDR(2, 0x201000, 0, 1);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == EINVAL, "rdist region with misaligned address");
-
- addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
- kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
- addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == EINVAL, "register an rdist region with already used index");
-
- addr = REDIST_REGION_ATTR_ADDR(1, 0x210000, 0, 2);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == EINVAL,
- "register an rdist region overlapping with another one");
-
- addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 2);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-	TEST_ASSERT(ret && errno == EINVAL, "register redist region whose index is not the previous one + 1");
-
- addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1);
- kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
- addr = REDIST_REGION_ATTR_ADDR(1, max_phys_size, 0, 2);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == E2BIG,
- "register redist region with base address beyond IPA range");
-
- /* The last redist is above the pa range. */
- addr = REDIST_REGION_ATTR_ADDR(2, max_phys_size - 0x30000, 0, 2);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == E2BIG,
- "register redist region with top address beyond IPA range");
-
- addr = 0x260000;
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
- TEST_ASSERT(ret && errno == EINVAL,
- "Mix KVM_VGIC_V3_ADDR_TYPE_REDIST and REDIST_REGION");
-
- /*
- * Now there are 2 redist regions:
- * region 0 @ 0x200000 2 redists
- * region 1 @ 0x240000 1 redist
- * Attempt to read their characteristics
- */
-
- addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 0);
- expected_addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
- ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #0");
-
- addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 1);
- expected_addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1);
- ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #1");
-
- addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 2);
- ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-	TEST_ASSERT(ret && errno == ENOENT, "read characteristics of a non-existing region");
-
- addr = 0x260000;
- kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST, &addr);
-
- addr = REDIST_REGION_ATTR_ADDR(1, 0x260000, 0, 2);
- ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == EINVAL, "register redist region colliding with dist");
-}
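REDIST_REGION_ATTR_ADDR() comes from the selftests' vgic.h; roughly (treating
the exact layout as an assumption here), it packs the (count, base, flags,
index) tuples used above as:

	/* Sketch of the attribute encoding, assuming the vgic.h layout: */
	#define REDIST_REGION_ATTR_ADDR(count, base, flags, index)	\
		(((uint64_t)(count) << 52) |				\
		 ((uint64_t)((base) >> 16) << 16) |			\
		 ((uint64_t)(flags) << 12) |				\
		 (index))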
-
-/*
- * VGIC KVM device is created and initialized before the secondary CPUs
- * get created
- */
-static void test_vgic_then_vcpus(uint32_t gic_dev_type)
-{
- struct kvm_vcpu *vcpus[NR_VCPUS];
- struct vm_gic v;
- int ret, i;
-
- v = vm_gic_create_with_vcpus(gic_dev_type, 1, vcpus);
-
- subtest_dist_rdist(&v);
-
- /* Add the rest of the VCPUs */
- for (i = 1; i < NR_VCPUS; ++i)
- vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);
-
- ret = run_vcpu(vcpus[3]);
- TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
-
- vm_gic_destroy(&v);
-}
-
-/* All the VCPUs are created before the VGIC KVM device gets initialized */
-static void test_vcpus_then_vgic(uint32_t gic_dev_type)
-{
- struct kvm_vcpu *vcpus[NR_VCPUS];
- struct vm_gic v;
- int ret;
-
- v = vm_gic_create_with_vcpus(gic_dev_type, NR_VCPUS, vcpus);
-
- subtest_dist_rdist(&v);
-
- ret = run_vcpu(vcpus[3]);
- TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
-
- vm_gic_destroy(&v);
-}
-
-static void test_v3_new_redist_regions(void)
-{
- struct kvm_vcpu *vcpus[NR_VCPUS];
- void *dummy = NULL;
- struct vm_gic v;
- uint64_t addr;
- int ret;
-
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
- subtest_v3_redist_regions(&v);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
-
- ret = run_vcpu(vcpus[3]);
- TEST_ASSERT(ret == -ENXIO, "running without sufficient number of rdists");
- vm_gic_destroy(&v);
-
- /* step2 */
-
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
- subtest_v3_redist_regions(&v);
-
- addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
- ret = run_vcpu(vcpus[3]);
- TEST_ASSERT(ret == -EBUSY, "running without vgic explicit init");
-
- vm_gic_destroy(&v);
-
- /* step 3 */
-
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
- subtest_v3_redist_regions(&v);
-
- ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, dummy);
- TEST_ASSERT(ret && errno == EFAULT,
-		    "register a third region (bad userspace address) covering the 4 vcpus");
-
- addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
-
- ret = run_vcpu(vcpus[3]);
- TEST_ASSERT(!ret, "vcpu run");
-
- vm_gic_destroy(&v);
-}
-
-static void test_v3_typer_accesses(void)
-{
- struct vm_gic v;
- uint64_t addr;
- int ret, i;
-
- v.vm = vm_create(NR_VCPUS);
- (void)vm_vcpu_add(v.vm, 0, guest_code);
-
- v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
-
- (void)vm_vcpu_add(v.vm, 3, guest_code);
-
- v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EINVAL,
-		"attempting to read GICR_TYPER of a non-created vcpu");
-
- (void)vm_vcpu_add(v.vm, 1, guest_code);
-
- v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EBUSY,
- "read GICR_TYPER before GIC initialized");
-
- (void)vm_vcpu_add(v.vm, 2, guest_code);
-
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
-
- for (i = 0; i < NR_VCPUS ; i++) {
- v3_redist_reg_get(v.gic_fd, i, GICR_TYPER, i * 0x100,
- "read GICR_TYPER before rdist region setting");
- }
-
- addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
-	/* The first 2 rdists should be put there (vcpu 0 and 3) */
- v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x0, "read typer of rdist #0");
- v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x310, "read typer of rdist #1");
-
- addr = REDIST_REGION_ATTR_ADDR(10, 0x100000, 0, 1);
- ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- TEST_ASSERT(ret && errno == EINVAL, "collision with previous rdist region");
-
- v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100,
- "no redist region attached to vcpu #1 yet, last cannot be returned");
- v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x200,
- "no redist region attached to vcpu #2, last cannot be returned");
-
- addr = REDIST_REGION_ATTR_ADDR(10, 0x20000, 0, 1);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
- v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #1");
- v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x210,
-			  "read typer of rdist #2, last properly returned");
-
- vm_gic_destroy(&v);
-}
-
-static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
- uint32_t vcpuids[])
-{
- struct vm_gic v;
- int i;
-
- v.vm = vm_create(nr_vcpus);
- for (i = 0; i < nr_vcpus; i++)
- vm_vcpu_add(v.vm, vcpuids[i], guest_code);
-
- v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
-
- return v;
-}
-
-/*
- * Test GICR_TYPER last bit with new redist regions
- * rdist regions #1 and #2 are contiguous
- * rdist region #0 @0x100000 2 rdist capacity
- * rdists: 0, 3 (Last)
- * rdist region #1 @0x240000 2 rdist capacity
- * rdists: 5, 4 (Last)
- * rdist region #2 @0x200000 2 rdist capacity
- * rdists: 1, 2
- */
-static void test_v3_last_bit_redist_regions(void)
-{
- uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
- struct vm_gic v;
- uint64_t addr;
-
- v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
-
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
-
- addr = REDIST_REGION_ATTR_ADDR(2, 0x100000, 0, 0);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
- addr = REDIST_REGION_ATTR_ADDR(2, 0x240000, 0, 1);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
- addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 2);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
-
- v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x000, "read typer of rdist #0");
- v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #1");
- v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x200, "read typer of rdist #2");
- v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x310, "read typer of rdist #3");
- v3_redist_reg_get(v.gic_fd, 5, GICR_TYPER, 0x500, "read typer of rdist #5");
- v3_redist_reg_get(v.gic_fd, 4, GICR_TYPER, 0x410, "read typer of rdist #4");
-
- vm_gic_destroy(&v);
-}
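The expected values combine two GICR_TYPER fields: Processor_Number (bits
[23:8], which KVM derives from the vcpu id) and Last (bit 4, set for the final
redistributor of a region). A hypothetical helper capturing the arithmetic:

	static uint32_t expected_typer(uint32_t vcpu_id, bool last)
	{
		return (vcpu_id << 8) | (last ? 0x10 : 0);
	}

	/* e.g. expected_typer(3, true) == 0x310, expected_typer(5, false) == 0x500 */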
-
-/* Test last bit with legacy region */
-static void test_v3_last_bit_single_rdist(void)
-{
- uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
- struct vm_gic v;
- uint64_t addr;
-
- v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
-
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
-
- addr = 0x10000;
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
-
- v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x000, "read typer of rdist #0");
- v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x300, "read typer of rdist #1");
- v3_redist_reg_get(v.gic_fd, 5, GICR_TYPER, 0x500, "read typer of rdist #2");
- v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #3");
-	v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x210, "read typer of rdist #4");
-
- vm_gic_destroy(&v);
-}
-
-/* Uses the legacy REDIST region API. */
-static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
-{
- struct kvm_vcpu *vcpus[NR_VCPUS];
- struct vm_gic v;
- int ret, i;
- uint64_t addr;
-
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus);
-
-	/* Set space for 3 redists; we have 1 vcpu, so this succeeds. */
- addr = max_phys_size - (3 * 2 * 0x10000);
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
-
- addr = 0x00000;
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST, &addr);
-
- /* Add the rest of the VCPUs */
- for (i = 1; i < NR_VCPUS; ++i)
- vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);
-
- kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
-
- /* Attempt to run a vcpu without enough redist space. */
- ret = run_vcpu(vcpus[2]);
- TEST_ASSERT(ret && errno == EINVAL,
- "redist base+size above PA range detected on 1st vcpu run");
-
- vm_gic_destroy(&v);
-}
-
-static void test_v3_its_region(void)
-{
- struct kvm_vcpu *vcpus[NR_VCPUS];
- struct vm_gic v;
- uint64_t addr;
- int its_fd, ret;
-
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
- its_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_ITS);
-
- addr = 0x401000;
- ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr);
- TEST_ASSERT(ret && errno == EINVAL,
- "ITS region with misaligned address");
-
- addr = max_phys_size;
- ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr);
- TEST_ASSERT(ret && errno == E2BIG,
- "register ITS region with base address beyond IPA range");
-
- addr = max_phys_size - 0x10000;
- ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr);
- TEST_ASSERT(ret && errno == E2BIG,
- "Half of ITS region is beyond IPA range");
-
- /* This one succeeds setting the ITS base */
- addr = 0x400000;
- kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr);
-
- addr = 0x300000;
- ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr);
- TEST_ASSERT(ret && errno == EEXIST, "ITS base set again");
-
- close(its_fd);
- vm_gic_destroy(&v);
-}
-
-/*
- * Returns 0 if it's possible to create GIC device of a given type (V2 or V3).
- */
-int test_kvm_device(uint32_t gic_dev_type)
-{
- struct kvm_vcpu *vcpus[NR_VCPUS];
- struct vm_gic v;
- uint32_t other;
- int ret;
-
- v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
-
-	/* try to create a non-existing KVM device */
- ret = __kvm_test_create_device(v.vm, 0);
- TEST_ASSERT(ret && errno == ENODEV, "unsupported device");
-
- /* trial mode */
- ret = __kvm_test_create_device(v.vm, gic_dev_type);
- if (ret)
- return ret;
- v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
-
- ret = __kvm_create_device(v.vm, gic_dev_type);
- TEST_ASSERT(ret < 0 && errno == EEXIST, "create GIC device twice");
-
- /* try to create the other gic_dev_type */
- other = VGIC_DEV_IS_V2(gic_dev_type) ? KVM_DEV_TYPE_ARM_VGIC_V3
- : KVM_DEV_TYPE_ARM_VGIC_V2;
-
- if (!__kvm_test_create_device(v.vm, other)) {
- ret = __kvm_create_device(v.vm, other);
- TEST_ASSERT(ret < 0 && (errno == EINVAL || errno == EEXIST),
- "create GIC device while other version exists");
- }
-
- vm_gic_destroy(&v);
-
- return 0;
-}
-
-void run_tests(uint32_t gic_dev_type)
-{
- test_vcpus_then_vgic(gic_dev_type);
- test_vgic_then_vcpus(gic_dev_type);
-
- if (VGIC_DEV_IS_V3(gic_dev_type)) {
- test_v3_new_redist_regions();
- test_v3_typer_accesses();
- test_v3_last_bit_redist_regions();
- test_v3_last_bit_single_rdist();
- test_v3_redist_ipa_range_check_at_vcpu_run();
- test_v3_its_region();
- }
-}
-
-int main(int ac, char **av)
-{
- int ret;
- int pa_bits;
- int cnt_impl = 0;
-
- pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits;
- max_phys_size = 1ULL << pa_bits;
-
- ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V3);
- if (!ret) {
- pr_info("Running GIC_v3 tests.\n");
- run_tests(KVM_DEV_TYPE_ARM_VGIC_V3);
- cnt_impl++;
- }
-
- ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2);
- if (!ret) {
- pr_info("Running GIC_v2 tests.\n");
- run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
- cnt_impl++;
- }
-
- if (!cnt_impl) {
-		print_skip("Neither GICv2 nor GICv3 supported");
- exit(KSFT_SKIP);
- }
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
deleted file mode 100644
index 2e64b4856e38..000000000000
--- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+++ /dev/null
@@ -1,855 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * vgic_irq.c - Test userspace injection of IRQs
- *
- * This test validates the injection of IRQs from userspace using various
- * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
- * host to inject a specific intid via a GUEST_SYNC call, and then checks that
- * it received it.
- */
-#include <asm/kvm.h>
-#include <asm/kvm_para.h>
-#include <sys/eventfd.h>
-#include <linux/sizes.h>
-
-#include "processor.h"
-#include "test_util.h"
-#include "kvm_util.h"
-#include "gic.h"
-#include "gic_v3.h"
-#include "vgic.h"
-
-#define GICD_BASE_GPA 0x08000000ULL
-#define GICR_BASE_GPA 0x080A0000ULL
-
-/*
- * Stores the user specified args; it's passed to the guest and to every test
- * function.
- */
-struct test_args {
- uint32_t nr_irqs; /* number of KVM supported IRQs. */
- bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
- bool level_sensitive; /* 1 is level, 0 is edge */
- int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
- bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
-};
-
-/*
- * KVM implements 32 priority levels:
- * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
- *
- * Note that these macros will still be correct in the case that KVM implements
- * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
- */
-#define KVM_NUM_PRIOS 32
-#define KVM_PRIO_SHIFT 3 /* steps of 8 = 1 << 3 */
-#define KVM_PRIO_STEPS (1 << KVM_PRIO_SHIFT) /* 8 */
-#define LOWEST_PRIO (KVM_NUM_PRIOS - 1)
-#define CPU_PRIO_MASK (LOWEST_PRIO << KVM_PRIO_SHIFT) /* 0xf8 */
-#define IRQ_DEFAULT_PRIO (LOWEST_PRIO - 1)
-#define IRQ_DEFAULT_PRIO_REG (IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
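To make the arithmetic concrete, the derived constants follow directly from
the definitions above:

	_Static_assert(CPU_PRIO_MASK == 0xf8, "31 << 3");
	_Static_assert(IRQ_DEFAULT_PRIO_REG == 0xf0, "30 << 3");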
-
-static void *dist = (void *)GICD_BASE_GPA;
-static void *redist = (void *)GICR_BASE_GPA;
-
-/*
- * The kvm_inject_* utilities are used by the guest to ask the host to inject
- * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
- */
-
-typedef enum {
- KVM_INJECT_EDGE_IRQ_LINE = 1,
- KVM_SET_IRQ_LINE,
- KVM_SET_IRQ_LINE_HIGH,
- KVM_SET_LEVEL_INFO_HIGH,
- KVM_INJECT_IRQFD,
- KVM_WRITE_ISPENDR,
- KVM_WRITE_ISACTIVER,
-} kvm_inject_cmd;
-
-struct kvm_inject_args {
- kvm_inject_cmd cmd;
- uint32_t first_intid;
- uint32_t num;
- int level;
- bool expect_failure;
-};
-
-/* Used on the guest side to perform the hypercall. */
-static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
- uint32_t num, int level, bool expect_failure);
-
-/* Used on the host side to get the hypercall info. */
-static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
- struct kvm_inject_args *args);
-
-#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure) \
- kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)
-
-#define KVM_INJECT_MULTI(cmd, intid, num) \
- _KVM_INJECT_MULTI(cmd, intid, num, false)
-
-#define _KVM_INJECT(cmd, intid, expect_failure) \
- _KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)
-
-#define KVM_INJECT(cmd, intid) \
- _KVM_INJECT_MULTI(cmd, intid, 1, false)
-
-#define KVM_ACTIVATE(cmd, intid) \
- kvm_inject_call(cmd, intid, 1, 1, false);
-
-struct kvm_inject_desc {
- kvm_inject_cmd cmd;
-	/* can inject SGIs, PPIs, and/or SPIs. */
- bool sgi, ppi, spi;
-};
-
-static struct kvm_inject_desc inject_edge_fns[] = {
- /* sgi ppi spi */
- { KVM_INJECT_EDGE_IRQ_LINE, false, false, true },
- { KVM_INJECT_IRQFD, false, false, true },
- { KVM_WRITE_ISPENDR, true, false, true },
- { 0, },
-};
-
-static struct kvm_inject_desc inject_level_fns[] = {
- /* sgi ppi spi */
- { KVM_SET_IRQ_LINE_HIGH, false, true, true },
- { KVM_SET_LEVEL_INFO_HIGH, false, true, true },
- { KVM_INJECT_IRQFD, false, false, true },
- { KVM_WRITE_ISPENDR, false, true, true },
- { 0, },
-};
-
-static struct kvm_inject_desc set_active_fns[] = {
- /* sgi ppi spi */
- { KVM_WRITE_ISACTIVER, true, true, true },
- { 0, },
-};
-
-#define for_each_inject_fn(t, f) \
- for ((f) = (t); (f)->cmd; (f)++)
-
-#define for_each_supported_inject_fn(args, t, f) \
- for_each_inject_fn(t, f) \
- if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
-
-#define for_each_supported_activate_fn(args, t, f) \
- for_each_supported_inject_fn((args), (t), (f))
-
-/* Shared between the guest main thread and the IRQ handlers. */
-volatile uint64_t irq_handled;
-volatile uint32_t irqnr_received[MAX_SPI + 1];
-
-static void reset_stats(void)
-{
- int i;
-
- irq_handled = 0;
- for (i = 0; i <= MAX_SPI; i++)
- irqnr_received[i] = 0;
-}
-
-static uint64_t gic_read_ap1r0(void)
-{
- uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);
-
- dsb(sy);
- return reg;
-}
-
-static void gic_write_ap1r0(uint64_t val)
-{
- write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
- isb();
-}
-
-static void guest_set_irq_line(uint32_t intid, uint32_t level);
-
-static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
-{
- uint32_t intid = gic_get_and_ack_irq();
-
- if (intid == IAR_SPURIOUS)
- return;
-
- GUEST_ASSERT(gic_irq_get_active(intid));
-
- if (!level_sensitive)
- GUEST_ASSERT(!gic_irq_get_pending(intid));
-
- if (level_sensitive)
- guest_set_irq_line(intid, 0);
-
- GUEST_ASSERT(intid < MAX_SPI);
- irqnr_received[intid] += 1;
- irq_handled += 1;
-
- gic_set_eoi(intid);
- GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
- if (eoi_split)
- gic_set_dir(intid);
-
- GUEST_ASSERT(!gic_irq_get_active(intid));
- GUEST_ASSERT(!gic_irq_get_pending(intid));
-}
-
-static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
- uint32_t num, int level, bool expect_failure)
-{
- struct kvm_inject_args args = {
- .cmd = cmd,
- .first_intid = first_intid,
- .num = num,
- .level = level,
- .expect_failure = expect_failure,
- };
- GUEST_SYNC(&args);
-}
-
-#define GUEST_ASSERT_IAR_EMPTY() \
-do { \
- uint32_t _intid; \
- _intid = gic_get_and_ack_irq(); \
- GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \
-} while (0)
-
-#define CAT_HELPER(a, b) a ## b
-#define CAT(a, b) CAT_HELPER(a, b)
-#define PREFIX guest_irq_handler_
-#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
-#define GENERATE_GUEST_IRQ_HANDLER(split, lev) \
-static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs) \
-{ \
- guest_irq_generic_handler(split, lev); \
-}
-
-GENERATE_GUEST_IRQ_HANDLER(0, 0);
-GENERATE_GUEST_IRQ_HANDLER(0, 1);
-GENERATE_GUEST_IRQ_HANDLER(1, 0);
-GENERATE_GUEST_IRQ_HANDLER(1, 1);
-
-static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
- {GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
- {GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
-};
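Spelled out, GENERATE_GUEST_IRQ_HANDLER(0, 1) expands to:

	static void guest_irq_handler_01(struct ex_regs *regs)
	{
		guest_irq_generic_handler(0, 1);
	}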
-
-static void reset_priorities(struct test_args *args)
-{
- int i;
-
- for (i = 0; i < args->nr_irqs; i++)
- gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
-}
-
-static void guest_set_irq_line(uint32_t intid, uint32_t level)
-{
- kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
-}
-
-static void test_inject_fail(struct test_args *args,
- uint32_t intid, kvm_inject_cmd cmd)
-{
- reset_stats();
-
- _KVM_INJECT(cmd, intid, true);
- /* no IRQ to handle on entry */
-
- GUEST_ASSERT_EQ(irq_handled, 0);
- GUEST_ASSERT_IAR_EMPTY();
-}
-
-static void guest_inject(struct test_args *args,
- uint32_t first_intid, uint32_t num,
- kvm_inject_cmd cmd)
-{
- uint32_t i;
-
- reset_stats();
-
- /* Cycle over all priorities to make things more interesting. */
- for (i = first_intid; i < num + first_intid; i++)
- gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);
-
- asm volatile("msr daifset, #2" : : : "memory");
- KVM_INJECT_MULTI(cmd, first_intid, num);
-
- while (irq_handled < num) {
- asm volatile("wfi\n"
- "msr daifclr, #2\n"
- /* handle IRQ */
- "msr daifset, #2\n"
- : : : "memory");
- }
- asm volatile("msr daifclr, #2" : : : "memory");
-
- GUEST_ASSERT_EQ(irq_handled, num);
- for (i = first_intid; i < num + first_intid; i++)
- GUEST_ASSERT_EQ(irqnr_received[i], 1);
- GUEST_ASSERT_IAR_EMPTY();
-
- reset_priorities(args);
-}
-
-/*
- * Restore the active state of multiple concurrent IRQs (given by
- * concurrent_irqs). This does what a live-migration would do on the
- * destination side assuming there are some active IRQs that were not
- * deactivated yet.
- */
-static void guest_restore_active(struct test_args *args,
- uint32_t first_intid, uint32_t num,
- kvm_inject_cmd cmd)
-{
- uint32_t prio, intid, ap1r;
- int i;
-
- /*
- * Set the priorities of the @num IRQs in descending order, so
- * intid+1 can preempt intid.
- */
- for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
- /* prio is unsigned; check that the decrement didn't wrap. */
- GUEST_ASSERT(prio <= (num - 1) * 8);
- intid = i + first_intid;
- gic_set_priority(intid, prio);
- }
-
- /*
- * In a real migration, KVM would restore all GIC state before running
- * guest code.
- */
- for (i = 0; i < num; i++) {
- intid = i + first_intid;
- KVM_ACTIVATE(cmd, intid);
- ap1r = gic_read_ap1r0();
- ap1r |= 1U << i;
- gic_write_ap1r0(ap1r);
- }
-
- /* This is where the "migration" would occur. */
-
- /* finish handling the IRQs starting with the highest priority one. */
- for (i = 0; i < num; i++) {
- intid = num - i - 1 + first_intid;
- gic_set_eoi(intid);
- if (args->eoi_split)
- gic_set_dir(intid);
- }
-
- for (i = 0; i < num; i++)
- GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
- GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
- GUEST_ASSERT_IAR_EMPTY();
-}
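-
-/*
- * Sketch (not part of the original file, assuming one ICC_AP1R0_EL1 bit
- * per 8-priority group): the @num IRQs restored above have priorities
- * 0, 8, ..., (num - 1) * 8, so together they are expected to occupy the
- * low @num bits of AP1R0, which is what the bit-per-activation loop
- * accumulates.
- */
-static inline uint32_t sketch_expected_ap1r0(uint32_t num)
-{
- return (uint32_t)GENMASK(num - 1, 0);
-}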
-
-/*
- * Polls the IAR until it's not a spurious interrupt.
- *
- * This function should only be used in test_inject_preemption (with IRQs
- * masked).
- */
-static uint32_t wait_for_and_activate_irq(void)
-{
- uint32_t intid;
-
- do {
- asm volatile("wfi" : : : "memory");
- intid = gic_get_and_ack_irq();
- } while (intid == IAR_SPURIOUS);
-
- return intid;
-}
-
-/*
- * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
- * handle them without handling the actual exceptions. This is done by masking
- * interrupts for the whole test.
- */
-static void test_inject_preemption(struct test_args *args,
- uint32_t first_intid, int num,
- kvm_inject_cmd cmd)
-{
- uint32_t intid, prio, step = KVM_PRIO_STEPS;
- int i;
-
- /*
- * Set the priorities of the @num IRQs in descending order, so
- * intid+1 can preempt intid.
- */
- for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
- /* prio is unsigned; check that the decrement didn't wrap. */
- GUEST_ASSERT(prio <= (num - 1) * step);
- intid = i + first_intid;
- gic_set_priority(intid, prio);
- }
-
- local_irq_disable();
-
- for (i = 0; i < num; i++) {
- uint32_t tmp;
- intid = i + first_intid;
- KVM_INJECT(cmd, intid);
- /* Each successive IRQ will preempt the previous one. */
- tmp = wait_for_and_activate_irq();
- GUEST_ASSERT_EQ(tmp, intid);
- if (args->level_sensitive)
- guest_set_irq_line(intid, 0);
- }
-
- /* finish handling the IRQs starting with the highest priority one. */
- for (i = 0; i < num; i++) {
- intid = num - i - 1 + first_intid;
- gic_set_eoi(intid);
- if (args->eoi_split)
- gic_set_dir(intid);
- }
-
- local_irq_enable();
-
- for (i = 0; i < num; i++)
- GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
- GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
- GUEST_ASSERT_IAR_EMPTY();
-
- reset_priorities(args);
-}
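-
-/*
- * Worked example (not part of the original file): with num = 4 and a
- * step of KVM_PRIO_STEPS, the four IRQs get priorities 3 * step,
- * 2 * step, step and 0, so each newly injected intid is more urgent
- * than the previous one and preempts it while it is still active.
- */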
-
-static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
-{
- uint32_t nr_irqs = args->nr_irqs;
-
- if (f->sgi) {
- guest_inject(args, MIN_SGI, 1, f->cmd);
- guest_inject(args, 0, 16, f->cmd);
- }
-
- if (f->ppi)
- guest_inject(args, MIN_PPI, 1, f->cmd);
-
- if (f->spi) {
- guest_inject(args, MIN_SPI, 1, f->cmd);
- guest_inject(args, nr_irqs - 1, 1, f->cmd);
- guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
- }
-}
-
-static void test_injection_failure(struct test_args *args,
- struct kvm_inject_desc *f)
-{
- uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
- test_inject_fail(args, bad_intid[i], f->cmd);
-}
-
-static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
-{
- /*
- * Test up to 4 levels of preemption. KVM doesn't currently allow more
- * concurrently active IRQs than the number of implemented LRs. That
- * number is IMPLEMENTATION DEFINED, but most implementations seem to
- * provide 4.
- */
- if (f->sgi)
- test_inject_preemption(args, MIN_SGI, 4, f->cmd);
-
- if (f->ppi)
- test_inject_preemption(args, MIN_PPI, 4, f->cmd);
-
- if (f->spi)
- test_inject_preemption(args, MIN_SPI, 4, f->cmd);
-}
-
-static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
-{
- /* Test up to 4 active IRQs. Same reason as in test_preemption. */
- if (f->sgi)
- guest_restore_active(args, MIN_SGI, 4, f->cmd);
-
- if (f->ppi)
- guest_restore_active(args, MIN_PPI, 4, f->cmd);
-
- if (f->spi)
- guest_restore_active(args, MIN_SPI, 4, f->cmd);
-}
-
-static void guest_code(struct test_args *args)
-{
- uint32_t i, nr_irqs = args->nr_irqs;
- bool level_sensitive = args->level_sensitive;
- struct kvm_inject_desc *f, *inject_fns;
-
- gic_init(GIC_V3, 1, dist, redist);
-
- for (i = 0; i < nr_irqs; i++)
- gic_irq_enable(i);
-
- for (i = MIN_SPI; i < nr_irqs; i++)
- gic_irq_set_config(i, !level_sensitive);
-
- gic_set_eoi_split(args->eoi_split);
-
- reset_priorities(args);
- gic_set_priority_mask(CPU_PRIO_MASK);
-
- inject_fns = level_sensitive ? inject_level_fns
- : inject_edge_fns;
-
- local_irq_enable();
-
- /* Start the tests. */
- for_each_supported_inject_fn(args, inject_fns, f) {
- test_injection(args, f);
- test_preemption(args, f);
- test_injection_failure(args, f);
- }
-
- /*
- * Restore the active state of IRQs. This would happen when live
- * migrating IRQs in the middle of being handled.
- */
- for_each_supported_activate_fn(args, set_active_fns, f)
- test_restore_active(args, f);
-
- GUEST_DONE();
-}
-
-static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
- struct test_args *test_args, bool expect_failure)
-{
- int ret;
-
- if (!expect_failure) {
- kvm_arm_irq_line(vm, intid, level);
- } else {
- /* The interface doesn't allow larger intids. */
- if (intid > KVM_ARM_IRQ_NUM_MASK)
- return;
-
- ret = _kvm_arm_irq_line(vm, intid, level);
- TEST_ASSERT(ret != 0 && errno == EINVAL,
- "Bad intid %i did not cause KVM_IRQ_LINE "
- "error: rc: %i errno: %i", intid, ret, errno);
- }
-}
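-
-/*
- * Sketch (not part of the original file): kvm_arm_irq_line() presumably
- * packs the intid for KVM_IRQ_LINE along these lines, per the arm64 KVM
- * UAPI layout (type in bits [27:24], vCPU index in [23:16], IRQ number
- * in [15:0]).
- */
-static inline uint32_t sketch_pack_spi_irq(uint32_t intid)
-{
- return (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) |
-        (intid & KVM_ARM_IRQ_NUM_MASK);
-}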
-
-void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
- bool expect_failure)
-{
- if (!expect_failure) {
- kvm_irq_set_level_info(gic_fd, intid, level);
- } else {
- int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
- /*
- * The kernel silently fails for invalid SPIs and for SGIs (which
- * are not level-sensitive). It only checks that the intid doesn't
- * spill over 1U << 10 (the max reserved SPI); callers are expected
- * to mask the intid with 0x3ff (1023).
- */
- if (intid > VGIC_MAX_RESERVED)
- TEST_ASSERT(ret != 0 && errno == EINVAL,
- "Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
- "error: rc: %i errno: %i", intid, ret, errno);
- else
- TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
- "for intid %i failed, rc: %i errno: %i",
- intid, ret, errno);
- }
-}
-
-static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
- uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
- bool expect_failure)
-{
- struct kvm_irq_routing *routing;
- int ret;
- uint64_t i;
-
- assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
-
- routing = kvm_gsi_routing_create();
- for (i = intid; i < (uint64_t)intid + num; i++)
- kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
-
- if (!expect_failure) {
- kvm_gsi_routing_write(vm, routing);
- } else {
- ret = _kvm_gsi_routing_write(vm, routing);
- /* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
- if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
- TEST_ASSERT(ret != 0 && errno == EINVAL,
- "Bad intid %u did not cause KVM_SET_GSI_ROUTING "
- "error: rc: %i errno: %i", intid, ret, errno);
- else
- TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
- "for intid %i failed, rc: %i errno: %i",
- intid, ret, errno);
- }
-}
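-
-/*
- * Sketch (not part of the original file): the shape of one irqchip
- * routing entry as consumed by KVM_SET_GSI_ROUTING, assuming the
- * standard <linux/kvm.h> definitions; the helpers above build an array
- * of these with pin == gsi.
- */
-static inline struct kvm_irq_routing_entry sketch_irqchip_route(uint32_t gsi)
-{
- return (struct kvm_irq_routing_entry) {
-  .gsi = gsi,
-  .type = KVM_IRQ_ROUTING_IRQCHIP,
-  .u.irqchip = { .irqchip = 0, .pin = gsi },
- };
-}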
-
-static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
- struct kvm_vcpu *vcpu,
- bool expect_failure)
-{
- /*
- * Ignore this when expecting failure as invalid intids will lead to
- * either trying to inject SGIs when we configured the test to be
- * level_sensitive (or the reverse), or inject large intids which
- * will lead to writing above the ISPENDR register space (and we
- * don't want to do that either).
- */
- if (!expect_failure)
- kvm_irq_write_ispendr(gic_fd, intid, vcpu);
-}
-
-static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
- uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
- bool expect_failure)
-{
- int fd[MAX_SPI];
- uint64_t val;
- int ret, f;
- uint64_t i;
-
- /*
- * There is no way to try injecting an SGI or PPI as the interface
- * starts counting from the first SPI (above the private ones), so just
- * exit.
- */
- if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
- return;
-
- kvm_set_gsi_routing_irqchip_check(vm, intid, num,
- kvm_max_routes, expect_failure);
-
- /*
- * If expect_failure is set, inject anyway: the writes will silently
- * fail, and in any case the guest will check that no actual
- * interrupt was injected for those cases.
- */
-
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
- fd[f] = eventfd(0, 0);
- TEST_ASSERT(fd[f] != -1, __KVM_SYSCALL_ERROR("eventfd()", fd[f]));
- }
-
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
- struct kvm_irqfd irqfd = {
- .fd = fd[f],
- .gsi = i - MIN_SPI,
- };
- assert(i <= (uint64_t)UINT_MAX);
- vm_ioctl(vm, KVM_IRQFD, &irqfd);
- }
-
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
- val = 1;
- ret = write(fd[f], &val, sizeof(uint64_t));
- TEST_ASSERT(ret == sizeof(uint64_t),
- __KVM_SYSCALL_ERROR("write()", ret));
- }
-
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
- close(fd[f]);
-}
-
-/* handles the valid case: intid=0xffffffff num=1 */
-#define for_each_intid(first, num, tmp, i) \
- for ((tmp) = (i) = (first); \
- (tmp) < (uint64_t)(first) + (uint64_t)(num); \
- (tmp)++, (i)++)
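-
-/*
- * Usage sketch (not part of the original file; handle_one() is a
- * hypothetical callee): the 64-bit tmp cursor is what terminates the
- * loop, so first=0xffffffff with num=1 visits exactly one intid instead
- * of wrapping a 32-bit counter back to 0:
- *
- *   uint64_t tmp;
- *   uint32_t i;
- *
- *   for_each_intid(0xffffffff, 1, tmp, i)
- *           handle_one(i);
- */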
-
-static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
- struct kvm_inject_args *inject_args,
- struct test_args *test_args)
-{
- kvm_inject_cmd cmd = inject_args->cmd;
- uint32_t intid = inject_args->first_intid;
- uint32_t num = inject_args->num;
- int level = inject_args->level;
- bool expect_failure = inject_args->expect_failure;
- struct kvm_vm *vm = vcpu->vm;
- uint64_t tmp;
- uint32_t i;
-
- /* handles the valid case: intid=0xffffffff num=1 */
- assert(intid < UINT_MAX - num || num == 1);
-
- switch (cmd) {
- case KVM_INJECT_EDGE_IRQ_LINE:
- for_each_intid(intid, num, tmp, i)
- kvm_irq_line_check(vm, i, 1, test_args,
- expect_failure);
- for_each_intid(intid, num, tmp, i)
- kvm_irq_line_check(vm, i, 0, test_args,
- expect_failure);
- break;
- case KVM_SET_IRQ_LINE:
- for_each_intid(intid, num, tmp, i)
- kvm_irq_line_check(vm, i, level, test_args,
- expect_failure);
- break;
- case KVM_SET_IRQ_LINE_HIGH:
- for_each_intid(intid, num, tmp, i)
- kvm_irq_line_check(vm, i, 1, test_args,
- expect_failure);
- break;
- case KVM_SET_LEVEL_INFO_HIGH:
- for_each_intid(intid, num, tmp, i)
- kvm_irq_set_level_info_check(gic_fd, i, 1,
- expect_failure);
- break;
- case KVM_INJECT_IRQFD:
- kvm_routing_and_irqfd_check(vm, intid, num,
- test_args->kvm_max_routes,
- expect_failure);
- break;
- case KVM_WRITE_ISPENDR:
- for (i = intid; i < intid + num; i++)
- kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
- expect_failure);
- break;
- case KVM_WRITE_ISACTIVER:
- for (i = intid; i < intid + num; i++)
- kvm_irq_write_isactiver(gic_fd, i, vcpu);
- break;
- default:
- break;
- }
-}
-
-static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
- struct kvm_inject_args *args)
-{
- struct kvm_inject_args *kvm_args_hva;
- vm_vaddr_t kvm_args_gva;
-
- kvm_args_gva = uc->args[1];
- kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
- memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
-}
-
-static void print_args(struct test_args *args)
-{
- printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
- args->nr_irqs, args->level_sensitive,
- args->eoi_split);
-}
-
-static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
-{
- struct ucall uc;
- int gic_fd;
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- struct kvm_inject_args inject_args;
- vm_vaddr_t args_gva;
-
- struct test_args args = {
- .nr_irqs = nr_irqs,
- .level_sensitive = level_sensitive,
- .eoi_split = eoi_split,
- .kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
- .kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
- };
-
- print_args(&args);
-
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vcpu);
-
- /* Setup the guest args page (so it gets the args). */
- args_gva = vm_vaddr_alloc_page(vm);
- memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
- vcpu_args_set(vcpu, 1, args_gva);
-
- gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
- GICD_BASE_GPA, GICR_BASE_GPA);
- __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");
-
- vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
- guest_irq_handlers[args.eoi_split][args.level_sensitive]);
-
- while (1) {
- vcpu_run(vcpu);
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_SYNC:
- kvm_inject_get_call(vm, &uc, &inject_args);
- run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
- break;
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
- }
-
-done:
- close(gic_fd);
- kvm_vm_free(vm);
-}
-
-static void help(const char *name)
-{
- printf(
- "\n"
- "usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
- printf(" -n: specify number of IRQs to setup the vgic with. "
- "It has to be a multiple of 32 and between 64 and 1024.\n");
- printf(" -e: if 1 then EOI is split into a write to DIR on top "
- "of writing EOI.\n");
- printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
- puts("");
- exit(1);
-}
-
-int main(int argc, char **argv)
-{
- uint32_t nr_irqs = 64;
- bool default_args = true;
- bool level_sensitive = false;
- int opt;
- bool eoi_split = false;
-
- while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
- switch (opt) {
- case 'n':
- nr_irqs = atoi_non_negative("Number of IRQs", optarg);
- if (nr_irqs < 64 || nr_irqs > 1024 || nr_irqs % 32)
- help(argv[0]);
- break;
- case 'e':
- eoi_split = (bool)atoi_paranoid(optarg);
- default_args = false;
- break;
- case 'l':
- level_sensitive = (bool)atoi_paranoid(optarg);
- default_args = false;
- break;
- case 'h':
- default:
- help(argv[0]);
- break;
- }
- }
-
- /*
- * If the user only specified nr_irqs (or nothing at all), run all
- * combinations of level-sensitivity and EOI mode.
- */
- if (default_args) {
- test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
- test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
- test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
- test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
- } else {
- test_vgic(nr_irqs, level_sensitive, eoi_split);
- }
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
deleted file mode 100644
index 5f9713364693..000000000000
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ /dev/null
@@ -1,669 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * vpmu_counter_access - Test vPMU event counter access
- *
- * Copyright (c) 2023 Google LLC.
- *
- * This test checks if the guest can see the same number of PMU event
- * counters (PMCR_EL0.N) that userspace sets, if the guest can access
- * those counters, and if the guest is prevented from accessing any
- * other counters.
- * It also checks if userspace accesses to the PMU registers honor the
- * PMCR.N value that's set for the guest.
- * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
- */
-#include <kvm_util.h>
-#include <processor.h>
-#include <test_util.h>
-#include <vgic.h>
-#include <perf/arm_pmuv3.h>
-#include <linux/bitfield.h>
-
-/* The max number of PMU event counters (excluding the cycle counter) */
-#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
-
-/* The cycle counter bit position that's common among the PMU registers */
-#define ARMV8_PMU_CYCLE_IDX 31
-
-struct vpmu_vm {
- struct kvm_vm *vm;
- struct kvm_vcpu *vcpu;
- int gic_fd;
-};
-
-static struct vpmu_vm vpmu_vm;
-
-struct pmreg_sets {
- uint64_t set_reg_id;
- uint64_t clr_reg_id;
-};
-
-#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
-
-static uint64_t get_pmcr_n(uint64_t pmcr)
-{
- return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
-}
-
-static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
-{
- u64p_replace_bits((__u64 *) pmcr, pmcr_n, ARMV8_PMU_PMCR_N);
-}
-
-static uint64_t get_counters_mask(uint64_t n)
-{
- uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
-
- if (n)
- mask |= GENMASK(n - 1, 0);
- return mask;
-}
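-
-/*
- * Worked examples (not part of the original file): PMCR_EL0.N lives in
- * bits [15:11], so with ARMV8_PMU_PMCR_N == GENMASK(15, 11):
- *
- *   get_pmcr_n(0x3000) == 6
- *
- * and for n = 3 general counters:
- *
- *   get_counters_mask(3) == BIT(31) | GENMASK(2, 0)
- *
- * i.e. the cycle counter bit plus the bits for counters 0..2.
- */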
-
-/* Read PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline unsigned long read_sel_evcntr(int sel)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- return read_sysreg(pmxevcntr_el0);
-}
-
-/* Write PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline void write_sel_evcntr(int sel, unsigned long val)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- write_sysreg(val, pmxevcntr_el0);
- isb();
-}
-
-/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline unsigned long read_sel_evtyper(int sel)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- return read_sysreg(pmxevtyper_el0);
-}
-
-/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline void write_sel_evtyper(int sel, unsigned long val)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- write_sysreg(val, pmxevtyper_el0);
- isb();
-}
-
-static inline void enable_counter(int idx)
-{
- /* Writes to PMCNTENSET_EL0 only set the bits written as 1. */
- write_sysreg(BIT(idx), pmcntenset_el0);
- isb();
-}
-
-static inline void disable_counter(int idx)
-{
- /*
- * Writes to PMCNTENCLR_EL0 clear every bit written as 1, so write
- * only the bit for @idx rather than OR-ing in the currently set
- * bits, which would disable all enabled counters.
- */
- write_sysreg(BIT(idx), pmcntenclr_el0);
- isb();
-}
-
-static void pmu_disable_reset(void)
-{
- uint64_t pmcr = read_sysreg(pmcr_el0);
-
- /* Reset all counters, disabling them */
- pmcr &= ~ARMV8_PMU_PMCR_E;
- write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
- isb();
-}
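-
-/*
- * Hypothetical variant (not part of the original file): resetting the
- * cycle counter as well would additionally set PMCR_EL0.C.
- */
-static void sketch_pmu_disable_reset_all(void)
-{
- uint64_t pmcr = read_sysreg(pmcr_el0);
-
- pmcr &= ~ARMV8_PMU_PMCR_E;
- write_sysreg(pmcr | ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C, pmcr_el0);
- isb();
-}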
-
-#define RETURN_READ_PMEVCNTRN(n) \
- return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
-{
- PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
- return 0;
-}
-
-#define WRITE_PMEVCNTRN(n) \
- write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
- isb();
-}
-
-#define READ_PMEVTYPERN(n) \
- return read_sysreg(pmevtyper##n##_el0)
-static unsigned long read_pmevtypern(int n)
-{
- PMEVN_SWITCH(n, READ_PMEVTYPERN);
- return 0;
-}
-
-#define WRITE_PMEVTYPERN(n) \
- write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
- isb();
-}
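-
-/*
- * Usage sketch (not part of the original file): the direct and the
- * PMSELR_EL0-based paths are expected to be equivalent, e.g. for
- * counter 3:
- *
- *   unsigned long a = read_pmevcntrn(3);  // PMEVCNTR3_EL0 directly
- *   unsigned long b = read_sel_evcntr(3); // via PMSELR_EL0 + PMXEVCNTR_EL0
- */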
-
-/*
- * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
- * accessors that test cases will use. Each accessor either directly
- * reads/writes PMEV{CNTR,TYPER}<n>_EL0
- * (i.e. {read,write}_pmev{cnt,type}rn()), or reads/writes them through
- * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
- *
- * This is used to test that combinations of those accessors provide
- * consistent behavior.
- */
-struct pmc_accessor {
- /* A function to be used to read PMEVCNTR<n>_EL0 */
- unsigned long (*read_cntr)(int idx);
- /* A function to be used to write PMEVCNTR<n>_EL0 */
- void (*write_cntr)(int idx, unsigned long val);
- /* A function to be used to read PMEVTYPER<n>_EL0 */
- unsigned long (*read_typer)(int idx);
- /* A function to be used to write PMEVTYPER<n>_EL0 */
- void (*write_typer)(int idx, unsigned long val);
-};
-
-struct pmc_accessor pmc_accessors[] = {
- /* test with all direct accesses */
- { read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern },
- /* test with all indirect accesses */
- { read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper },
- /* read with direct accesses, and write with indirect accesses */
- { read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper },
- /* read with indirect accesses, and write with direct accesses */
- { read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern },
-};
-
-/*
- * Convert a pointer of pmc_accessor to an index in pmc_accessors[],
- * assuming that the pointer is one of the entries in pmc_accessors[].
- */
-#define PMC_ACC_TO_IDX(acc) (acc - &pmc_accessors[0])
-
-#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \
-{ \
- uint64_t _tval = read_sysreg(regname); \
- \
- if (set_expected) \
- __GUEST_ASSERT((_tval & mask), \
- "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \
- _tval, mask, set_expected); \
- else \
- __GUEST_ASSERT(!(_tval & mask), \
- "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \
- _tval, mask, set_expected); \
-}
-
-/*
- * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
- * are set or cleared as specified in @set_expected.
- */
-static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
-{
- GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
- GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
- GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected);
- GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected);
- GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected);
- GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected);
-}
-
-/*
- * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding
- * to the specified counter (@pmc_idx) can be read/written as expected.
- * When @set_op is true, it tries to set the bit for the counter in
- * those registers by writing the SET registers (the bit won't be set
- * if the counter is not implemented though).
- * Otherwise, it tries to clear the bits in the registers by writing
- * the CLR registers.
- * Then, it checks if the values indicated in the registers are as expected.
- */
-static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
-{
- uint64_t pmcr_n, test_bit = BIT(pmc_idx);
- bool set_expected = false;
-
- if (set_op) {
- write_sysreg(test_bit, pmcntenset_el0);
- write_sysreg(test_bit, pmintenset_el1);
- write_sysreg(test_bit, pmovsset_el0);
-
- /* The bit will be set only if the counter is implemented */
- pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0));
- set_expected = pmc_idx < pmcr_n;
- } else {
- write_sysreg(test_bit, pmcntenclr_el0);
- write_sysreg(test_bit, pmintenclr_el1);
- write_sysreg(test_bit, pmovsclr_el0);
- }
- check_bitmap_pmu_regs(test_bit, set_expected);
-}
-
-/*
- * Tests for reading/writing registers for the (implemented) event counter
- * specified by @pmc_idx.
- */
-static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
-{
- uint64_t write_data, read_data;
-
- /* Disable all PMCs and reset all PMCs to zero. */
- pmu_disable_reset();
-
- /*
- * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR}_EL1.
- */
-
- /* Make sure that the bits in those registers are set to 0 */
- test_bitmap_pmu_regs(pmc_idx, false);
- /* Test if setting the bit in those registers works */
- test_bitmap_pmu_regs(pmc_idx, true);
- /* Test if clearing the bit in those registers works */
- test_bitmap_pmu_regs(pmc_idx, false);
-
- /*
- * Tests for reading/writing the event type register.
- */
-
- /*
- * Set the event type register to an arbitrary value just for testing
- * of reading/writing the register.
- * The Arm ARM says that for events 0x0000 to 0x003F, the value read
- * back from the PMEVTYPER<n>_EL0.evtCount field is the value that was
- * written, even when the specified event is not supported.
- */
- write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED);
- acc->write_typer(pmc_idx, write_data);
- read_data = acc->read_typer(pmc_idx);
- __GUEST_ASSERT(read_data == write_data,
- "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
- pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
-
- /*
- * Tests for reading/writing the event count register.
- */
-
- read_data = acc->read_cntr(pmc_idx);
-
- /* The count value must be 0, as it is disabled and reset */
- __GUEST_ASSERT(read_data == 0,
- "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx",
- pmc_idx, PMC_ACC_TO_IDX(acc), read_data);
-
- write_data = read_data + pmc_idx + 0x12345;
- acc->write_cntr(pmc_idx, write_data);
- read_data = acc->read_cntr(pmc_idx);
- __GUEST_ASSERT(read_data == write_data,
- "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
- pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
-}
-
-#define INVALID_EC (-1ul)
-uint64_t expected_ec = INVALID_EC;
-
-static void guest_sync_handler(struct ex_regs *regs)
-{
- uint64_t esr, ec;
-
- esr = read_sysreg(esr_el1);
- ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;
-
- __GUEST_ASSERT(expected_ec == ec,
- "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
- regs->pc, esr, ec, expected_ec);
-
- /* skip the trapping instruction */
- regs->pc += 4;
-
- /* Use INVALID_EC to indicate an exception occurred */
- expected_ec = INVALID_EC;
-}
-
-/*
- * Run the given operation that should trigger an exception with the
- * given exception class. The exception handler (guest_sync_handler)
- * will reset expected_ec to INVALID_EC and skip the instruction that
- * trapped.
- */
-#define TEST_EXCEPTION(ec, ops) \
-({ \
- GUEST_ASSERT(ec != INVALID_EC); \
- WRITE_ONCE(expected_ec, ec); \
- dsb(ish); \
- ops; \
- GUEST_ASSERT(expected_ec == INVALID_EC); \
-})
-
-/*
- * Tests for reading/writing registers for the unimplemented event counter
- * specified by @pmc_idx (>= PMCR_EL0.N).
- */
-static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
-{
- /*
- * Reading/writing the event count/type registers should cause
- * an UNDEFINED exception.
- */
- TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx));
- TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
- TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx));
- TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
- /*
- * The bit corresponding to the (unimplemented) counter in
- * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
- */
- test_bitmap_pmu_regs(pmc_idx, true);
- test_bitmap_pmu_regs(pmc_idx, false);
-}
-
-/*
- * The guest is configured with PMUv3 with @expected_pmcr_n number of
- * event counters.
- * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
- * if reading/writing PMU registers for implemented or unimplemented
- * counters works as expected.
- */
-static void guest_code(uint64_t expected_pmcr_n)
-{
- uint64_t pmcr, pmcr_n, unimp_mask;
- int i, pmc;
-
- __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
- "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
- expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
-
- pmcr = read_sysreg(pmcr_el0);
- pmcr_n = get_pmcr_n(pmcr);
-
- /* Make sure that PMCR_EL0.N indicates the value userspace set */
- __GUEST_ASSERT(pmcr_n == expected_pmcr_n,
- "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
- expected_pmcr_n, pmcr_n);
-
- /*
- * Make sure that (RAZ) bits corresponding to unimplemented event
- * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset
- * to zero.
- * (NOTE: bits for implemented event counters are reset to UNKNOWN)
- */
- unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);
- check_bitmap_pmu_regs(unimp_mask, false);
-
- /*
- * Tests for reading/writing PMU registers for implemented counters.
- * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
- */
- for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
- for (pmc = 0; pmc < pmcr_n; pmc++)
- test_access_pmc_regs(&pmc_accessors[i], pmc);
- }
-
- /*
- * Tests for reading/writing PMU registers for unimplemented counters.
- * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
- */
- for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
- for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++)
- test_access_invalid_pmc_regs(&pmc_accessors[i], pmc);
- }
-
- GUEST_DONE();
-}
-
-#define GICD_BASE_GPA 0x8000000ULL
-#define GICR_BASE_GPA 0x80A0000ULL
-
-/* Create a VM that has one vCPU with PMUv3 configured. */
-static void create_vpmu_vm(void *guest_code)
-{
- struct kvm_vcpu_init init;
- uint8_t pmuver, ec;
- uint64_t dfr0, irq = 23;
- struct kvm_device_attr irq_attr = {
- .group = KVM_ARM_VCPU_PMU_V3_CTRL,
- .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
- .addr = (uint64_t)&irq,
- };
- struct kvm_device_attr init_attr = {
- .group = KVM_ARM_VCPU_PMU_V3_CTRL,
- .attr = KVM_ARM_VCPU_PMU_V3_INIT,
- };
-
- /* The test creates the vpmu_vm multiple times. Ensure a clean state */
- memset(&vpmu_vm, 0, sizeof(vpmu_vm));
-
- vpmu_vm.vm = vm_create(1);
- vm_init_descriptor_tables(vpmu_vm.vm);
- for (ec = 0; ec < ESR_EC_NUM; ec++) {
- vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
- guest_sync_handler);
- }
-
- /* Create vCPU with PMUv3 */
- vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
- init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
- vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
- vcpu_init_descriptor_tables(vpmu_vm.vcpu);
- vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
- GICD_BASE_GPA, GICR_BASE_GPA);
- __TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
- "Failed to create vgic-v3, skipping");
-
- /* Make sure that PMUv3 support is indicated in the ID register */
- vcpu_get_reg(vpmu_vm.vcpu,
- KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
- pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
- TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
- pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
- "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
-
- /* Initialize vPMU */
- vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
- vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
-}
-
-static void destroy_vpmu_vm(void)
-{
- close(vpmu_vm.gic_fd);
- kvm_vm_free(vpmu_vm.vm);
-}
-
-static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
-{
- struct ucall uc;
-
- vcpu_args_set(vcpu, 1, pmcr_n);
- vcpu_run(vcpu);
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- case UCALL_DONE:
- break;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- break;
- }
-}
-
-static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
-{
- struct kvm_vcpu *vcpu;
- uint64_t pmcr, pmcr_orig;
-
- create_vpmu_vm(guest_code);
- vcpu = vpmu_vm.vcpu;
-
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
- pmcr = pmcr_orig;
-
- /*
- * Setting PMCR.N to a value larger than the host limit should leave
- * the field unmodified, yet the write itself should still succeed.
- */
- set_pmcr_n(&pmcr, pmcr_n);
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
-
- if (expect_fail)
- TEST_ASSERT(pmcr_orig == pmcr,
- "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
- pmcr, pmcr_n);
- else
- TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
- "Failed to update PMCR.N to %lu (received: %lu)",
- pmcr_n, get_pmcr_n(pmcr));
-}
-
-/*
- * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
- * and run the test.
- */
-static void run_access_test(uint64_t pmcr_n)
-{
- uint64_t sp;
- struct kvm_vcpu *vcpu;
- struct kvm_vcpu_init init;
-
- pr_debug("Test with pmcr_n %lu\n", pmcr_n);
-
- test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
- vcpu = vpmu_vm.vcpu;
-
- /* Save the initial sp to restore it later, to run the guest again */
- vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
-
- run_vcpu(vcpu, pmcr_n);
-
- /*
- * Reset and re-initialize the vCPU, and run the guest code again to
- * check if PMCR_EL0.N is preserved.
- */
- vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
- init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
- aarch64_vcpu_setup(vcpu, &init);
- vcpu_init_descriptor_tables(vcpu);
- vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
- vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
-
- run_vcpu(vcpu, pmcr_n);
-
- destroy_vpmu_vm();
-}
-
-static struct pmreg_sets validity_check_reg_sets[] = {
- PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0),
- PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1),
- PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0),
-};
-
-/*
- * Create a VM, and check if KVM handles the userspace accesses of
- * the PMU register sets in @validity_check_reg_sets[] correctly.
- */
-static void run_pmregs_validity_test(uint64_t pmcr_n)
-{
- int i;
- struct kvm_vcpu *vcpu;
- uint64_t set_reg_id, clr_reg_id, reg_val;
- uint64_t valid_counters_mask, max_counters_mask;
-
- test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
- vcpu = vpmu_vm.vcpu;
-
- valid_counters_mask = get_counters_mask(pmcr_n);
- max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);
-
- for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) {
- set_reg_id = validity_check_reg_sets[i].set_reg_id;
- clr_reg_id = validity_check_reg_sets[i].clr_reg_id;
-
- /*
- * Test if the 'set' and 'clr' variants of the registers
- * are initialized based on the number of valid counters.
- */
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
- TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
- "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
- KVM_ARM64_SYS_REG(set_reg_id), reg_val);
-
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
- TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
- "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
- KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
-
- /*
- * Using the 'set' variant, force-set the register to the
- * max number of possible counters and test if KVM discards
- * the bits for unimplemented counters as it should.
- */
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);
-
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
- TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
- "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
- KVM_ARM64_SYS_REG(set_reg_id), reg_val);
-
- vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
- TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
- "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
- KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
- }
-
- destroy_vpmu_vm();
-}
-
-/*
- * Create a guest with one vCPU, and attempt to set PMCR_EL0.N for the
- * vCPU to @pmcr_n, which is larger than the host value. The write is
- * expected to be ignored (PMCR_EL0.N left unmodified) as @pmcr_n is too
- * big to set for the vCPU.
- */
-static void run_error_test(uint64_t pmcr_n)
-{
- pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
-
- test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
- destroy_vpmu_vm();
-}
-
-/*
- * Return the default number of implemented PMU event counters excluding
- * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
- */
-static uint64_t get_pmcr_n_limit(void)
-{
- uint64_t pmcr;
-
- create_vpmu_vm(guest_code);
- vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
- destroy_vpmu_vm();
- return get_pmcr_n(pmcr);
-}
-
-int main(void)
-{
- uint64_t i, pmcr_n;
-
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
-
- pmcr_n = get_pmcr_n_limit();
- for (i = 0; i <= pmcr_n; i++) {
- run_access_test(i);
- run_pmregs_validity_test(i);
- }
-
- for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
- run_error_test(i);
-
- return 0;
-}