Diffstat (limited to 'arch/arm64/kvm/arm.c')
 arch/arm64/kvm/arm.c | 65 ++++++--------------
 1 file changed, 20 insertions(+), 45 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d1cb298a58a0..4866b3f7b4ea 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -36,6 +36,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>
@@ -365,7 +366,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
#endif
/* Force users to call KVM_ARM_VCPU_INIT */
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -462,7 +463,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu_ptrauth_disable(vcpu);
kvm_arch_vcpu_load_debug_state_flags(vcpu);
- if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
+ if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
vcpu_set_on_unsupported_cpu(vcpu);
}
@@ -574,7 +575,7 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.target >= 0;
+ return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
}
/*
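The three hunks above retire the vcpu->arch.target == -1 sentinel in favour of a dedicated configuration flag. A minimal sketch of the resulting lifecycle, assuming the vcpu_set_flag()/vcpu_clear_flag()/vcpu_get_flag() helpers from asm/kvm_host.h (fragments for illustration, not complete functions):

/* On vCPU creation: force userspace to issue KVM_ARM_VCPU_INIT. */
vcpu_clear_flag(vcpu, VCPU_INITIALIZED);

/* On a successful KVM_ARM_VCPU_INIT (see __kvm_vcpu_set_target below). */
vcpu_set_flag(vcpu, VCPU_INITIALIZED);

/* Wherever KVM must refuse to run an uninitialized vCPU. */
if (!vcpu_get_flag(vcpu, VCPU_INITIALIZED))
	return -ENOEXEC;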
@@ -803,6 +804,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
kvm_pmu_handle_pmcr(vcpu,
__vcpu_sys_reg(vcpu, PMCR_EL0));
+ if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
+ kvm_vcpu_pmu_restore_guest(vcpu);
+
if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
return kvm_vcpu_suspend(vcpu);
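For context on the new KVM_REQ_RESYNC_PMU_EL0 handling above: vCPU requests are posted from elsewhere in the kernel and serviced here before guest entry. A hedged sketch of the producer side using the generic request API (the call site is illustrative, not taken from this patch):

/* Ask the vCPU to resync its PMU EL0 state on the next guest entry,
 * and kick it out of the guest so the request is noticed promptly. */
kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
kvm_vcpu_kick(vcpu);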
@@ -818,6 +822,9 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
if (likely(!vcpu_mode_is_32bit(vcpu)))
return false;
+ if (vcpu_has_nv(vcpu))
+ return true;
+
return !kvm_supports_32bit_el0();
}
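Pieced together from the context and added lines, the predicate after this hunk reads as follows; once nested virt is enabled, any 32-bit guest mode is treated as invalid regardless of 32-bit EL0 support:

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	/* Nested virtualization rules out any 32bit guest mode. */
	if (vcpu_has_nv(vcpu))
		return true;

	return !kvm_supports_32bit_el0();
}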
@@ -1058,7 +1065,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* invalid. The VMM can try and fix it by issuing a
* KVM_ARM_VCPU_INIT if it really wants to.
*/
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
ret = ARM_EXCEPTION_IL;
}
@@ -1219,8 +1226,7 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
{
unsigned long features = init->features[0];
- return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
- vcpu->arch.target != init->target;
+ return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
}
static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
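With the target comparison gone, the whole of kvm_vcpu_init_changed() reduces to a feature-bitmap comparison. Reconstructed from the hunk (the second parameter is assumed to be the usual const struct kvm_vcpu_init pointer):

static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
				  const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];

	return !bitmap_equal(vcpu->arch.features, &features,
			     KVM_VCPU_MAX_FEATURES);
}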
@@ -1236,20 +1242,18 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
!bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
goto out_unlock;
- vcpu->arch.target = init->target;
bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
/* Now we know what it is, we can reset it. */
ret = kvm_reset_vcpu(vcpu);
if (ret) {
- vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
goto out_unlock;
}
bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
-
+ vcpu_set_flag(vcpu, VCPU_INITIALIZED);
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
return ret;
@@ -1260,14 +1264,15 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
{
int ret;
- if (init->target != kvm_target_cpu())
+ if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
+ init->target != kvm_target_cpu())
return -EINVAL;
ret = kvm_vcpu_init_check_features(vcpu, init);
if (ret)
return ret;
- if (vcpu->arch.target == -1)
+ if (!kvm_vcpu_initialized(vcpu))
return __kvm_vcpu_set_target(vcpu, init);
if (kvm_vcpu_init_changed(vcpu, init))
@@ -1532,12 +1537,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot)
-{
- kvm_flush_remote_tlbs(kvm);
-}
-
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
struct kvm_arm_device_addr *dev_addr)
{
@@ -1595,9 +1594,9 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
}
case KVM_ARM_PREFERRED_TARGET: {
- struct kvm_vcpu_init init;
-
- kvm_vcpu_preferred_target(&init);
+ struct kvm_vcpu_init init = {
+ .target = KVM_ARM_TARGET_GENERIC_V8,
+ };
if (copy_to_user(argp, &init, sizeof(init)))
return -EFAULT;
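Taken together, the two hunks above mean KVM_ARM_PREFERRED_TARGET now always reports KVM_ARM_TARGET_GENERIC_V8, and kvm_vcpu_set_target() accepts that value on any host. A self-contained userspace sketch of the VMM flow (error handling omitted for brevity):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int init_one_vcpu(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_vcpu_init init;

	/* Reports KVM_ARM_TARGET_GENERIC_V8 after this change. */
	ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);

	/* Until this succeeds, the vCPU stays uninitialized
	 * (VCPU_INITIALIZED clear) and KVM_RUN is refused. */
	return ioctl(vcpu, KVM_ARM_VCPU_INIT, &init);
}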
@@ -2276,30 +2275,8 @@ static int __init init_hyp_mode(void)
for_each_possible_cpu(cpu) {
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
- unsigned long hyp_addr;
- /*
- * Allocate a contiguous HYP private VA range for the stack
- * and guard page. The allocation is also aligned based on
- * the order of its size.
- */
- err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
- if (err) {
- kvm_err("Cannot allocate hyp stack guard page\n");
- goto out_err;
- }
-
- /*
- * Since the stack grows downwards, map the stack to the page
- * at the higher address and leave the lower guard page
- * unbacked.
- *
- * Any valid stack address now has the PAGE_SHIFT bit as 1
- * and addresses corresponding to the guard page have the
- * PAGE_SHIFT bit as 0 - this is used for overflow detection.
- */
- err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
- __pa(stack_page), PAGE_HYP);
+ err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
if (err) {
kvm_err("Cannot map hyp stack\n");
goto out_err;
@@ -2312,8 +2289,6 @@ static int __init init_hyp_mode(void)
* has been mapped in the flexible private VA space.
*/
params->stack_pa = __pa(stack_page);
-
- params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}
for_each_possible_cpu(cpu) {
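The long comment deleted above still describes the trick now hidden behind create_hyp_stack(): the stack occupies the higher page of a two-page, size-aligned private VA range, with the lower guard page left unbacked. A sketch of the overflow check this layout enables (hypothetical helper name, assuming create_hyp_stack() preserves the same layout):

/* Any valid stack address has the PAGE_SHIFT bit set; an address
 * that faults with that bit clear fell into the unbacked guard
 * page, i.e. the hyp stack overflowed. */
static bool hyp_stack_overflowed(unsigned long sp)
{
	return !(sp & PAGE_SIZE);
}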