author    Marc Zyngier <maz@kernel.org>  2021-10-14 11:42:38 +0100
committer Marc Zyngier <maz@kernel.org>  2021-12-01 11:51:21 +0000
commit    b5aa368abfbf4c0e041c792e3340955554eff97e (patch)
tree      310776b146c2dd74b97dadfb2f7bfbe8c36f77c4 /arch
parent    1408e73d21feffe77680acd4da611295db0dfcd8 (diff)
KVM: arm64: Merge kvm_arch_vcpu_run_pid_change() and kvm_vcpu_first_run_init()
The kvm_arch_vcpu_run_pid_change() helper gets called on each PID
change. The kvm_vcpu_first_run_init() helper gets run on the...
first run(!) of a vcpu.
As it turns out, the first run of a vcpu also triggers a PID change
event (vcpu->pid is initially NULL).
Use this property to merge these two helpers and get rid of another
arm64-specific oddity.
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
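For context on why the first run counts as a PID change: the arch hook touched below is invoked from the generic KVM_RUN ioctl path whenever the task driving the vcpu differs from the cached vcpu->pid, and vcpu->pid starts out NULL. A simplified sketch of that generic caller (paraphrased from virt/kvm/kvm_main.c of this era, not verbatim; the real code lives inside the KVM_RUN case of the vcpu ioctl switch) looks roughly like this:

    /*
     * Sketch (not verbatim) of the generic KVM_RUN path that calls the
     * arch hook. On the very first KVM_RUN, oldpid is NULL, so the
     * PID-change branch is taken and kvm_arch_vcpu_run_pid_change()
     * runs before the vcpu ever enters the guest.
     */
    oldpid = rcu_access_pointer(vcpu->pid);
    if (unlikely(oldpid != task_pid(current))) {
            struct pid *newpid;

            r = kvm_arch_vcpu_run_pid_change(vcpu);
            if (r)
                    break;          /* inside the KVM_RUN ioctl case */

            /* Remember the new thread as the vcpu owner. */
            newpid = get_task_pid(current, PIDTYPE_PID);
            rcu_assign_pointer(vcpu->pid, newpid);
            if (oldpid)
                    synchronize_rcu();
            put_pid(oldpid);
    }
    r = kvm_arch_vcpu_ioctl_run(vcpu);

With the merge below, the arm64 hook performs the -ENOEXEC/-EPERM checks and the FP mapping on every thread change, and the first-run initialisation only once, so kvm_arch_vcpu_ioctl_run() no longer needs to repeat any of it.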
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm64/kvm/arm.c   36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index b30b05a2f025..f20a265972c7 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -584,22 +584,34 @@ static void update_vmid(struct kvm_vmid *vmid)
 	spin_unlock(&kvm_vmid_lock);
 }
 
-int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
 {
-	return kvm_arch_vcpu_run_map_fp(vcpu);
+	return vcpu->arch.target >= 0;
 }
 
-static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+/*
+ * Handle both the initialisation that is being done when the vcpu is
+ * run for the first time, as well as the updates that must be
+ * performed each time we get a new thread dealing with this vcpu.
+ */
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	int ret = 0;
+	int ret;
 
-	if (likely(vcpu->arch.has_run_once))
-		return 0;
+	if (!kvm_vcpu_initialized(vcpu))
+		return -ENOEXEC;
 
 	if (!kvm_arm_vcpu_is_finalized(vcpu))
 		return -EPERM;
 
+	ret = kvm_arch_vcpu_run_map_fp(vcpu);
+	if (ret)
+		return ret;
+
+	if (likely(vcpu->arch.has_run_once))
+		return 0;
+
 	kvm_arm_vcpu_init_debug(vcpu);
 
 	if (likely(irqchip_in_kernel(kvm))) {
@@ -688,11 +700,6 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 	smp_rmb();
 }
 
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.target >= 0;
-}
-
 static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 {
 	if (kvm_request_pending(vcpu)) {
@@ -788,13 +795,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 	int ret;
 
-	if (unlikely(!kvm_vcpu_initialized(vcpu)))
-		return -ENOEXEC;
-
-	ret = kvm_vcpu_first_run_init(vcpu);
-	if (ret)
-		return ret;
-
 	if (run->exit_reason == KVM_EXIT_MMIO) {
 		ret = kvm_handle_mmio_return(vcpu);
 		if (ret)