author	Andrew Jones <drjones@redhat.com>	2020-08-04 19:06:02 +0200
committer	Marc Zyngier <maz@kernel.org>	2020-08-21 14:04:14 +0100
commit	53f985584e3c2ebe5f2455530fbf87a001528db8 (patch)
tree	7ad5be2d2651d8ebc4595c784a13d43b7e6a5a2c /arch/arm64/kvm/pvtime.c
parent	4d2d4ce001f283ed8127173543b4cfb65641e357 (diff)
KVM: arm64: pvtime: Fix stolen time accounting across migration
When updating the stolen time we should always read the current stolen
time from the user provided memory, not from a kernel cache. If we use
a cache then we'll end up resetting stolen time to zero on the first
update after migration.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200804170604.42662-5-drjones@redhat.com
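For context, the base + offset address used in the hunk below points into the per-vCPU stolen-time record that the host shares with the guest. A sketch of its layout, following the kernel's struct pvclock_vcpu_stolen_time for the Arm DEN 0057A paravirtualized time protocol (defined in include/linux/arm-smccc.h around this point in the tree's history; field comments are paraphrased from the spec):

	struct pvclock_vcpu_stolen_time {
		__le32 revision;	/* structure revision; 0 for DEN 0057A */
		__le32 attributes;	/* must be zero */
		__le64 stolen_time;	/* stolen nanoseconds, little-endian */
		u8 padding[48];		/* pads the record to 64 bytes */
	};

The fix below read-modify-writes only the stolen_time field of this record, so a value restored into guest memory by the VMM after migration is preserved.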
Diffstat (limited to 'arch/arm64/kvm/pvtime.c')
-rw-r--r--	arch/arm64/kvm/pvtime.c	23
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index 241ded7ee0ad..75234321d896 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -13,26 +13,22 @@
 void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
+	u64 base = vcpu->arch.steal.base;
 	u64 last_steal = vcpu->arch.steal.last_steal;
-	u64 steal;
-	__le64 steal_le;
-	u64 offset;
+	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+	u64 steal = 0;
 	int idx;
-	u64 base = vcpu->arch.steal.base;
 
 	if (base == GPA_INVALID)
 		return;
 
-	/* Let's do the local bookkeeping */
-	steal = vcpu->arch.steal.steal;
-	vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
-	steal += vcpu->arch.steal.last_steal - last_steal;
-	vcpu->arch.steal.steal = steal;
-
-	steal_le = cpu_to_le64(steal);
 	idx = srcu_read_lock(&kvm->srcu);
-	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
-	kvm_put_guest(kvm, base + offset, steal_le);
+	if (!kvm_get_guest(kvm, base + offset, steal)) {
+		steal = le64_to_cpu(steal);
+		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+		steal += vcpu->arch.steal.last_steal - last_steal;
+		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+	}
 	srcu_read_unlock(&kvm->srcu, idx);
 }
@@ -66,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	 * Start counting stolen time from the time the guest requests
 	 * the feature enabled.
 	 */
-	vcpu->arch.steal.steal = 0;
 	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
 
 	idx = srcu_read_lock(&kvm->srcu);
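To make the bug concrete, here is a minimal, self-contained userspace model (hypothetical names, not kernel code) of the two accounting schemes across a migration. The VMM restores a non-zero stolen_time into guest memory on the target host, but the kernel-side cache there starts at zero, so the old scheme's first update clobbers the guest's accumulated history while the read-modify-write scheme accumulates onto the restored value:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the shared record's stolen_time field
	 * (endianness handling omitted for clarity). */
	struct stolen_rec { uint64_t stolen_time; };

	/* Old scheme: the host keeps its own cache and overwrites the record. */
	static void update_cached(struct stolen_rec *rec, uint64_t *cache,
				  uint64_t *last, uint64_t run_delay)
	{
		*cache += run_delay - *last;	/* cache is 0 on the new host */
		*last = run_delay;
		rec->stolen_time = *cache;	/* clobbers the restored value */
	}

	/* New scheme: read-modify-write the record itself, so whatever the
	 * VMM restored after migration is preserved and added onto. */
	static void update_rmw(struct stolen_rec *rec, uint64_t *last,
			       uint64_t run_delay)
	{
		rec->stolen_time += run_delay - *last;
		*last = run_delay;
	}

	int main(void)
	{
		struct stolen_rec rec = { .stolen_time = 5000 }; /* restored by VMM */
		uint64_t cache = 0, last = 0;	/* fresh kernel state on target */

		update_cached(&rec, &cache, &last, 100);
		printf("cached scheme: %llu\n",
		       (unsigned long long)rec.stolen_time);	/* 100: history lost */

		rec.stolen_time = 5000;
		last = 0;
		update_rmw(&rec, &last, 100);
		printf("rmw scheme:    %llu\n",
		       (unsigned long long)rec.stolen_time);	/* 5100: preserved */
		return 0;
	}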