Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arm.c | 11
-rw-r--r--  virt/kvm/arm/mmu.c |  6
2 files changed, 9 insertions, 8 deletions
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 932e61858c55..49d13510e9c2 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -420,7 +420,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
*/
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
- return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
+ bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+ return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
&& !v->arch.power_off && !v->arch.pause);
}
@@ -814,18 +815,18 @@ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
int bit_index;
bool set;
- unsigned long *ptr;
+ unsigned long *hcr;
if (number == KVM_ARM_IRQ_CPU_IRQ)
bit_index = __ffs(HCR_VI);
else /* KVM_ARM_IRQ_CPU_FIQ */
bit_index = __ffs(HCR_VF);
- ptr = (unsigned long *)&vcpu->arch.irq_lines;
+ hcr = vcpu_hcr(vcpu);
if (level)
- set = test_and_set_bit(bit_index, ptr);
+ set = test_and_set_bit(bit_index, hcr);
else
- set = test_and_clear_bit(bit_index, ptr);
+ set = test_and_clear_bit(bit_index, hcr);
/*
* If we didn't change anything, no need to wake up or kick other CPUs
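
For reference, the bit arithmetic in vcpu_interrupt_line() above can be exercised outside the kernel. The snippet below is a standalone userspace illustration, not kernel code: it assumes the architectural HCR bit positions (VI = bit 7, VF = bit 6, matching the kernel's HCR_VI/HCR_VF definitions) and stands in __builtin_ctzl() for __ffs() and plain read-modify-write for the atomic test_and_set_bit()/test_and_clear_bit().

#include <stdio.h>

#define HCR_VF	(1UL << 6)	/* virtual FIQ pending */
#define HCR_VI	(1UL << 7)	/* virtual IRQ pending */

int main(void)
{
	unsigned long hcr = 0;

	/* __ffs(HCR_VI) in the kernel: index of the lowest set bit. */
	int irq_bit = __builtin_ctzl(HCR_VI);
	int fiq_bit = __builtin_ctzl(HCR_VF);

	/* Raise the virtual IRQ line, as test_and_set_bit() would. */
	hcr |= 1UL << irq_bit;
	printf("after IRQ raise: hcr = %#lx\n", hcr);

	/* Lower the virtual FIQ line, as test_and_clear_bit() would. */
	hcr &= ~(1UL << fiq_bit);
	printf("after FIQ lower: hcr = %#lx\n", hcr);

	return 0;
}
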
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ec62d1cccab7..9ebff8e530f9 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -2035,7 +2035,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
*/
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
- unsigned long hcr = vcpu_get_hcr(vcpu);
+ unsigned long hcr = *vcpu_hcr(vcpu);
/*
* If this is the first time we do a S/W operation
@@ -2050,7 +2050,7 @@ void kvm_set_way_flush(struct kvm_vcpu *vcpu)
trace_kvm_set_way_flush(*vcpu_pc(vcpu),
vcpu_has_cache_enabled(vcpu));
stage2_flush_vm(vcpu->kvm);
- vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+ *vcpu_hcr(vcpu) = hcr | HCR_TVM;
}
}
@@ -2068,7 +2068,7 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
/* Caches are now on, stop trapping VM ops (until a S/W op) */
if (now_enabled)
- vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+ *vcpu_hcr(vcpu) &= ~HCR_TVM;
trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}
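
The hunks above rely on the vcpu_hcr() accessor introduced by the rest of this series outside virt/, which returns a pointer to the word holding the guest's HCR so callers can read and modify it in place. A minimal sketch of what such an accessor looks like follows; the field names (arch.hcr on 32-bit arm, arch.hcr_el2 on arm64) are assumptions based on the existing vcpu->arch layout, not taken from this diff.

/* 32-bit arm flavour (arch/arm/include/asm/kvm_emulate.h), sketch only: */
static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr;	/* assumed field name */
}

/* arm64 flavour (arch/arm64/include/asm/kvm_emulate.h), sketch only: */
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;	/* assumed field name */
}

With an accessor along those lines, *vcpu_hcr(vcpu) &= ~HCR_TVM is a plain read-modify-write of the same word that the old vcpu_get_hcr()/vcpu_set_hcr() pair wrapped, which is what lets the diff drop the separate vcpu->arch.irq_lines copy.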