Diffstat (limited to 'arch/loongarch/kvm/timer.c')
-rw-r--r-- | arch/loongarch/kvm/timer.c | 57
1 files changed, 14 insertions, 43 deletions
diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c
index 111328f60872..32dc213374be 100644
--- a/arch/loongarch/kvm/timer.c
+++ b/arch/loongarch/kvm/timer.c
@@ -23,24 +23,6 @@ static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
 	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
 }
 
-/*
- * Push timer forward on timeout.
- * Handle an hrtimer event by push the hrtimer forward a period.
- */
-static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
-{
-	unsigned long cfg, period;
-
-	/* Add periodic tick to current expire time */
-	cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
-	if (cfg & CSR_TCFG_PERIOD) {
-		period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
-		hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
-		return HRTIMER_RESTART;
-	} else
-		return HRTIMER_NORESTART;
-}
-
 /* Low level hrtimer wake routine */
 enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
 {
@@ -50,7 +32,7 @@ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
 	kvm_queue_irq(vcpu, INT_TI);
 	rcuwait_wake_up(&vcpu->wait);
 
-	return kvm_count_timeout(vcpu);
+	return HRTIMER_NORESTART;
 }
 
 /*
@@ -93,7 +75,8 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
 	/*
 	 * Freeze the soft-timer and sync the guest stable timer with it.
 	 */
-	hrtimer_cancel(&vcpu->arch.swtimer);
+	if (kvm_vcpu_is_blocking(vcpu))
+		hrtimer_cancel(&vcpu->arch.swtimer);
 
 	/*
 	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
@@ -168,26 +151,21 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
 	 * Here judge one-shot timer fired by checking whether TVAL is larger
 	 * than TCFG
 	 */
-	if (ticks < cfg) {
+	if (ticks < cfg)
 		delta = tick_to_ns(vcpu, ticks);
-		expire = ktime_add_ns(ktime_get(), delta);
-		vcpu->arch.expire = expire;
+	else
+		delta = 0;
+
+	expire = ktime_add_ns(ktime_get(), delta);
+	vcpu->arch.expire = expire;
+	if (kvm_vcpu_is_blocking(vcpu)) {
 		/*
-		 * HRTIMER_MODE_PINNED is suggested since vcpu may run in
-		 * the same physical cpu in next time
-		 */
-		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
-	} else if (vcpu->stat.generic.blocking) {
-		/*
-		 * Inject timer interrupt so that halt polling can dectect and exit.
-		 * VCPU is scheduled out already and sleeps in rcuwait queue and
-		 * will not poll pending events again. kvm_queue_irq() is not enough,
-		 * hrtimer swtimer should be used here.
+		 * HRTIMER_MODE_PINNED_HARD is suggested since vcpu may run in
+		 * the same physical cpu in next time, and the timer should run
+		 * in hardirq context even in the PREEMPT_RT case.
 		 */
-		expire = ktime_add_ns(ktime_get(), 10);
-		vcpu->arch.expire = expire;
-		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
 	}
 }
 
@@ -211,10 +189,3 @@ void kvm_save_timer(struct kvm_vcpu *vcpu)
 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
 	preempt_enable();
 }
-
-void kvm_reset_timer(struct kvm_vcpu *vcpu)
-{
-	write_gcsr_timercfg(0);
-	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
-	hrtimer_cancel(&vcpu->arch.swtimer);
-}
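
For context: the change above makes the guest software timer strictly one-shot (the wakeup handler now returns HRTIMER_NORESTART instead of pushing a periodic timer forward), cancels and arms the hrtimer only while the vCPU is blocking, and switches the arming mode from HRTIMER_MODE_ABS_PINNED to HRTIMER_MODE_ABS_PINNED_HARD so the callback fires in hardirq context even on PREEMPT_RT. The following is a minimal, self-contained sketch of that hrtimer pattern, not code from the patch; struct demo_ctx, demo_wakeup(), demo_init() and demo_arm() are hypothetical names used only for illustration.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Hypothetical stand-in for the vcpu arch state that owns the swtimer. */
struct demo_ctx {
	struct hrtimer swtimer;
};

/* Runs in hardirq context (the _HARD mode); wake the waiter, never re-arm. */
static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
{
	struct demo_ctx *ctx = container_of(timer, struct demo_ctx, swtimer);

	/* ... queue the virtual timer interrupt and wake the blocked vCPU ... */
	(void)ctx;
	return HRTIMER_NORESTART;	/* one-shot: the handler never restarts it */
}

static void demo_init(struct demo_ctx *ctx)
{
	hrtimer_init(&ctx->swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
	ctx->swtimer.function = demo_wakeup;
}

/* Arm at an absolute expiry, delta_ns from now, only while actually blocking. */
static void demo_arm(struct demo_ctx *ctx, u64 delta_ns)
{
	ktime_t expire = ktime_add_ns(ktime_get(), delta_ns);

	hrtimer_start(&ctx->swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
}

The pinned mode keeps the timer on the physical CPU where the vCPU last ran, where it is likely to resume, and the _HARD variant guarantees expiry in hard interrupt context rather than in the softirq thread that PREEMPT_RT would otherwise use.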