Diffstat (limited to 'arch/loongarch/kvm/timer.c')
-rw-r--r--  arch/loongarch/kvm/timer.c  158
1 file changed, 76 insertions, 82 deletions
diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c
index 284bf553fefe..32dc213374be 100644
--- a/arch/loongarch/kvm/timer.c
+++ b/arch/loongarch/kvm/timer.c
@@ -23,24 +23,6 @@ static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
 	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
 }
 
-/*
- * Push timer forward on timeout.
- * Handle an hrtimer event by push the hrtimer forward a period.
- */
-static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
-{
-	unsigned long cfg, period;
-
-	/* Add periodic tick to current expire time */
-	cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
-	if (cfg & CSR_TCFG_PERIOD) {
-		period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
-		hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
-		return HRTIMER_RESTART;
-	} else
-		return HRTIMER_NORESTART;
-}
-
 /* Low level hrtimer wake routine */
 enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
 {
@@ -50,7 +32,7 @@ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
 	kvm_queue_irq(vcpu, INT_TI);
 	rcuwait_wake_up(&vcpu->wait);
 
-	return kvm_count_timeout(vcpu);
+	return HRTIMER_NORESTART;
 }
 
 /*
@@ -65,40 +47,23 @@ void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
 }
 
 /*
- * Restore hard timer state and enable guest to access timer registers
- * without trap, should be called with irq disabled
- */
-void kvm_acquire_timer(struct kvm_vcpu *vcpu)
-{
-	unsigned long cfg;
-
-	cfg = read_csr_gcfg();
-	if (!(cfg & CSR_GCFG_TIT))
-		return;
-
-	/* Enable guest access to hard timer */
-	write_csr_gcfg(cfg & ~CSR_GCFG_TIT);
-
-	/*
-	 * Freeze the soft-timer and sync the guest stable timer with it. We do
-	 * this with interrupts disabled to avoid latency.
-	 */
-	hrtimer_cancel(&vcpu->arch.swtimer);
-}
-
-/*
  * Restore soft timer state from saved context.
  */
 void kvm_restore_timer(struct kvm_vcpu *vcpu)
 {
-	unsigned long cfg, delta, period;
+	unsigned long cfg, estat;
+	unsigned long ticks, delta, period;
 	ktime_t expire, now;
 	struct loongarch_csrs *csr = vcpu->arch.csr;
 
 	/*
 	 * Set guest stable timer cfg csr
+	 * Disable timer before restore estat CSR register, avoid to
+	 * get invalid timer interrupt for old timer cfg
 	 */
 	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
+
+	write_gcsr_timercfg(0);
 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
 	if (!(cfg & CSR_TCFG_EN)) {
@@ -108,23 +73,56 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
 	}
 
 	/*
+	 * Freeze the soft-timer and sync the guest stable timer with it.
+	 */
+	if (kvm_vcpu_is_blocking(vcpu))
+		hrtimer_cancel(&vcpu->arch.swtimer);
+
+	/*
+	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
+	 * If oneshot timer is fired, CSR TVAL will be -1, there are two
+	 * conditions:
+	 *  1) timer is fired during exiting to host
+	 *  2) timer is fired and vm is doing timer irq, and then exiting to
+	 *     host. Host should not inject timer irq to avoid spurious
+	 *     timer interrupt again
+	 */
+	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
+	estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+	if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
+		/*
+		 * Writing 0 to LOONGARCH_CSR_TVAL will inject timer irq
+		 * and set CSR TVAL with -1
+		 */
+		write_gcsr_timertick(0);
+
+		/*
+		 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
+		 * timer interrupt, and CSR TVAL keeps unchanged with -1, it
+		 * avoids spurious timer interrupt
+		 */
+		if (!(estat & CPU_TIMER))
+			gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
+		return;
+	}
+
+	/*
 	 * Set remainder tick value if not expired
 	 */
+	delta = 0;
 	now = ktime_get();
 	expire = vcpu->arch.expire;
 	if (ktime_before(now, expire))
 		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
-	else {
-		if (cfg & CSR_TCFG_PERIOD) {
-			period = cfg & CSR_TCFG_VAL;
-			delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
-			delta = period - (delta % period);
-		} else
-			delta = 0;
+	else if (cfg & CSR_TCFG_PERIOD) {
+		period = cfg & CSR_TCFG_VAL;
+		delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
+		delta = period - (delta % period);
+
 		/*
 		 * Inject timer here though sw timer should inject timer
 		 * interrupt async already, since sw timer may be cancelled
-		 * during injecting intr async in function kvm_acquire_timer
+		 * during injecting intr async
 		 */
 		kvm_queue_irq(vcpu, INT_TI);
 	}
@@ -139,27 +137,36 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
  */
 static void _kvm_save_timer(struct kvm_vcpu *vcpu)
 {
-	unsigned long ticks, delta;
+	unsigned long ticks, delta, cfg;
 	ktime_t expire;
 	struct loongarch_csrs *csr = vcpu->arch.csr;
 
+	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
 	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
-	delta = tick_to_ns(vcpu, ticks);
+
+	/*
+	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
+	 * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG
+	 * If oneshot timer is fired, CSR TVAL will be -1
+	 * Here judge one-shot timer fired by checking whether TVAL is larger
+	 * than TCFG
+	 */
+	if (ticks < cfg)
+		delta = tick_to_ns(vcpu, ticks);
+	else
+		delta = 0;
+
 	expire = ktime_add_ns(ktime_get(), delta);
 	vcpu->arch.expire = expire;
-	if (ticks) {
-		/*
-		 * Update hrtimer to use new timeout
-		 * HRTIMER_MODE_PINNED is suggested since vcpu may run in
-		 * the same physical cpu in next time
-		 */
-		hrtimer_cancel(&vcpu->arch.swtimer);
-		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
-	} else
+	if (kvm_vcpu_is_blocking(vcpu)) {
+
 		/*
-		 * Inject timer interrupt so that hall polling can dectect and exit
+		 * HRTIMER_MODE_PINNED_HARD is suggested since vcpu may run in
+		 * the same physical cpu in next time, and the timer should run
+		 * in hardirq context even in the PREEMPT_RT case.
 		 */
-		kvm_queue_irq(vcpu, INT_TI);
+		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
+	}
 }
 
 /*
@@ -168,30 +175,17 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
  */
 void kvm_save_timer(struct kvm_vcpu *vcpu)
 {
-	unsigned long cfg;
 	struct loongarch_csrs *csr = vcpu->arch.csr;
 
 	preempt_disable();
-	cfg = read_csr_gcfg();
-	if (!(cfg & CSR_GCFG_TIT)) {
-		/* Disable guest use of hard timer */
-		write_csr_gcfg(cfg | CSR_GCFG_TIT);
-
-		/* Save hard timer state */
-		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
-		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
-		if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
-			_kvm_save_timer(vcpu);
-	}
+
+	/* Save hard timer state */
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
+	if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
+		_kvm_save_timer(vcpu);
 
 	/* Save timer-related state to vCPU context */
 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
 	preempt_enable();
 }
-
-void kvm_reset_timer(struct kvm_vcpu *vcpu)
-{
-	write_gcsr_timercfg(0);
-	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
-	hrtimer_cancel(&vcpu->arch.swtimer);
-}
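
Two small pieces of arithmetic carry this diff's timer emulation: a fired one-shot timer reloads CSR.TVAL with (unsigned long)-1, so comparing TVAL against TCFG detects expiry, and a periodic timer that lapsed while the vCPU was scheduled out has period - (elapsed % period) ticks left. The userspace C sketch below models both checks; the macro values, bit layout, and helper names are illustrative assumptions, not the kernel's definitions.

/*
 * Userspace sketch, not kernel code: models the expiry checks used in
 * kvm_restore_timer()/_kvm_save_timer() above. Bit layout and values
 * here are assumptions for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define CSR_TCFG_EN     (1UL << 0)	/* assumed: timer enable bit */
#define CSR_TCFG_PERIOD (1UL << 1)	/* assumed: periodic mode bit */

/*
 * Per the LoongArch Reference Manual Volume 1 Chapter 7.6.2, a fired
 * one-shot timer leaves TVAL at -1, which is always larger than the
 * programmed TCFG value.
 */
static bool oneshot_fired(unsigned long cfg, unsigned long tval)
{
	return !(cfg & CSR_TCFG_PERIOD) && tval > cfg;
}

/* Ticks left until the next periodic expiry, as in the restore path */
static unsigned long periodic_remainder(unsigned long period, unsigned long elapsed)
{
	return period - (elapsed % period);
}

int main(void)
{
	unsigned long cfg = 0x1000 | CSR_TCFG_EN;	/* one-shot, enabled */

	printf("running: fired=%d\n", oneshot_fired(cfg, 0x800UL));
	printf("expired: fired=%d\n", oneshot_fired(cfg, (unsigned long)-1));
	printf("remainder=%lu\n", periodic_remainder(1000, 2300));
	return 0;
}

Built with a plain cc invocation, this prints fired=0 while the one-shot timer is still counting, fired=1 once TVAL has wrapped to -1, and remainder=700 for a 1000-tick period with 2300 ticks elapsed.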
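The save path now arms the soft timer only while the vCPU is blocking, and it switches to HRTIMER_MODE_ABS_PINNED_HARD so the callback prefers the same physical CPU and runs in hardirq context even on PREEMPT_RT; the wakeup handler returns HRTIMER_NORESTART because periodic reload is emulated on restore rather than by re-arming the hrtimer. A minimal kernel-module sketch of that hrtimer pattern follows; the module, its names, and the one-second expiry are hypothetical, mirroring only the arm/cancel/NORESTART usage, not the KVM code itself.

/*
 * Hypothetical demo module, not part of the patch: shows the hrtimer
 * pattern the diff converges on (absolute expiry, pinned, hard-irq
 * context, handler does not re-arm itself).
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>

static struct hrtimer demo_timer;

/* Runs in hardirq context even on PREEMPT_RT because of the _HARD mode */
static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
{
	pr_info("demo timer fired\n");
	return HRTIMER_NORESTART;	/* one-shot: do not push the timer forward */
}

static int __init demo_init(void)
{
	/* Absolute expiry one second from now, akin to vcpu->arch.expire */
	ktime_t expire = ktime_add_ns(ktime_get(), NSEC_PER_SEC);

	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
	demo_timer.function = demo_wakeup;
	hrtimer_start(&demo_timer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Mirror of the freeze step: cancel before the timer goes away */
	hrtimer_cancel(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");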