Diffstat (limited to 'arch/riscv/kvm/vcpu.c')
-rw-r--r-- | arch/riscv/kvm/vcpu.c | 91
1 file changed, 91 insertions, 0 deletions
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 840f4586796f..cf6832365345 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -38,6 +38,86 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
+#ifdef CONFIG_FPU
+static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
+{
+	unsigned long isa = vcpu->arch.isa;
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+	cntx->sstatus &= ~SR_FS;
+	if (riscv_isa_extension_available(&isa, f) ||
+	    riscv_isa_extension_available(&isa, d))
+		cntx->sstatus |= SR_FS_INITIAL;
+	else
+		cntx->sstatus |= SR_FS_OFF;
+}
+
+static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
+{
+	cntx->sstatus &= ~SR_FS;
+	cntx->sstatus |= SR_FS_CLEAN;
+}
+
+static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
+					 unsigned long isa)
+{
+	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
+		if (riscv_isa_extension_available(&isa, d))
+			__kvm_riscv_fp_d_save(cntx);
+		else if (riscv_isa_extension_available(&isa, f))
+			__kvm_riscv_fp_f_save(cntx);
+		kvm_riscv_vcpu_fp_clean(cntx);
+	}
+}
+
+static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
+					    unsigned long isa)
+{
+	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
+		if (riscv_isa_extension_available(&isa, d))
+			__kvm_riscv_fp_d_restore(cntx);
+		else if (riscv_isa_extension_available(&isa, f))
+			__kvm_riscv_fp_f_restore(cntx);
+		kvm_riscv_vcpu_fp_clean(cntx);
+	}
+}
+
+static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
+{
+	/* No need to check host sstatus as it can be modified outside */
+	if (riscv_isa_extension_available(NULL, d))
+		__kvm_riscv_fp_d_save(cntx);
+	else if (riscv_isa_extension_available(NULL, f))
+		__kvm_riscv_fp_f_save(cntx);
+}
+
+static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
+{
+	if (riscv_isa_extension_available(NULL, d))
+		__kvm_riscv_fp_d_restore(cntx);
+	else if (riscv_isa_extension_available(NULL, f))
+		__kvm_riscv_fp_f_restore(cntx);
+}
+#else
+static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
+{
+}
+static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
+					 unsigned long isa)
+{
+}
+static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
+					    unsigned long isa)
+{
+}
+static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
+{
+}
+static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
+{
+}
+#endif
+
 #define KVM_RISCV_ISA_ALLOWED	(riscv_isa_extension_mask(a) | \
 				 riscv_isa_extension_mask(c) | \
 				 riscv_isa_extension_mask(d) | \
@@ -58,6 +138,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	memcpy(cntx, reset_cntx, sizeof(*cntx));
 
+	kvm_riscv_vcpu_fp_reset(vcpu);
+
 	kvm_riscv_vcpu_timer_reset(vcpu);
 
 	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
@@ -192,6 +274,7 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 			vcpu->arch.isa = reg_val;
 			vcpu->arch.isa &= riscv_isa_extension_base(NULL);
 			vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
+			kvm_riscv_vcpu_fp_reset(vcpu);
 		} else {
 			return -EOPNOTSUPP;
 		}
@@ -593,6 +676,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	kvm_riscv_vcpu_timer_restore(vcpu);
 
+	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
+	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
+					vcpu->arch.isa);
+
 	vcpu->cpu = cpu;
 }
 
@@ -602,6 +689,10 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 	vcpu->cpu = -1;
 
+	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
+				     vcpu->arch.isa);
+	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
+
 	csr_write(CSR_HGATP, 0);
 
 	csr->vsstatus = csr_read(CSR_VSSTATUS);
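The save/restore helpers above are driven by the sstatus.FS field: guest FP registers are written out only when FS is DIRTY, skipped when it is OFF, and marked CLEAN afterwards. The following standalone sketch (not part of the patch) illustrates that lazy-save policy in isolation; the SR_FS_* values mirror the RISC-V sstatus FS encoding (bits 14:13), while demo_context and demo_guest_fp_save are simplified, hypothetical stand-ins for the kernel's kvm_cpu_context and save helper.

/*
 * Minimal illustration of the lazy FP save policy: the expensive
 * register dump only happens when the FS field says DIRTY, and the
 * state is downgraded to CLEAN so a second save is skipped.
 */
#include <stdio.h>

#define SR_FS          0x00006000UL	/* sstatus.FS mask (bits 14:13) */
#define SR_FS_OFF      0x00000000UL
#define SR_FS_INITIAL  0x00002000UL
#define SR_FS_CLEAN    0x00004000UL
#define SR_FS_DIRTY    0x00006000UL

struct demo_context {
	unsigned long sstatus;
	int fp_saves;	/* how many times FP registers were written out */
};

static void demo_guest_fp_save(struct demo_context *cntx)
{
	/* Only pay for the register dump when the guest touched FP state */
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		cntx->fp_saves++;
		cntx->sstatus = (cntx->sstatus & ~SR_FS) | SR_FS_CLEAN;
	}
}

int main(void)
{
	struct demo_context cntx = { .sstatus = SR_FS_INITIAL, .fp_saves = 0 };

	demo_guest_fp_save(&cntx);	/* INITIAL: nothing to save */

	cntx.sstatus = (cntx.sstatus & ~SR_FS) | SR_FS_DIRTY;
	demo_guest_fp_save(&cntx);	/* DIRTY: one real save, now CLEAN */
	demo_guest_fp_save(&cntx);	/* CLEAN: skipped again */

	printf("FP register saves performed: %d\n", cntx.fp_saves);	/* 1 */
	return 0;
}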