Diffstat (limited to 'arch/riscv/include/asm/kvm_host.h')
-rw-r--r--    arch/riscv/include/asm/kvm_host.h    225
1 file changed, 115 insertions, 110 deletions
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 93f43a3e7886..24585304c02b 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -14,10 +14,16 @@
 #include <linux/kvm_types.h>
 #include <linux/spinlock.h>
 #include <asm/hwcap.h>
+#include <asm/kvm_aia.h>
+#include <asm/ptrace.h>
+#include <asm/kvm_tlb.h>
+#include <asm/kvm_vmid.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_insn.h>
 #include <asm/kvm_vcpu_sbi.h>
+#include <asm/kvm_vcpu_sbi_fwft.h>
 #include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_pmu.h>
 
 #define KVM_MAX_VCPUS			1024
 
@@ -25,35 +31,36 @@
 #define KVM_VCPU_MAX_FEATURES		0
 
+#define KVM_IRQCHIP_NUM_PINS		1024
+
 #define KVM_REQ_SLEEP \
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
 #define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
 #define KVM_REQ_FENCE_I			\
 	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
 #define KVM_REQ_HFENCE_VVMA_ALL		\
 	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_HFENCE			\
 	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(6)
 
-enum kvm_riscv_hfence_type {
-	KVM_RISCV_HFENCE_UNKNOWN = 0,
-	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
-	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
-	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
-	KVM_RISCV_HFENCE_VVMA_GVA,
-};
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
 
-struct kvm_riscv_hfence {
-	enum kvm_riscv_hfence_type type;
-	unsigned long asid;
-	unsigned long order;
-	gpa_t addr;
-	gpa_t size;
-};
+#define KVM_HEDELEG_DEFAULT		(BIT(EXC_INST_MISALIGNED) | \
+					 BIT(EXC_INST_ILLEGAL) | \
+					 BIT(EXC_BREAKPOINT) | \
+					 BIT(EXC_SYSCALL) | \
+					 BIT(EXC_INST_PAGE_FAULT) | \
+					 BIT(EXC_LOAD_PAGE_FAULT) | \
+					 BIT(EXC_STORE_PAGE_FAULT))
+
+#define KVM_HIDELEG_DEFAULT		(BIT(IRQ_VS_SOFT) | \
+					 BIT(IRQ_VS_TIMER) | \
+					 BIT(IRQ_VS_EXT))
 
-#define KVM_RISCV_VCPU_MAX_HFENCE	64
+#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
+					 KVM_DIRTY_LOG_INITIALLY_SET)
 
 struct kvm_vm_stat {
 	struct kvm_vm_stat_generic generic;
@@ -63,26 +70,23 @@ struct kvm_vcpu_stat {
 	struct kvm_vcpu_stat_generic generic;
 	u64 ecall_exit_stat;
 	u64 wfi_exit_stat;
+	u64 wrs_exit_stat;
 	u64 mmio_exit_user;
 	u64 mmio_exit_kernel;
 	u64 csr_exit_user;
 	u64 csr_exit_kernel;
 	u64 signal_exits;
 	u64 exits;
+	u64 instr_illegal_exits;
+	u64 load_misaligned_exits;
+	u64 store_misaligned_exits;
+	u64 load_access_exits;
+	u64 store_access_exits;
 };
 
 struct kvm_arch_memory_slot {
 };
 
-struct kvm_vmid {
-	/*
-	 * Writes to vmid_version and vmid happen with vmid_lock held
-	 * whereas reads happen without any lock held.
-	 */
-	unsigned long vmid_version;
-	unsigned long vmid;
-};
-
 struct kvm_arch {
 	/* G-stage vmid */
 	struct kvm_vmid vmid;
@@ -93,6 +97,12 @@ struct kvm_arch {
 
 	/* Guest Timer */
 	struct kvm_guest_timer timer;
+
+	/* AIA Guest/VM context */
+	struct kvm_aia aia;
+
+	/* KVM_CAP_RISCV_MP_STATE_RESET */
+	bool mp_state_reset;
 };
 
 struct kvm_cpu_trap {
@@ -140,6 +150,7 @@ struct kvm_cpu_context {
 	unsigned long sstatus;
 	unsigned long hstatus;
 	union __riscv_fp_state fp;
+	struct __riscv_v_ext_state vector;
 };
 
 struct kvm_vcpu_csr {
@@ -153,6 +164,23 @@ struct kvm_vcpu_csr {
 	unsigned long hvip;
 	unsigned long vsatp;
 	unsigned long scounteren;
+	unsigned long senvcfg;
+};
+
+struct kvm_vcpu_config {
+	u64 henvcfg;
+	u64 hstateen0;
+	unsigned long hedeleg;
+};
+
+struct kvm_vcpu_smstateen_csr {
+	unsigned long sstateen0;
+};
+
+struct kvm_vcpu_reset_state {
+	spinlock_t lock;
+	unsigned long pc;
+	unsigned long a1;
 };
 
 struct kvm_vcpu_arch {
@@ -174,6 +202,8 @@ struct kvm_vcpu_arch {
 	unsigned long host_sscratch;
 	unsigned long host_stvec;
 	unsigned long host_scounteren;
+	unsigned long host_senvcfg;
+	unsigned long host_sstateen0;
 
 	/* CPU context of Host */
 	struct kvm_cpu_context host_context;
@@ -184,11 +214,11 @@ struct kvm_vcpu_arch {
 	/* CPU CSR context of Guest VCPU */
 	struct kvm_vcpu_csr guest_csr;
 
-	/* CPU context upon Guest VCPU reset */
-	struct kvm_cpu_context guest_reset_context;
+	/* CPU Smstateen CSR context of Guest VCPU */
+	struct kvm_vcpu_smstateen_csr smstateen_csr;
 
-	/* CPU CSR context upon Guest VCPU reset */
-	struct kvm_vcpu_csr guest_reset_csr;
+	/* CPU reset state of Guest VCPU */
+	struct kvm_vcpu_reset_state reset_state;
 
 	/*
 	 * VCPU interrupts
@@ -199,8 +229,9 @@ struct kvm_vcpu_arch {
 	 * in irqs_pending. Our approach is modeled around multiple producer
 	 * and single consumer problem where the consumer is the VCPU itself.
 	 */
-	unsigned long irqs_pending;
-	unsigned long irqs_pending_mask;
+#define KVM_RISCV_VCPU_NR_IRQS	64
+	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
+	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
 
 	/* VCPU Timer */
 	struct kvm_vcpu_timer timer;
@@ -220,92 +251,49 @@ struct kvm_vcpu_arch {
 	/* SBI context */
 	struct kvm_vcpu_sbi_context sbi_context;
 
+	/* AIA VCPU context */
+	struct kvm_vcpu_aia aia_context;
+
 	/* Cache pages needed to program page tables with spinlock held */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
-	/* VCPU power-off state */
-	bool power_off;
+	/* VCPU power state */
+	struct kvm_mp_state mp_state;
+	spinlock_t mp_state_lock;
 
 	/* Don't run the VCPU (blocked) */
 	bool pause;
+
+	/* Performance monitoring context */
+	struct kvm_pmu pmu_context;
+
+	/* Firmware feature SBI extension context */
+	struct kvm_sbi_fwft fwft_context;
+
+	/* 'static' configurations which are set only once */
+	struct kvm_vcpu_config cfg;
+
+	/* SBI steal-time accounting */
+	struct {
+		gpa_t shmem;
+		u64 last_steal;
+	} sta;
 };
 
-static inline void kvm_arch_hardware_unsetup(void) {}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-
-#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12
-
-void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
-					  gpa_t gpa, gpa_t gpsz,
-					  unsigned long order);
-void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
-void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
-				     unsigned long order);
-void kvm_riscv_local_hfence_gvma_all(void);
-void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
-					  unsigned long asid,
-					  unsigned long gva,
-					  unsigned long gvsz,
-					  unsigned long order);
-void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
-					  unsigned long asid);
-void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
-				     unsigned long gva, unsigned long gvsz,
-				     unsigned long order);
-void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
-
-void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);
-
-void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
-void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
-void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
-void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
-
-void kvm_riscv_fence_i(struct kvm *kvm,
-		       unsigned long hbase, unsigned long hmask);
-void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask,
-				    gpa_t gpa, gpa_t gpsz,
-				    unsigned long order);
-void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask);
-void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask,
-				    unsigned long gva, unsigned long gvsz,
-				    unsigned long order, unsigned long asid);
-void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask,
-				    unsigned long asid);
-void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
-			       unsigned long hbase, unsigned long hmask,
-			       unsigned long gva, unsigned long gvsz,
-			       unsigned long order);
-void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
-			       unsigned long hbase, unsigned long hmask);
-
-int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
-			     phys_addr_t hpa, unsigned long size,
-			     bool writable, bool in_atomic);
-void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
-			      unsigned long size);
-int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
-			 struct kvm_memory_slot *memslot,
-			 gpa_t gpa, unsigned long hva, bool is_write);
-int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
-void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
-void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
-void kvm_riscv_gstage_mode_detect(void);
-unsigned long kvm_riscv_gstage_mode(void);
-int kvm_riscv_gstage_gpa_bits(void);
-
-void kvm_riscv_gstage_vmid_detect(void);
-unsigned long kvm_riscv_gstage_vmid_bits(void);
-int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
-bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
-void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context. For riscv, any event that arrives while a vCPU is
+ * loaded is considered to be "in guest".
+ */
+static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
+}
+
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
+int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
 
 void __kvm_riscv_unpriv_trap(void);
 
@@ -320,12 +308,29 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
 
+void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+				    u64 __user *uindices);
+int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
+			   const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
+			   const struct kvm_one_reg *reg);
+
 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
 int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
 void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
-bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
+bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
+
+/* Flags representing implementation specific details */
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_vsstage_tlb_no_gpa);
 
 #endif /* __RISCV_KVM_HOST_H__ */
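Editor's note, not part of the kernel patch above: the diff replaces the plain irqs_pending/irqs_pending_mask words with 64-bit DECLARE_BITMAP() fields while keeping the lockless multiple-producer/single-consumer scheme described in the header comment. The stand-alone user-space C sketch below only illustrates that pattern; C11 atomics stand in for the kernel's atomic bitops, and struct vcpu_irqs, queue_irq(), sync_irqs() and NR_IRQS are hypothetical names invented here for the example.

/*
 * Illustrative user-space sketch only -- NOT kernel code. Producers set a
 * bit in 'pending' and record the change in 'mask'; the single consumer
 * (the VCPU thread in the real code) drains the changed bits later.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_IRQS 64 /* mirrors KVM_RISCV_VCPU_NR_IRQS in the diff */

struct vcpu_irqs {
        _Atomic uint64_t pending; /* which IRQ numbers are pending */
        _Atomic uint64_t mask;    /* which bits changed since the last sync */
};

/* Producer side: any thread may queue an IRQ for the VCPU. */
static void queue_irq(struct vcpu_irqs *v, unsigned int irq)
{
        atomic_fetch_or(&v->pending, UINT64_C(1) << irq);
        atomic_fetch_or(&v->mask, UINT64_C(1) << irq);
}

/* Consumer side: atomically take the changed bits and report which are set. */
static uint64_t sync_irqs(struct vcpu_irqs *v)
{
        uint64_t changed = atomic_exchange(&v->mask, 0);

        return atomic_load(&v->pending) & changed;
}

int main(void)
{
        struct vcpu_irqs v = { 0 };

        queue_irq(&v, 9); /* some interrupt numbers, purely for illustration */
        queue_irq(&v, 5);

        printf("changed pending bits: %#llx\n",
               (unsigned long long)sync_irqs(&v));
        return 0;
}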
