Diffstat (limited to 'arch/riscv')
27 files changed, 1013 insertions, 132 deletions
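Most of this series teaches KVM RISC-V to virtualize the SBI PMU extension (EID 0x504D55, "PMU"): the guest discovers the extension through the SBI Base probe call, then drives counters with the PMU function IDs that the handlers below implement. As orientation before the diff, here is a minimal guest-side sketch of that discovery path; it is illustrative only — the sbi_ecall() helper is local to this example, and the EID/FID constants come from the SBI spec, not from this patch:

struct sbiret { long error; long value; };

/* Generic SBI call: EID in a7, FID in a6, first argument in a0. */
static struct sbiret sbi_ecall(unsigned long eid, unsigned long fid, unsigned long arg0)
{
	register unsigned long a0 __asm__("a0") = arg0;
	register unsigned long a1 __asm__("a1");
	register unsigned long a6 __asm__("a6") = fid;
	register unsigned long a7 __asm__("a7") = eid;

	__asm__ volatile("ecall" : "+r"(a0), "=r"(a1) : "r"(a6), "r"(a7) : "memory");
	return (struct sbiret){ (long)a0, (long)a1 };
}

#define SBI_EXT_BASE			0x10
#define SBI_EXT_BASE_PROBE_EXT		3
#define SBI_EXT_PMU			0x504D55	/* "PMU" */
#define SBI_EXT_PMU_NUM_COUNTERS	0

/* Returns the number of virtual counters, or -1 when the vcpu PMU is absent. */
static long guest_pmu_num_counters(void)
{
	/* kvm_sbi_ext_pmu_probe() in the diff below answers this probe. */
	if (!sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, SBI_EXT_PMU).value)
		return -1;
	return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0).value;
}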
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index faf2c2177094..82153960ac00 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -80,6 +80,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y) KBUILD_CFLAGS += -fno-omit-frame-pointer endif +# Avoid generating .eh_frame sections. +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables + KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi index 43bed6c0a84f..5235fd1c9cb6 100644 --- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi @@ -328,7 +328,7 @@ bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */ <0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */ - <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */ + <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x10000000>, /* mem */ <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */ num-lanes = <0x8>; interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>; diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h index 7226e2462584..2c0f4c887289 100644 --- a/arch/riscv/include/asm/alternative-macros.h +++ b/arch/riscv/include/asm/alternative-macros.h @@ -46,7 +46,7 @@ .macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ new_c_2, vendor_id_2, errata_id_2, enable_2 - ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1 + ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \errata_id_1, \enable_1 ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 .endm diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 86328e3acb02..64ad1937e714 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -70,7 +70,6 @@ static_assert(RISCV_ISA_EXT_ID_MAX <= RISCV_ISA_EXT_MAX); */ enum riscv_isa_ext_key { RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */ - RISCV_ISA_EXT_KEY_ZIHINTPAUSE, RISCV_ISA_EXT_KEY_SVINVAL, RISCV_ISA_EXT_KEY_MAX, }; @@ -91,8 +90,6 @@ static __always_inline int riscv_isa_ext2key(int num) return RISCV_ISA_EXT_KEY_FPU; case RISCV_ISA_EXT_d: return RISCV_ISA_EXT_KEY_FPU; - case RISCV_ISA_EXT_ZIHINTPAUSE: - return RISCV_ISA_EXT_KEY_ZIHINTPAUSE; case RISCV_ISA_EXT_SVINVAL: return RISCV_ISA_EXT_KEY_SVINVAL; default: diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index cf24da85b13c..cc7da66ee0c0 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -18,6 +18,7 @@ #include <asm/kvm_vcpu_insn.h> #include <asm/kvm_vcpu_sbi.h> #include <asm/kvm_vcpu_timer.h> +#include <asm/kvm_vcpu_pmu.h> #define KVM_MAX_VCPUS 1024 @@ -228,6 +229,9 @@ struct kvm_vcpu_arch { /* Don't run the VCPU (blocked) */ bool pause; + + /* Performance monitoring context */ + struct kvm_pmu pmu_context; }; static inline void kvm_arch_sync_events(struct kvm *kvm) {} diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h new file mode 100644 index 000000000000..395518a1664e --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Rivos Inc + * + * Authors: + * Atish Patra <atishp@rivosinc.com> + */ + +#ifndef __KVM_VCPU_RISCV_PMU_H +#define 
__KVM_VCPU_RISCV_PMU_H + +#include <linux/perf/riscv_pmu.h> +#include <asm/sbi.h> + +#ifdef CONFIG_RISCV_PMU_SBI +#define RISCV_KVM_MAX_FW_CTRS 32 +#define RISCV_KVM_MAX_HW_CTRS 32 +#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS) +static_assert(RISCV_KVM_MAX_COUNTERS <= 64); + +struct kvm_fw_event { + /* Current value of the event */ + unsigned long value; + + /* Event monitoring status */ + bool started; +}; + +/* Per virtual PMU counter data */ +struct kvm_pmc { + u8 idx; + struct perf_event *perf_event; + u64 counter_val; + union sbi_pmu_ctr_info cinfo; + /* Event monitoring status */ + bool started; + /* Monitoring event ID */ + unsigned long event_idx; +}; + +/* PMU data structure per vcpu */ +struct kvm_pmu { + struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS]; + struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS]; + /* Number of the virtual firmware counters available */ + int num_fw_ctrs; + /* Number of the virtual hardware counters available */ + int num_hw_ctrs; + /* A flag to indicate that pmu initialization is done */ + bool init_done; + /* Bitmap of all the virtual counters used */ + DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS); +}; + +#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context) +#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context)) + +#if defined(CONFIG_32BIT) +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ +{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +#else +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +#endif + +int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid); +int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask); + +int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, u64 ival, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, + unsigned long eidx, u64 evtdata, + struct kvm_vcpu_sbi_return *retdata); +int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, + struct kvm_vcpu_sbi_return *retdata); +void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); + +#else struct kvm_pmu { }; + +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{.base = 0, .count = 0, .func = NULL }, + +static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {} +static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) +{ + return 0; +} + +static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {} +static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {} +#endif /* CONFIG_RISCV_PMU_SBI */ +#endif /* !__KVM_VCPU_RISCV_PMU_H */ diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h
b/arch/riscv/include/asm/kvm_vcpu_sbi.h index f79478a85d2d..8425556af7d1 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -18,6 +18,13 @@ struct kvm_vcpu_sbi_context { int return_handled; }; +struct kvm_vcpu_sbi_return { + unsigned long out_val; + unsigned long err_val; + struct kvm_cpu_trap *utrap; + bool uexit; +}; + struct kvm_vcpu_sbi_extension { unsigned long extid_start; unsigned long extid_end; @@ -27,8 +34,10 @@ struct kvm_vcpu_sbi_extension { * specific error codes. */ int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, struct kvm_cpu_trap *utrap, - bool *exit); + struct kvm_vcpu_sbi_return *retdata); + + /* Extension specific probe function */ + unsigned long (*probe)(struct kvm_vcpu *vcpu); }; void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run); diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 4ca7fbacff42..945b7be249c1 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -169,9 +169,9 @@ enum sbi_pmu_fw_generic_events_t { SBI_PMU_FW_ILLEGAL_INSN = 4, SBI_PMU_FW_SET_TIMER = 5, SBI_PMU_FW_IPI_SENT = 6, - SBI_PMU_FW_IPI_RECVD = 7, + SBI_PMU_FW_IPI_RCVD = 7, SBI_PMU_FW_FENCE_I_SENT = 8, - SBI_PMU_FW_FENCE_I_RECVD = 9, + SBI_PMU_FW_FENCE_I_RCVD = 9, SBI_PMU_FW_SFENCE_VMA_SENT = 10, SBI_PMU_FW_SFENCE_VMA_RCVD = 11, SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12, @@ -215,6 +215,9 @@ enum sbi_pmu_ctr_type { #define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06 #define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01 +#define SBI_PMU_EVENT_CACHE_ID_SHIFT 3 +#define SBI_PMU_EVENT_CACHE_OP_SHIFT 1 + #define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF /* Flags defined for config matching function */ diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h index fa70cfe507aa..14f5d27783b8 100644 --- a/arch/riscv/include/asm/vdso/processor.h +++ b/arch/riscv/include/asm/vdso/processor.h @@ -4,30 +4,26 @@ #ifndef __ASSEMBLY__ -#include <linux/jump_label.h> #include <asm/barrier.h> -#include <asm/hwcap.h> static inline void cpu_relax(void) { - if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) { #ifdef __riscv_muldiv - int dummy; - /* In lieu of a halt instruction, induce a long-latency stall. */ - __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); + int dummy; + /* In lieu of a halt instruction, induce a long-latency stall. */ + __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); #endif - } else { - /* - * Reduce instruction retirement. - * This assumes the PC changes. - */ -#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE - __asm__ __volatile__ ("pause"); + +#ifdef __riscv_zihintpause + /* + * Reduce instruction retirement. + * This assumes the PC changes. 
+ */ + __asm__ __volatile__ ("pause"); #else - /* Encoding of the pause instruction */ - __asm__ __volatile__ (".4byte 0x100000F"); + /* Encoding of the pause instruction */ + __asm__ __volatile__ (".4byte 0x100000F"); #endif - } barrier(); } diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index b865046e4dbb..4bf6c449d78b 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -326,7 +326,7 @@ clear_bss_done: call soc_early_init tail start_kernel -#if CONFIG_RISCV_BOOT_SPINWAIT +#ifdef CONFIG_RISCV_BOOT_SPINWAIT .Lsecondary_start: /* Set trap vector to spin forever to help debug */ la a3, .Lsecondary_park diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index f21592d20306..41c7481afde3 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -48,6 +48,21 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) post_kprobe_handler(p, kcb, regs); } +static bool __kprobes arch_check_kprobe(struct kprobe *p) +{ + unsigned long tmp = (unsigned long)p->addr - p->offset; + unsigned long addr = (unsigned long)p->addr; + + while (tmp <= addr) { + if (tmp == addr) + return true; + + tmp += GET_INSN_LENGTH(*(u16 *)tmp); + } + + return false; +} + int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long probe_addr = (unsigned long)p->addr; @@ -55,6 +70,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) if (probe_addr & 0x1) return -EILSEQ; + if (!arch_check_kprobe(p)) + return -EILSEQ; + /* copy instruction */ p->opcode = *p->addr; diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c index d73e96f6ed7c..a20568bd1f1a 100644 --- a/arch/riscv/kernel/probes/simulate-insn.c +++ b/arch/riscv/kernel/probes/simulate-insn.c @@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg u32 rd_index = (opcode >> 7) & 0x1f; u32 rs1_index = (opcode >> 15) & 0x1f; - ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); + ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); if (!ret) return ret; - ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); + ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); if (!ret) return ret; diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 3373df413c88..ddb2afba6d25 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -39,7 +39,6 @@ static DECLARE_COMPLETION(cpu_running); void __init smp_prepare_boot_cpu(void) { - init_cpu_topology(); } void __init smp_prepare_cpus(unsigned int max_cpus) @@ -48,6 +47,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) int ret; unsigned int curr_cpuid; + init_cpu_topology(); + curr_cpuid = smp_processor_id(); store_cpu_topology(curr_cpuid); numa_store_cpu_info(curr_cpuid); diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 019df9208bdd..278e97c06e0a 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -25,3 +25,4 @@ kvm-y += vcpu_sbi_base.o kvm-y += vcpu_sbi_replace.o kvm-y += vcpu_sbi_hsm.o kvm-y += vcpu_timer.o +kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index e2da56ed9069..41ad7639a17b 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -39,7 +39,8 @@ int kvm_arch_hardware_enable(void) hideleg |= (1UL << IRQ_VS_EXT); csr_write(CSR_HIDELEG, hideleg); - csr_write(CSR_HCOUNTEREN, -1UL); + /* VS should access only the time counter directly. 
Everything else should trap */ + csr_write(CSR_HCOUNTEREN, 0x02); csr_write(CSR_HVIP, 0); diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index 66ef19676fe4..78211aed36fa 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -585,7 +585,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (!kvm->arch.pgd) return false; - WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE); + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT, &ptep, &ptep_level)) @@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (!kvm->arch.pgd) return false; - WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE); + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT, &ptep, &ptep_level)) @@ -645,12 +645,12 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, if (logging || (vma->vm_flags & VM_PFNMAP)) vma_pagesize = PAGE_SIZE; - if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE) + if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; mmap_read_unlock(current->mm); - if (vma_pagesize != PGDIR_SIZE && + if (vma_pagesize != PUD_SIZE && vma_pagesize != PMD_SIZE && vma_pagesize != PAGE_SIZE) { kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize); diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c index 309d79b3e5cd..b797f7cc0824 100644 --- a/arch/riscv/kvm/tlb.c +++ b/arch/riscv/kvm/tlb.c @@ -181,6 +181,7 @@ void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu) void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu) { + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD); local_flush_icache_all(); } @@ -264,15 +265,18 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu) d.addr, d.size, d.order); break; case KVM_RISCV_HFENCE_VVMA_ASID_GVA: + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD); kvm_riscv_local_hfence_vvma_asid_gva( READ_ONCE(v->vmid), d.asid, d.addr, d.size, d.order); break; case KVM_RISCV_HFENCE_VVMA_ASID_ALL: + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD); kvm_riscv_local_hfence_vvma_asid_all( READ_ONCE(v->vmid), d.asid); break; case KVM_RISCV_HFENCE_VVMA_GVA: + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD); kvm_riscv_local_hfence_vvma_gva( READ_ONCE(v->vmid), d.addr, d.size, d.order); diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 7c08567097f0..7d010b0be54e 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -138,6 +138,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) WRITE_ONCE(vcpu->arch.irqs_pending, 0); WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0); + kvm_riscv_vcpu_pmu_reset(vcpu); + vcpu->arch.hfence_head = 0; vcpu->arch.hfence_tail = 0; memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); @@ -194,6 +196,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Setup VCPU timer */ kvm_riscv_vcpu_timer_init(vcpu); + /* setup performance monitoring */ + kvm_riscv_vcpu_pmu_init(vcpu); + /* Reset VCPU */ kvm_riscv_reset_vcpu(vcpu); @@ -216,6 +221,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) /* Cleanup VCPU timer */ kvm_riscv_vcpu_timer_deinit(vcpu); + kvm_riscv_vcpu_pmu_deinit(vcpu); + /* Free unused pages pre-allocated for G-stage page table mappings */ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); } diff --git 
a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c index c9f741ab26f5..4ea101a73d8b 100644 --- a/arch/riscv/kvm/vcpu_exit.c +++ b/arch/riscv/kvm/vcpu_exit.c @@ -160,6 +160,9 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu, /* Set Guest PC to Guest exception vector */ vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC); + + /* Set Guest privilege mode to supervisor */ + vcpu->arch.guest_context.sstatus |= SR_SPP; } /* @@ -179,6 +182,12 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, ret = -EFAULT; run->exit_reason = KVM_EXIT_UNKNOWN; switch (trap->scause) { + case EXC_INST_ILLEGAL: + if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) { + kvm_riscv_vcpu_trap_redirect(vcpu, trap); + ret = 1; + } + break; case EXC_VIRTUAL_INST_FAULT: if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap); diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c index 0bb52761a3f7..f689337b78ff 100644 --- a/arch/riscv/kvm/vcpu_insn.c +++ b/arch/riscv/kvm/vcpu_insn.c @@ -213,7 +213,9 @@ struct csr_func { unsigned long wr_mask); }; -static const struct csr_func csr_funcs[] = { }; +static const struct csr_func csr_funcs[] = { + KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS +}; /** * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c new file mode 100644 index 000000000000..86391a5061dd --- /dev/null +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -0,0 +1,633 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023 Rivos Inc + * + * Authors: + * Atish Patra <atishp@rivosinc.com> + */ + +#define pr_fmt(fmt) "riscv-kvm-pmu: " fmt +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <linux/perf/riscv_pmu.h> +#include <asm/csr.h> +#include <asm/kvm_vcpu_sbi.h> +#include <asm/kvm_vcpu_pmu.h> +#include <linux/bitops.h> + +#define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs) +#define get_event_type(x) (((x) & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16) +#define get_event_code(x) ((x) & SBI_PMU_EVENT_IDX_CODE_MASK) + +static enum perf_hw_id hw_event_perf_map[SBI_PMU_HW_GENERAL_MAX] = { + [SBI_PMU_HW_CPU_CYCLES] = PERF_COUNT_HW_CPU_CYCLES, + [SBI_PMU_HW_INSTRUCTIONS] = PERF_COUNT_HW_INSTRUCTIONS, + [SBI_PMU_HW_CACHE_REFERENCES] = PERF_COUNT_HW_CACHE_REFERENCES, + [SBI_PMU_HW_CACHE_MISSES] = PERF_COUNT_HW_CACHE_MISSES, + [SBI_PMU_HW_BRANCH_INSTRUCTIONS] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS, + [SBI_PMU_HW_BRANCH_MISSES] = PERF_COUNT_HW_BRANCH_MISSES, + [SBI_PMU_HW_BUS_CYCLES] = PERF_COUNT_HW_BUS_CYCLES, + [SBI_PMU_HW_STALLED_CYCLES_FRONTEND] = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND, + [SBI_PMU_HW_STALLED_CYCLES_BACKEND] = PERF_COUNT_HW_STALLED_CYCLES_BACKEND, + [SBI_PMU_HW_REF_CPU_CYCLES] = PERF_COUNT_HW_REF_CPU_CYCLES, +}; + +static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc) +{ + u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0); + u64 sample_period; + + if (!pmc->counter_val) + sample_period = counter_val_mask + 1; + else + sample_period = (-pmc->counter_val) & counter_val_mask; + + return sample_period; +} + +static u32 kvm_pmu_get_perf_event_type(unsigned long eidx) +{ + enum sbi_pmu_event_type etype = get_event_type(eidx); + u32 type = PERF_TYPE_MAX; + + switch (etype) { + case SBI_PMU_EVENT_TYPE_HW: + type = PERF_TYPE_HARDWARE; + break; + case SBI_PMU_EVENT_TYPE_CACHE: + type = PERF_TYPE_HW_CACHE; + break; + case SBI_PMU_EVENT_TYPE_RAW: + case SBI_PMU_EVENT_TYPE_FW: + type = PERF_TYPE_RAW; + 
break; + default: + break; + } + + return type; +} + +static bool kvm_pmu_is_fw_event(unsigned long eidx) +{ + return get_event_type(eidx) == SBI_PMU_EVENT_TYPE_FW; +} + +static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) +{ + if (pmc->perf_event) { + perf_event_disable(pmc->perf_event); + perf_event_release_kernel(pmc->perf_event); + pmc->perf_event = NULL; + } +} + +static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code) +{ + return hw_event_perf_map[sbi_event_code]; +} + +static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code) +{ + u64 config = U64_MAX; + unsigned int cache_type, cache_op, cache_result; + + /* All the cache event masks lie within 0xFF. No separate masking is necessary */ + cache_type = (sbi_event_code & SBI_PMU_EVENT_CACHE_ID_CODE_MASK) >> + SBI_PMU_EVENT_CACHE_ID_SHIFT; + cache_op = (sbi_event_code & SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK) >> + SBI_PMU_EVENT_CACHE_OP_SHIFT; + cache_result = sbi_event_code & SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK; + + if (cache_type >= PERF_COUNT_HW_CACHE_MAX || + cache_op >= PERF_COUNT_HW_CACHE_OP_MAX || + cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return config; + + config = cache_type | (cache_op << 8) | (cache_result << 16); + + return config; +} + +static u64 kvm_pmu_get_perf_event_config(unsigned long eidx, uint64_t evt_data) +{ + enum sbi_pmu_event_type etype = get_event_type(eidx); + u32 ecode = get_event_code(eidx); + u64 config = U64_MAX; + + switch (etype) { + case SBI_PMU_EVENT_TYPE_HW: + if (ecode < SBI_PMU_HW_GENERAL_MAX) + config = kvm_pmu_get_perf_event_hw_config(ecode); + break; + case SBI_PMU_EVENT_TYPE_CACHE: + config = kvm_pmu_get_perf_event_cache_config(ecode); + break; + case SBI_PMU_EVENT_TYPE_RAW: + config = evt_data & RISCV_PMU_RAW_EVENT_MASK; + break; + case SBI_PMU_EVENT_TYPE_FW: + if (ecode < SBI_PMU_FW_MAX) + config = (1ULL << 63) | ecode; + break; + default: + break; + } + + return config; +} + +static int kvm_pmu_get_fixed_pmc_index(unsigned long eidx) +{ + u32 etype = kvm_pmu_get_perf_event_type(eidx); + u32 ecode = get_event_code(eidx); + + if (etype != SBI_PMU_EVENT_TYPE_HW) + return -EINVAL; + + if (ecode == SBI_PMU_HW_CPU_CYCLES) + return 0; + else if (ecode == SBI_PMU_HW_INSTRUCTIONS) + return 2; + else + return -EINVAL; +} + +static int kvm_pmu_get_programmable_pmc_index(struct kvm_pmu *kvpmu, unsigned long eidx, + unsigned long cbase, unsigned long cmask) +{ + int ctr_idx = -1; + int i, pmc_idx; + int min, max; + + if (kvm_pmu_is_fw_event(eidx)) { + /* Firmware counters are mapped 1:1 starting from num_hw_ctrs for simplicity */ + min = kvpmu->num_hw_ctrs; + max = min + kvpmu->num_fw_ctrs; + } else { + /* First 3 counters are reserved for fixed counters */ + min = 3; + max = kvpmu->num_hw_ctrs; + } + + for_each_set_bit(i, &cmask, BITS_PER_LONG) { + pmc_idx = i + cbase; + if ((pmc_idx >= min && pmc_idx < max) && + !test_bit(pmc_idx, kvpmu->pmc_in_use)) { + ctr_idx = pmc_idx; + break; + } + } + + return ctr_idx; +} + +static int pmu_get_pmc_index(struct kvm_pmu *pmu, unsigned long eidx, + unsigned long cbase, unsigned long cmask) +{ + int ret; + + /* Fixed counters need to have a fixed mapping as they have different widths */ + ret = kvm_pmu_get_fixed_pmc_index(eidx); + if (ret >= 0) + return ret; + + return kvm_pmu_get_programmable_pmc_index(pmu, eidx, cbase, cmask); +} + +static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, + unsigned long *out_val) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc; + u64 enabled, running; + int
fevent_code; + + pmc = &kvpmu->pmc[cidx]; + + if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { + fevent_code = get_event_code(pmc->event_idx); + pmc->counter_val = kvpmu->fw_event[fevent_code].value; + } else if (pmc->perf_event) { + pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); + } else { + return -EINVAL; + } + *out_val = pmc->counter_val; + + return 0; +} + +static int kvm_pmu_validate_counter_mask(struct kvm_pmu *kvpmu, unsigned long ctr_base, + unsigned long ctr_mask) +{ + /* Make sure we have a valid counter mask requested by the caller */ + if (!ctr_mask || (ctr_base + __fls(ctr_mask) >= kvm_pmu_num_counters(kvpmu))) + return -EINVAL; + + return 0; +} + +static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr, + unsigned long flags, unsigned long eidx, unsigned long evtdata) +{ + struct perf_event *event; + + kvm_pmu_release_perf_event(pmc); + attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata); + if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE) { + //TODO: Do we really want to clear the value in the hardware counter? + pmc->counter_val = 0; + } + + /* + * Set the default sample_period for now. The guest-specified value + * will be updated in the start call. + */ + attr->sample_period = kvm_pmu_get_sample_period(pmc); + + event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc); + if (IS_ERR(event)) { + pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event)); + return PTR_ERR(event); + } + + pmc->perf_event = event; + if (flags & SBI_PMU_CFG_FLAG_AUTO_START) + perf_event_enable(pmc->perf_event); + + return 0; +} + +int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_fw_event *fevent; + + if (!kvpmu || fid >= SBI_PMU_FW_MAX) + return -EINVAL; + + fevent = &kvpmu->fw_event[fid]; + if (fevent->started) + fevent->value++; + + return 0; +} + +int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + int cidx, ret = KVM_INSN_CONTINUE_NEXT_SEPC; + + if (!kvpmu || !kvpmu->init_done) { + /* + * In the absence of sscofpmf in the platform, the guest OS may use + * the legacy PMU driver to read cycle/instret. In that case, + * just return 0 to avoid any illegal trap. However, any other + * hpmcounter access should result in an illegal trap as they must + * be accessed through the SBI PMU only. + */ + if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) { + *val = 0; + return ret; + } else { + return KVM_INSN_ILLEGAL_TRAP; + } + } + + /* The counter CSRs are read-only.
Thus, any write should result in an illegal trap */ + if (wr_mask) + return KVM_INSN_ILLEGAL_TRAP; + + cidx = csr_num - CSR_CYCLE; + + if (pmu_ctr_read(vcpu, cidx, val) < 0) + return KVM_INSN_ILLEGAL_TRAP; + + return ret; +} + +int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, + struct kvm_vcpu_sbi_return *retdata) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + + retdata->out_val = kvm_pmu_num_counters(kvpmu); + + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, + struct kvm_vcpu_sbi_return *retdata) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + + if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) { + retdata->err_val = SBI_ERR_INVALID_PARAM; + return 0; + } + + retdata->out_val = kvpmu->pmc[cidx].cinfo.value; + + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, u64 ival, + struct kvm_vcpu_sbi_return *retdata) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + int i, pmc_index, sbiret = 0; + struct kvm_pmc *pmc; + int fevent_code; + + if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) { + sbiret = SBI_ERR_INVALID_PARAM; + goto out; + } + + /* Start the counters that have been configured and requested by the guest */ + for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { + pmc_index = i + ctr_base; + if (!test_bit(pmc_index, kvpmu->pmc_in_use)) + continue; + pmc = &kvpmu->pmc[pmc_index]; + if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE) + pmc->counter_val = ival; + if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { + fevent_code = get_event_code(pmc->event_idx); + if (fevent_code >= SBI_PMU_FW_MAX) { + sbiret = SBI_ERR_INVALID_PARAM; + goto out; + } + + /* Check if the counter was already started for some reason */ + if (kvpmu->fw_event[fevent_code].started) { + sbiret = SBI_ERR_ALREADY_STARTED; + continue; + } + + kvpmu->fw_event[fevent_code].started = true; + kvpmu->fw_event[fevent_code].value = pmc->counter_val; + } else if (pmc->perf_event) { + if (unlikely(pmc->started)) { + sbiret = SBI_ERR_ALREADY_STARTED; + continue; + } + perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc)); + perf_event_enable(pmc->perf_event); + pmc->started = true; + } else { + sbiret = SBI_ERR_INVALID_PARAM; + } + } + +out: + retdata->err_val = sbiret; + + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, + struct kvm_vcpu_sbi_return *retdata) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + int i, pmc_index, sbiret = 0; + u64 enabled, running; + struct kvm_pmc *pmc; + int fevent_code; + + if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) { + sbiret = SBI_ERR_INVALID_PARAM; + goto out; + } + + /* Stop the counters that have been configured and requested by the guest */ + for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { + pmc_index = i + ctr_base; + if (!test_bit(pmc_index, kvpmu->pmc_in_use)) + continue; + pmc = &kvpmu->pmc[pmc_index]; + if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { + fevent_code = get_event_code(pmc->event_idx); + if (fevent_code >= SBI_PMU_FW_MAX) { + sbiret = SBI_ERR_INVALID_PARAM; + goto out; + } + + if (!kvpmu->fw_event[fevent_code].started) + sbiret = SBI_ERR_ALREADY_STOPPED; + + kvpmu->fw_event[fevent_code].started = false; + } else if (pmc->perf_event) { + if (pmc->started) { + /* Stop counting the counter */ + perf_event_disable(pmc->perf_event); + pmc->started = false; + } else { + sbiret =
SBI_ERR_ALREADY_STOPPED; + } + + if (flags & SBI_PMU_STOP_FLAG_RESET) { + /* Release the counter if this is a reset request */ + pmc->counter_val += perf_event_read_value(pmc->perf_event, + &enabled, &running); + kvm_pmu_release_perf_event(pmc); + } + } else { + sbiret = SBI_ERR_INVALID_PARAM; + } + if (flags & SBI_PMU_STOP_FLAG_RESET) { + pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; + clear_bit(pmc_index, kvpmu->pmc_in_use); + } + } + +out: + retdata->err_val = sbiret; + + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flags, + unsigned long eidx, u64 evtdata, + struct kvm_vcpu_sbi_return *retdata) +{ + int ctr_idx, ret, sbiret = 0; + bool is_fevent; + unsigned long event_code; + u32 etype = kvm_pmu_get_perf_event_type(eidx); + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc = NULL; + struct perf_event_attr attr = { + .type = etype, + .size = sizeof(struct perf_event_attr), + .pinned = true, + /* + * It should never reach here if the platform doesn't support the sscofpmf + * extension as mode filtering won't work without it. + */ + .exclude_host = true, + .exclude_hv = true, + .exclude_user = !!(flags & SBI_PMU_CFG_FLAG_SET_UINH), + .exclude_kernel = !!(flags & SBI_PMU_CFG_FLAG_SET_SINH), + .config1 = RISCV_PMU_CONFIG1_GUEST_EVENTS, + }; + + if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) { + sbiret = SBI_ERR_INVALID_PARAM; + goto out; + } + + event_code = get_event_code(eidx); + is_fevent = kvm_pmu_is_fw_event(eidx); + if (is_fevent && event_code >= SBI_PMU_FW_MAX) { + sbiret = SBI_ERR_NOT_SUPPORTED; + goto out; + } + + /* + * SKIP_MATCH flag indicates the caller is aware of the assigned counter + * for this event. Just do a sanity check that it is already marked as used. + */ + if (flags & SBI_PMU_CFG_FLAG_SKIP_MATCH) { + if (!test_bit(ctr_base + __ffs(ctr_mask), kvpmu->pmc_in_use)) { + sbiret = SBI_ERR_FAILURE; + goto out; + } + ctr_idx = ctr_base + __ffs(ctr_mask); + } else { + ctr_idx = pmu_get_pmc_index(kvpmu, eidx, ctr_base, ctr_mask); + if (ctr_idx < 0) { + sbiret = SBI_ERR_NOT_SUPPORTED; + goto out; + } + } + + pmc = &kvpmu->pmc[ctr_idx]; + pmc->idx = ctr_idx; + + if (is_fevent) { + if (flags & SBI_PMU_CFG_FLAG_AUTO_START) + kvpmu->fw_event[event_code].started = true; + } else { + ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata); + if (ret) + return ret; + } + + set_bit(ctr_idx, kvpmu->pmc_in_use); + pmc->event_idx = eidx; + retdata->out_val = ctr_idx; +out: + retdata->err_val = sbiret; + + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, + struct kvm_vcpu_sbi_return *retdata) +{ + int ret; + + ret = pmu_ctr_read(vcpu, cidx, &retdata->out_val); + if (ret == -EINVAL) + retdata->err_val = SBI_ERR_INVALID_PARAM; + + return 0; +} + +void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) +{ + int i = 0, ret, num_hw_ctrs = 0, hpm_width = 0; + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc; + + /* + * PMU functionality should be only available to guests if privilege mode + * filtering is available in the host. Otherwise, the guest will always count + * events while the execution is in hypervisor mode. + */ + if (!riscv_isa_extension_available(NULL, SSCOFPMF)) + return; + + ret = riscv_pmu_get_hpm_info(&hpm_width, &num_hw_ctrs); + if (ret < 0 || !hpm_width || !num_hw_ctrs) + return; + + /* + * Increase the number of hardware counters to offset the time counter.
+ */ + kvpmu->num_hw_ctrs = num_hw_ctrs + 1; + kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX; + memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); + + if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) { + pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA"); + kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS; + } + + /* + * There is no correlation between the logical hardware counters and the virtual counters. + * However, we need to encode an hpmcounter CSR in the counter info field so that + * KVM can trap and emulate the read. This works well in the migration use case as + * KVM doesn't care if the actual hpmcounter is available in the hardware or not. + */ + for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) { + /* TIME CSR shouldn't be read from perf interface */ + if (i == 1) + continue; + pmc = &kvpmu->pmc[i]; + pmc->idx = i; + pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; + if (i < kvpmu->num_hw_ctrs) { + pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW; + if (i < 3) + /* CY, IR counters */ + pmc->cinfo.width = 63; + else + pmc->cinfo.width = hpm_width; + /* + * The CSR number doesn't have any relation with the logical + * hardware counters. The CSR numbers are encoded sequentially + * to avoid maintaining a map between the virtual counter + * and CSR number. + */ + pmc->cinfo.csr = CSR_CYCLE + i; + } else { + pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW; + pmc->cinfo.width = BITS_PER_LONG - 1; + } + } + + kvpmu->init_done = true; +} + +void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc; + int i; + + if (!kvpmu) + return; + + for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_MAX_COUNTERS) { + pmc = &kvpmu->pmc[i]; + pmc->counter_val = 0; + kvm_pmu_release_perf_event(pmc); + pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; + } + bitmap_zero(kvpmu->pmc_in_use, RISCV_MAX_COUNTERS); + memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); +} + +void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) +{ + kvm_riscv_vcpu_pmu_deinit(vcpu); +} diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index f96991d230bf..15fde15f9fb8 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -12,26 +12,6 @@ #include <asm/sbi.h> #include <asm/kvm_vcpu_sbi.h> -static int kvm_linux_err_map_sbi(int err) -{ - switch (err) { - case 0: - return SBI_SUCCESS; - case -EPERM: - return SBI_ERR_DENIED; - case -EINVAL: - return SBI_ERR_INVALID_PARAM; - case -EFAULT: - return SBI_ERR_INVALID_ADDRESS; - case -EOPNOTSUPP: - return SBI_ERR_NOT_SUPPORTED; - case -EALREADY: - return SBI_ERR_ALREADY_AVAILABLE; - default: - return SBI_ERR_FAILURE; - }; -} - #ifndef CONFIG_RISCV_SBI_V01 static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = { .extid_start = -1UL, @@ -40,6 +20,16 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = { }; #endif +#ifdef CONFIG_RISCV_PMU_SBI +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu; +#else +static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = { + .extid_start = -1UL, + .extid_end = -1UL, + .handler = NULL, +}; +#endif + static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_v01, &vcpu_sbi_ext_base, @@ -48,6 +38,7 @@ static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_rfence, &vcpu_sbi_ext_srst, &vcpu_sbi_ext_hsm, + &vcpu_sbi_ext_pmu, &vcpu_sbi_ext_experimental, &vcpu_sbi_ext_vendor, }; @@ -125,11 +116,14 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) { int ret = 1;
bool next_sepc = true; - bool userspace_exit = false; struct kvm_cpu_context *cp = &vcpu->arch.guest_context; const struct kvm_vcpu_sbi_extension *sbi_ext; - struct kvm_cpu_trap utrap = { 0 }; - unsigned long out_val = 0; + struct kvm_cpu_trap utrap = {0}; + struct kvm_vcpu_sbi_return sbi_ret = { + .out_val = 0, + .err_val = 0, + .utrap = &utrap, + }; bool ext_is_v01 = false; sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7); @@ -139,42 +133,46 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) cp->a7 <= SBI_EXT_0_1_SHUTDOWN) ext_is_v01 = true; #endif - ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit); + ret = sbi_ext->handler(vcpu, run, &sbi_ret); } else { /* Return error for unsupported SBI calls */ cp->a0 = SBI_ERR_NOT_SUPPORTED; goto ecall_done; } + /* + * When the SBI extension returns a Linux error code, it exits the ioctl + * loop and forwards the error to userspace. + */ + if (ret < 0) { + next_sepc = false; + goto ecall_done; + } + /* Handle special error cases i.e. trap, exit, or userspace forward */ - if (utrap.scause) { + if (sbi_ret.utrap->scause) { /* No need to increment sepc or exit ioctl loop */ ret = 1; - utrap.sepc = cp->sepc; - kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); + sbi_ret.utrap->sepc = cp->sepc; + kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap); next_sepc = false; goto ecall_done; } /* Exit ioctl loop or propagate the error code to the guest */ - if (userspace_exit) { + if (sbi_ret.uexit) { next_sepc = false; ret = 0; } else { - /** - * SBI extension handler always returns an Linux error code. Convert - * it to the SBI specific error code that can be propagated the SBI - * caller. - */ - ret = kvm_linux_err_map_sbi(ret); - cp->a0 = ret; + cp->a0 = sbi_ret.err_val; ret = 1; } ecall_done: if (next_sepc) cp->sepc += 4; - if (!ext_is_v01) - cp->a1 = out_val; + /* a1 should only be updated when we continue the ioctl loop */ + if (!ext_is_v01 && ret == 1) + cp->a1 = sbi_ret.out_val; return ret; } diff --git a/arch/riscv/kvm/vcpu_sbi_base.c b/arch/riscv/kvm/vcpu_sbi_base.c index 5d65c634d301..9945aff34c14 100644 --- a/arch/riscv/kvm/vcpu_sbi_base.c +++ b/arch/riscv/kvm/vcpu_sbi_base.c @@ -14,11 +14,11 @@ #include <asm/kvm_vcpu_sbi.h> static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *trap, bool *exit) + struct kvm_vcpu_sbi_return *retdata) { - int ret = 0; struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + const struct kvm_vcpu_sbi_extension *sbi_ext; + unsigned long *out_val = &retdata->out_val; switch (cp->a6) { case SBI_EXT_BASE_GET_SPEC_VERSION: @@ -42,9 +42,12 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, * forward it to the userspace */ kvm_riscv_vcpu_sbi_forward(vcpu, run); - *exit = true; - } else - *out_val = kvm_vcpu_sbi_find_ext(cp->a0) ? 1 : 0; + retdata->uexit = true; + } else { + sbi_ext = kvm_vcpu_sbi_find_ext(cp->a0); + *out_val = sbi_ext && sbi_ext->probe ?
+ sbi_ext->probe(vcpu) : !!sbi_ext; + } break; case SBI_EXT_BASE_GET_MVENDORID: *out_val = vcpu->arch.mvendorid; @@ -56,11 +59,11 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, *out_val = vcpu->arch.mimpid; break; default: - ret = -EOPNOTSUPP; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; break; } - return ret; + return 0; } const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = { @@ -70,17 +73,15 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = { }; static int kvm_sbi_ext_forward_handler(struct kvm_vcpu *vcpu, - struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *utrap, - bool *exit) + struct kvm_run *run, + struct kvm_vcpu_sbi_return *retdata) { /* * Both SBI experimental and vendor extensions are * unconditionally forwarded to userspace. */ kvm_riscv_vcpu_sbi_forward(vcpu, run); - *exit = true; + retdata->uexit = true; return 0; } diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c index 2e915cafd551..7dca0e9381d9 100644 --- a/arch/riscv/kvm/vcpu_sbi_hsm.c +++ b/arch/riscv/kvm/vcpu_sbi_hsm.c @@ -21,9 +21,9 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu) target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid); if (!target_vcpu) - return -EINVAL; + return SBI_ERR_INVALID_PARAM; if (!target_vcpu->arch.power_off) - return -EALREADY; + return SBI_ERR_ALREADY_AVAILABLE; reset_cntx = &target_vcpu->arch.guest_reset_context; /* start address */ @@ -42,7 +42,7 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu) static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu) { if (vcpu->arch.power_off) - return -EINVAL; + return SBI_ERR_FAILURE; kvm_riscv_vcpu_power_off(vcpu); @@ -57,7 +57,7 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu) target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid); if (!target_vcpu) - return -EINVAL; + return SBI_ERR_INVALID_PARAM; if (!target_vcpu->arch.power_off) return SBI_HSM_STATE_STARTED; else if (vcpu->stat.generic.blocking) @@ -67,9 +67,7 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu) } static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *utrap, - bool *exit) + struct kvm_vcpu_sbi_return *retdata) { int ret = 0; struct kvm_cpu_context *cp = &vcpu->arch.guest_context; @@ -88,27 +86,29 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, case SBI_EXT_HSM_HART_STATUS: ret = kvm_sbi_hsm_vcpu_get_status(vcpu); if (ret >= 0) { - *out_val = ret; - ret = 0; + retdata->out_val = ret; + retdata->err_val = 0; } - break; + return 0; case SBI_EXT_HSM_HART_SUSPEND: switch (cp->a0) { case SBI_HSM_SUSPEND_RET_DEFAULT: kvm_riscv_vcpu_wfi(vcpu); break; case SBI_HSM_SUSPEND_NON_RET_DEFAULT: - ret = -EOPNOTSUPP; + ret = SBI_ERR_NOT_SUPPORTED; break; default: - ret = -EINVAL; + ret = SBI_ERR_INVALID_PARAM; } break; default: - ret = -EOPNOTSUPP; + ret = SBI_ERR_NOT_SUPPORTED; } - return ret; + retdata->err_val = ret; + + return 0; } const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = { diff --git a/arch/riscv/kvm/vcpu_sbi_pmu.c b/arch/riscv/kvm/vcpu_sbi_pmu.c new file mode 100644 index 000000000000..7eca72df2cbd --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_pmu.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023 Rivos Inc + * + * Authors: + * Atish Patra <atishp@rivosinc.com> + */ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <asm/csr.h> +#include <asm/sbi.h> 
+#include <asm/kvm_vcpu_sbi.h> + +static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_vcpu_sbi_return *retdata) +{ + int ret = 0; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + unsigned long funcid = cp->a6; + u64 temp; + + if (!kvpmu->init_done) { + retdata->err_val = SBI_ERR_NOT_SUPPORTED; + return 0; + } + + switch (funcid) { + case SBI_EXT_PMU_NUM_COUNTERS: + ret = kvm_riscv_vcpu_pmu_num_ctrs(vcpu, retdata); + break; + case SBI_EXT_PMU_COUNTER_GET_INFO: + ret = kvm_riscv_vcpu_pmu_ctr_info(vcpu, cp->a0, retdata); + break; + case SBI_EXT_PMU_COUNTER_CFG_MATCH: +#if defined(CONFIG_32BIT) + temp = ((uint64_t)cp->a5 << 32) | cp->a4; +#else + temp = cp->a4; +#endif + /* + * This can fail if perf core framework fails to create an event. + * Forward the error to userspace because it's an error which + * happened within the host kernel. The other option would be + * to convert to an SBI error and forward to the guest. + */ + ret = kvm_riscv_vcpu_pmu_ctr_cfg_match(vcpu, cp->a0, cp->a1, + cp->a2, cp->a3, temp, retdata); + break; + case SBI_EXT_PMU_COUNTER_START: +#if defined(CONFIG_32BIT) + temp = ((uint64_t)cp->a4 << 32) | cp->a3; +#else + temp = cp->a3; +#endif + ret = kvm_riscv_vcpu_pmu_ctr_start(vcpu, cp->a0, cp->a1, cp->a2, + temp, retdata); + break; + case SBI_EXT_PMU_COUNTER_STOP: + ret = kvm_riscv_vcpu_pmu_ctr_stop(vcpu, cp->a0, cp->a1, cp->a2, retdata); + break; + case SBI_EXT_PMU_COUNTER_FW_READ: + ret = kvm_riscv_vcpu_pmu_ctr_read(vcpu, cp->a0, retdata); + break; + default: + retdata->err_val = SBI_ERR_NOT_SUPPORTED; + } + + return ret; +} + +static unsigned long kvm_sbi_ext_pmu_probe(struct kvm_vcpu *vcpu) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + + return kvpmu->init_done; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = { + .extid_start = SBI_EXT_PMU, + .extid_end = SBI_EXT_PMU, + .handler = kvm_sbi_ext_pmu_handler, + .probe = kvm_sbi_ext_pmu_probe, +}; diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c index 03a0198389f0..7c4d5d38a339 100644 --- a/arch/riscv/kvm/vcpu_sbi_replace.c +++ b/arch/riscv/kvm/vcpu_sbi_replace.c @@ -11,19 +11,21 @@ #include <linux/kvm_host.h> #include <asm/sbi.h> #include <asm/kvm_vcpu_timer.h> +#include <asm/kvm_vcpu_pmu.h> #include <asm/kvm_vcpu_sbi.h> static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *utrap, bool *exit) + struct kvm_vcpu_sbi_return *retdata) { - int ret = 0; struct kvm_cpu_context *cp = &vcpu->arch.guest_context; u64 next_cycle; - if (cp->a6 != SBI_EXT_TIME_SET_TIMER) - return -EINVAL; + if (cp->a6 != SBI_EXT_TIME_SET_TIMER) { + retdata->err_val = SBI_ERR_INVALID_PARAM; + return 0; + } + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER); #if __riscv_xlen == 32 next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0; #else @@ -31,7 +33,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, #endif kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle); - return ret; + return 0; } const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = { @@ -41,8 +43,7 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = { }; static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *utrap, bool *exit) + struct kvm_vcpu_sbi_return *retdata) { int ret = 0; unsigned long i; @@ -51,9 +52,12 @@ static int kvm_sbi_ext_ipi_handler(struct 
kvm_vcpu *vcpu, struct kvm_run *run, unsigned long hmask = cp->a0; unsigned long hbase = cp->a1; - if (cp->a6 != SBI_EXT_IPI_SEND_IPI) - return -EINVAL; + if (cp->a6 != SBI_EXT_IPI_SEND_IPI) { + retdata->err_val = SBI_ERR_INVALID_PARAM; + return 0; + } + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT); kvm_for_each_vcpu(i, tmp, vcpu->kvm) { if (hbase != -1UL) { if (tmp->vcpu_id < hbase) @@ -64,6 +68,7 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT); if (ret < 0) break; + kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD); } return ret; @@ -76,10 +81,8 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = { }; static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *utrap, bool *exit) + struct kvm_vcpu_sbi_return *retdata) { - int ret = 0; struct kvm_cpu_context *cp = &vcpu->arch.guest_context; unsigned long hmask = cp->a0; unsigned long hbase = cp->a1; @@ -88,6 +91,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run switch (funcid) { case SBI_EXT_RFENCE_REMOTE_FENCE_I: kvm_riscv_fence_i(vcpu->kvm, hbase, hmask); + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA: if (cp->a2 == 0 && cp->a3 == 0) @@ -95,6 +99,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run else kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask, cp->a2, cp->a3, PAGE_SHIFT); + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID: if (cp->a2 == 0 && cp->a3 == 0) @@ -105,6 +110,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run hbase, hmask, cp->a2, cp->a3, PAGE_SHIFT, cp->a4); + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT); break; case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA: case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID: @@ -116,10 +122,10 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run */ break; default: - ret = -EOPNOTSUPP; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; } - return ret; + return 0; } const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = { @@ -130,14 +136,12 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = { static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *utrap, bool *exit) + struct kvm_vcpu_sbi_return *retdata) { struct kvm_cpu_context *cp = &vcpu->arch.guest_context; unsigned long funcid = cp->a6; u32 reason = cp->a1; u32 type = cp->a0; - int ret = 0; switch (funcid) { case SBI_EXT_SRST_RESET: @@ -146,24 +150,24 @@ static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu, kvm_riscv_vcpu_sbi_system_reset(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN, reason); - *exit = true; + retdata->uexit = true; break; case SBI_SRST_RESET_TYPE_COLD_REBOOT: case SBI_SRST_RESET_TYPE_WARM_REBOOT: kvm_riscv_vcpu_sbi_system_reset(vcpu, run, KVM_SYSTEM_EVENT_RESET, reason); - *exit = true; + retdata->uexit = true; break; default: - ret = -EOPNOTSUPP; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; } break; default: - ret = -EOPNOTSUPP; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; } - return ret; + return 0; } const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = { diff --git a/arch/riscv/kvm/vcpu_sbi_v01.c b/arch/riscv/kvm/vcpu_sbi_v01.c index 489f225ee66d..8f4c4fa16227 100644 --- 
a/arch/riscv/kvm/vcpu_sbi_v01.c +++ b/arch/riscv/kvm/vcpu_sbi_v01.c @@ -14,9 +14,7 @@ #include <asm/kvm_vcpu_sbi.h> static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *utrap, - bool *exit) + struct kvm_vcpu_sbi_return *retdata) { ulong hmask; int i, ret = 0; @@ -24,6 +22,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_vcpu *rvcpu; struct kvm *kvm = vcpu->kvm; struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + struct kvm_cpu_trap *utrap = retdata->utrap; switch (cp->a7) { case SBI_EXT_0_1_CONSOLE_GETCHAR: @@ -33,7 +32,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, * handled in kernel so we forward these to user-space */ kvm_riscv_vcpu_sbi_forward(vcpu, run); - *exit = true; + retdata->uexit = true; break; case SBI_EXT_0_1_SET_TIMER: #if __riscv_xlen == 32 @@ -48,8 +47,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, break; case SBI_EXT_0_1_SEND_IPI: if (cp->a0) - hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, - utrap); + hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap); else hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; if (utrap->scause) @@ -65,14 +63,13 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, case SBI_EXT_0_1_SHUTDOWN: kvm_riscv_vcpu_sbi_system_reset(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN, 0); - *exit = true; + retdata->uexit = true; break; case SBI_EXT_0_1_REMOTE_FENCE_I: case SBI_EXT_0_1_REMOTE_SFENCE_VMA: case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID: if (cp->a0) - hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, - utrap); + hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap); else hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; if (utrap->scause) @@ -103,7 +100,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, } break; default: - ret = -EINVAL; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; break; }
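For reference, kvm_riscv_vcpu_pmu_init() above lays the virtual counters out as: index 0 = CY (fixed, width 63), index 1 = TM (skipped; the guest reads CSR_TIME directly), index 2 = IR (fixed, width 63), indexes 3..num_hw_ctrs-1 = programmable hpmcounters (width hpm_width), and indexes num_hw_ctrs..num_hw_ctrs+num_fw_ctrs-1 = SBI firmware counters. A small hypothetical helper (not part of the patch) makes those ranges explicit:

enum vctr_kind { VCTR_FIXED, VCTR_TIME, VCTR_PROGRAMMABLE, VCTR_FIRMWARE, VCTR_INVALID };

/* Mirrors the index ranges used by kvm_pmu_get_fixed_pmc_index() and
 * kvm_pmu_get_programmable_pmc_index() in vcpu_pmu.c above. */
static enum vctr_kind classify_vctr(int idx, int num_hw_ctrs, int num_fw_ctrs)
{
	if (idx < 0)
		return VCTR_INVALID;
	if (idx == 1)
		return VCTR_TIME;		/* TM: never backed by a perf event */
	if (idx == 0 || idx == 2)
		return VCTR_FIXED;		/* CY and IR */
	if (idx < num_hw_ctrs)
		return VCTR_PROGRAMMABLE;	/* trap-and-emulated hpmcounters */
	if (idx < num_hw_ctrs + num_fw_ctrs)
		return VCTR_FIRMWARE;		/* SBI firmware events */
	return VCTR_INVALID;
}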