From 52425a3b3c11cec58cf66e4c897fc1504f3911a9 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Thu, 14 Sep 2023 13:05:51 +1000 Subject: KVM: PPC: Introduce FPR/VR accessor functions Introduce accessor functions for floating point and vector registers like the ones that exist for GPRs. Use these to replace the existing FPR and VR accessor macros. This will be important later for Nested APIv2 support which requires additional functionality for accessing and modifying VCPU state. Signed-off-by: Gautam Menghani Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://msgid.link/20230914030600.16993-3-jniethe5@gmail.com --- arch/powerpc/include/asm/kvm_book3s.h | 55 +++++++++++++++++++++++++++++++++++ arch/powerpc/include/asm/kvm_booke.h | 10 +++++++ 2 files changed, 65 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index bbf5e2c5fe09..109a5f56767a 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -403,6 +403,61 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) return vcpu->arch.fault_dar; } +static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i) +{ + return vcpu->arch.fp.fpr[i][TS_FPROFFSET]; +} + +static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val) +{ + vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val; +} + +static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fp.fpscr; +} + +static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val) +{ + vcpu->arch.fp.fpscr = val; +} + + +static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j) +{ + return vcpu->arch.fp.fpr[i][j]; +} + +static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, + u64 val) +{ + vcpu->arch.fp.fpr[i][j] = val; +} + +#ifdef CONFIG_ALTIVEC +static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v) +{ + *v = vcpu->arch.vr.vr[i]; +} + +static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i, + vector128 *val) +{ + vcpu->arch.vr.vr[i] = *val; +} + +static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.vr.vscr.u[3]; +} + +static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val) +{ + vcpu->arch.vr.vscr.u[3] = val; +} +#endif + /* Expiry time of vcpu DEC relative to host TB */ static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu) { diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index 0c3401b2e19e..7c3291aa8922 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) return vcpu->arch.regs.nip; } +static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val) +{ + vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val; +} + +static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i) +{ + return vcpu->arch.fp.fpr[i][TS_FPROFFSET]; +} + #ifdef CONFIG_BOOKE static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) { -- cgit From 2a64bc673133346a7a3bd163f2e6048bd1788727 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Thu, 14 Sep 2023 13:05:52 +1000 Subject: KVM: PPC: Rename accessor generator macros More "wrapper" style accessor generating macros will be introduced for the nestedv2 guest support. Rename the existing macros with more descriptive names now so there is a consistent naming convention. 
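For illustration (not part of the patch): after this rename, KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64) still generates the same endian-aware pair that SHARED_WRAPPER(sprg4, 64) did. A sketch of the expansion, based on the macro bodies below:

static inline u64 kvmppc_get_sprg4(struct kvm_vcpu *vcpu)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be64_to_cpu(vcpu->arch.shared->sprg4);
	else
		return le64_to_cpu(vcpu->arch.shared->sprg4);
}

static inline void kvmppc_set_sprg4(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sprg4 = cpu_to_be64(val);
	else
		vcpu->arch.shared->sprg4 = cpu_to_le64(val);
}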
Reviewed-by: Nicholas Piggin Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://msgid.link/20230914030600.16993-4-jniethe5@gmail.com --- arch/powerpc/include/asm/kvm_ppc.h | 60 +++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 30 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index b4da8514af43..004998b267e6 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -927,19 +927,19 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu) #endif } -#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \ +#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \ static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ { \ return mfspr(bookehv_spr); \ } \ -#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \ +#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \ { \ mtspr(bookehv_spr, val); \ } \ -#define SHARED_WRAPPER_GET(reg, size) \ +#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size) \ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ { \ if (kvmppc_shared_big_endian(vcpu)) \ @@ -948,7 +948,7 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ return le##size##_to_cpu(vcpu->arch.shared->reg); \ } \ -#define SHARED_WRAPPER_SET(reg, size) \ +#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size) \ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ { \ if (kvmppc_shared_big_endian(vcpu)) \ @@ -957,36 +957,36 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ vcpu->arch.shared->reg = cpu_to_le##size(val); \ } \ -#define SHARED_WRAPPER(reg, size) \ - SHARED_WRAPPER_GET(reg, size) \ - SHARED_WRAPPER_SET(reg, size) \ +#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size) \ + KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size) \ + KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size) \ -#define SPRNG_WRAPPER(reg, bookehv_spr) \ - SPRNG_WRAPPER_GET(reg, bookehv_spr) \ - SPRNG_WRAPPER_SET(reg, bookehv_spr) \ +#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \ + KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \ + KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \ #ifdef CONFIG_KVM_BOOKE_HV -#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \ - SPRNG_WRAPPER(reg, bookehv_spr) \ +#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr) \ + KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \ #else -#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \ - SHARED_WRAPPER(reg, size) \ +#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr) \ + KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size) \ #endif -SHARED_WRAPPER(critical, 64) -SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0) -SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1) -SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2) -SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3) -SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) -SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1) -SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR) -SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR) -SHARED_WRAPPER_GET(msr, 64) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2) 
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64) static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) { if (kvmppc_shared_big_endian(vcpu)) @@ -994,12 +994,12 @@ static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) else vcpu->arch.shared->msr = cpu_to_le64(val); } -SHARED_WRAPPER(dsisr, 32) -SHARED_WRAPPER(int_pending, 32) -SHARED_WRAPPER(sprg4, 64) -SHARED_WRAPPER(sprg5, 64) -SHARED_WRAPPER(sprg6, 64) -SHARED_WRAPPER(sprg7, 64) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64) static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr) { -- cgit From 7028ac8d174f28220f0e2de0cb3346cd3c31976d Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Thu, 14 Sep 2023 13:05:53 +1000 Subject: KVM: PPC: Use accessors for VCPU registers Introduce accessor generator macros for VCPU registers. Use the accessor functions to replace direct accesses to these registers. This will be important later for Nested APIv2 support which requires additional functionality for accessing and modifying VCPU state. Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://msgid.link/20230914030600.16993-5-jniethe5@gmail.com --- arch/powerpc/include/asm/kvm_book3s.h | 37 ++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 109a5f56767a..1a220cd63227 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -458,10 +458,45 @@ static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val) } #endif +#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size) \ +static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ +{ \ + \ + vcpu->arch.reg = val; \ +} + +#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size) \ +static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ +{ \ + return vcpu->arch.reg; \ +} + +#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size) \ + KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size) \ + KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size) \ + +KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32) +KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64) +KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64) +KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64) +KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64) +KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64) +KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64) + +static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.dec_expires; +} + +static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val) +{ + vcpu->arch.dec_expires = val; +} + /* Expiry time of vcpu DEC relative to host TB */ static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu) { - return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset; + return kvmppc_get_dec_expires(vcpu) - vcpu->arch.vcore->tb_offset; } static inline bool is_kvmppc_resume_guest(int r) -- cgit From
c8ae9b3c6e7f22a4b71e42edb0fc3942aa7a7c42 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Thu, 14 Sep 2023 13:05:54 +1000 Subject: KVM: PPC: Use accessors for VCORE registers Introduce accessor generator macros for VCORE registers. Use the accessor functions to replace direct accesses to these registers. This will be important later for Nested APIv2 support which requires additional functionality for accessing and modifying VCPU state. Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://msgid.link/20230914030600.16993-6-jniethe5@gmail.com --- arch/powerpc/include/asm/kvm_book3s.h | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 1a220cd63227..4c6558d5fefe 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -483,6 +483,29 @@ KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64) KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64) KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64) + +#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size) \ +static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ +{ \ + vcpu->arch.vcore->reg = val; \ +} + +#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size) \ +static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ +{ \ + return vcpu->arch.vcore->reg; \ +} + +#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size) \ + KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size) \ + KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size) \ + + +KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64) +KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64) +KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32) +KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64) static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu) { return vcpu->arch.dec_expires; @@ -496,7 +519,7 @@ static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val) /* Expiry time of vcpu DEC relative to host TB */ static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu) { - return kvmppc_get_dec_expires(vcpu) - vcpu->arch.vcore->tb_offset; + return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu); } static inline bool is_kvmppc_resume_guest(int r) -- cgit From 6ccbbc33f06adaf79acde18571c6543ad1cb4be6 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Thu, 14 Sep 2023 13:05:57 +1000 Subject: KVM: PPC: Add helper library for Guest State Buffers The PAPR "Nestedv2" guest API introduces the concept of a Guest State Buffer for communication about L2 guests between L1 and L0 hosts. In the new API, the L0 manages the L2 on behalf of the L1. This means that if the L1 needs to change L2 state (e.g. GPRs, SPRs, partition table...), it must request that the L0 perform the modification. Likewise, if the L1 needs to read L2 state, the request must go through the L0. The Guest State Buffer is a Type-Length-Value style data format defined in the PAPR which assigns all relevant partition state a unique identity. Unlike a typical TLV format, the length is redundant, as the length of each identity is fixed, but it is included for checking correctness. A guest state buffer consists of an element count followed by a stream of elements, where elements are composed of an ID number, data length, then the data:

 Header:

   <---4 bytes--->
  +----------------+-----
  | Element Count  | Elements...
  +----------------+-----

 Element:

   <----2 bytes---> <-2 bytes-> <-Length bytes->
  +----------------+-----------+----------------+
  | Guest State ID |  Length   |      Data      |
  +----------------+-----------+----------------+
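As an illustrative worked example (not part of the patch itself): a buffer carrying only GPR0, a doubleword element with guest state ID 0x1000, holding the value 0xcafe, would serialize, with every field big endian, as:

   00 00 00 01                 element count: 1
   10 00                       guest state ID: 0x1000 (GPR0)
   00 08                       length: 8 bytes
   00 00 00 00 00 00 ca fe     data: 0xcafe

for a total of 4 header bytes plus 12 element bytes.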
Guest State IDs have other attributes defined in the PAPR such as whether they are per thread or per guest, or read-only. Introduce a library for using guest state buffers. This includes support for actions such as creating buffers, adding elements to buffers, reading the value of elements and parsing buffers. This will be used later by the nestedv2 guest support. Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://msgid.link/20230914030600.16993-9-jniethe5@gmail.com --- arch/powerpc/include/asm/guest-state-buffer.h | 904 ++++++++++++++++++++++++++ 1 file changed, 904 insertions(+) create mode 100644 arch/powerpc/include/asm/guest-state-buffer.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h new file mode 100644 index 000000000000..aaefe1075fc4 --- /dev/null +++ b/arch/powerpc/include/asm/guest-state-buffer.h @@ -0,0 +1,904 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Interface based on include/net/netlink.h + */ +#ifndef _ASM_POWERPC_GUEST_STATE_BUFFER_H +#define _ASM_POWERPC_GUEST_STATE_BUFFER_H + +#include +#include +#include + +/************************************************************************** + * Guest State Buffer Constants + **************************************************************************/ +/* Element without a value and any length */ +#define KVMPPC_GSID_BLANK 0x0000 +/* Size required for the L0's internal VCPU representation */ +#define KVMPPC_GSID_HOST_STATE_SIZE 0x0001 + /* Minimum size for the H_GUEST_RUN_VCPU output buffer */ +#define KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE 0x0002 + /* "Logical" PVR value as defined in the PAPR */ +#define KVMPPC_GSID_LOGICAL_PVR 0x0003 + /* L0 relative timebase offset */ +#define KVMPPC_GSID_TB_OFFSET 0x0004 + /* Partition Scoped Page Table Info */ +#define KVMPPC_GSID_PARTITION_TABLE 0x0005 + /* Process Table Info */ +#define KVMPPC_GSID_PROCESS_TABLE 0x0006 + +/* H_GUEST_RUN_VCPU input buffer Info */ +#define KVMPPC_GSID_RUN_INPUT 0x0C00 +/* H_GUEST_RUN_VCPU output buffer Info */ +#define KVMPPC_GSID_RUN_OUTPUT 0x0C01 +#define KVMPPC_GSID_VPA 0x0C02 + +#define KVMPPC_GSID_GPR(x) (0x1000 + (x)) +#define KVMPPC_GSID_HDEC_EXPIRY_TB 0x1020 +#define KVMPPC_GSID_NIA 0x1021 +#define KVMPPC_GSID_MSR 0x1022 +#define KVMPPC_GSID_LR 0x1023 +#define KVMPPC_GSID_XER 0x1024 +#define KVMPPC_GSID_CTR 0x1025 +#define KVMPPC_GSID_CFAR 0x1026 +#define KVMPPC_GSID_SRR0 0x1027 +#define KVMPPC_GSID_SRR1 0x1028 +#define KVMPPC_GSID_DAR 0x1029 +#define KVMPPC_GSID_DEC_EXPIRY_TB 0x102A +#define KVMPPC_GSID_VTB 0x102B +#define KVMPPC_GSID_LPCR 0x102C +#define KVMPPC_GSID_HFSCR 0x102D +#define KVMPPC_GSID_FSCR 0x102E +#define KVMPPC_GSID_FPSCR 0x102F +#define KVMPPC_GSID_DAWR0 0x1030 +#define KVMPPC_GSID_DAWR1 0x1031 +#define KVMPPC_GSID_CIABR 0x1032 +#define KVMPPC_GSID_PURR 0x1033 +#define KVMPPC_GSID_SPURR 0x1034 +#define KVMPPC_GSID_IC 0x1035 +#define KVMPPC_GSID_SPRG0 0x1036 +#define KVMPPC_GSID_SPRG1 0x1037 +#define KVMPPC_GSID_SPRG2 0x1038 +#define KVMPPC_GSID_SPRG3 0x1039 +#define KVMPPC_GSID_PPR 0x103A +#define KVMPPC_GSID_MMCR(x) (0x103B + (x)) +#define KVMPPC_GSID_MMCRA 0x103F +#define KVMPPC_GSID_SIER(x) (0x1040 + (x)) +#define KVMPPC_GSID_BESCR 0x1043 +#define KVMPPC_GSID_EBBHR 0x1044 +#define KVMPPC_GSID_EBBRR
0x1045 +#define KVMPPC_GSID_AMR 0x1046 +#define KVMPPC_GSID_IAMR 0x1047 +#define KVMPPC_GSID_AMOR 0x1048 +#define KVMPPC_GSID_UAMOR 0x1049 +#define KVMPPC_GSID_SDAR 0x104A +#define KVMPPC_GSID_SIAR 0x104B +#define KVMPPC_GSID_DSCR 0x104C +#define KVMPPC_GSID_TAR 0x104D +#define KVMPPC_GSID_DEXCR 0x104E +#define KVMPPC_GSID_HDEXCR 0x104F +#define KVMPPC_GSID_HASHKEYR 0x1050 +#define KVMPPC_GSID_HASHPKEYR 0x1051 +#define KVMPPC_GSID_CTRL 0x1052 + +#define KVMPPC_GSID_CR 0x2000 +#define KVMPPC_GSID_PIDR 0x2001 +#define KVMPPC_GSID_DSISR 0x2002 +#define KVMPPC_GSID_VSCR 0x2003 +#define KVMPPC_GSID_VRSAVE 0x2004 +#define KVMPPC_GSID_DAWRX0 0x2005 +#define KVMPPC_GSID_DAWRX1 0x2006 +#define KVMPPC_GSID_PMC(x) (0x2007 + (x)) +#define KVMPPC_GSID_WORT 0x200D +#define KVMPPC_GSID_PSPB 0x200E + +#define KVMPPC_GSID_VSRS(x) (0x3000 + (x)) + +#define KVMPPC_GSID_HDAR 0xF000 +#define KVMPPC_GSID_HDSISR 0xF001 +#define KVMPPC_GSID_HEIR 0xF002 +#define KVMPPC_GSID_ASDR 0xF003 + +#define KVMPPC_GSE_GUESTWIDE_START KVMPPC_GSID_BLANK +#define KVMPPC_GSE_GUESTWIDE_END KVMPPC_GSID_PROCESS_TABLE +#define KVMPPC_GSE_GUESTWIDE_COUNT \ + (KVMPPC_GSE_GUESTWIDE_END - KVMPPC_GSE_GUESTWIDE_START + 1) + +#define KVMPPC_GSE_META_START KVMPPC_GSID_RUN_INPUT +#define KVMPPC_GSE_META_END KVMPPC_GSID_VPA +#define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1) + +#define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0) +#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL +#define KVMPPC_GSE_DW_REGS_COUNT \ + (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1) + +#define KVMPPC_GSE_W_REGS_START KVMPPC_GSID_CR +#define KVMPPC_GSE_W_REGS_END KVMPPC_GSID_PSPB +#define KVMPPC_GSE_W_REGS_COUNT \ + (KVMPPC_GSE_W_REGS_END - KVMPPC_GSE_W_REGS_START + 1) + +#define KVMPPC_GSE_VSRS_START KVMPPC_GSID_VSRS(0) +#define KVMPPC_GSE_VSRS_END KVMPPC_GSID_VSRS(63) +#define KVMPPC_GSE_VSRS_COUNT (KVMPPC_GSE_VSRS_END - KVMPPC_GSE_VSRS_START + 1) + +#define KVMPPC_GSE_INTR_REGS_START KVMPPC_GSID_HDAR +#define KVMPPC_GSE_INTR_REGS_END KVMPPC_GSID_ASDR +#define KVMPPC_GSE_INTR_REGS_COUNT \ + (KVMPPC_GSE_INTR_REGS_END - KVMPPC_GSE_INTR_REGS_START + 1) + +#define KVMPPC_GSE_IDEN_COUNT \ + (KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \ + KVMPPC_GSE_DW_REGS_COUNT + KVMPPC_GSE_W_REGS_COUNT + \ + KVMPPC_GSE_VSRS_COUNT + KVMPPC_GSE_INTR_REGS_COUNT) + +/** + * Ranges of guest state buffer elements + */ +enum { + KVMPPC_GS_CLASS_GUESTWIDE = 0x01, + KVMPPC_GS_CLASS_META = 0x02, + KVMPPC_GS_CLASS_DWORD_REG = 0x04, + KVMPPC_GS_CLASS_WORD_REG = 0x08, + KVMPPC_GS_CLASS_VECTOR = 0x10, + KVMPPC_GS_CLASS_INTR = 0x20, +}; + +/** + * Types of guest state buffer elements + */ +enum { + KVMPPC_GSE_BE32, + KVMPPC_GSE_BE64, + KVMPPC_GSE_VEC128, + KVMPPC_GSE_PARTITION_TABLE, + KVMPPC_GSE_PROCESS_TABLE, + KVMPPC_GSE_BUFFER, + __KVMPPC_GSE_TYPE_MAX, +}; + +/** + * Flags for guest state elements + */ +enum { + KVMPPC_GS_FLAGS_WIDE = 0x01, +}; + +/** + * struct kvmppc_gs_part_table - deserialized partition table information + * element + * @address: start of the partition table + * @ea_bits: number of bits in the effective address + * @gpd_size: root page directory size + */ +struct kvmppc_gs_part_table { + u64 address; + u64 ea_bits; + u64 gpd_size; +}; + +/** + * struct kvmppc_gs_proc_table - deserialized process table information element + * @address: start of the process table + * @gpd_size: process table size + */ +struct kvmppc_gs_proc_table { + u64 address; + u64 gpd_size; +}; + +/** + * struct kvmppc_gs_buff_info - deserialized meta guest 
state buffer information + * @address: start of the guest state buffer + * @size: size of the guest state buffer + */ +struct kvmppc_gs_buff_info { + u64 address; + u64 size; +}; + +/** + * struct kvmppc_gs_header - serialized guest state buffer header + * @nelems: count of guest state elements in the buffer + * @data: start of the stream of elements in the buffer + */ +struct kvmppc_gs_header { + __be32 nelems; + char data[]; +} __packed; + +/** + * struct kvmppc_gs_elem - serialized guest state buffer element + * @iden: Guest State ID + * @len: length of data + * @data: the guest state buffer element's value + */ +struct kvmppc_gs_elem { + __be16 iden; + __be16 len; + char data[]; +} __packed; + +/** + * struct kvmppc_gs_buff - a guest state buffer with metadata. + * @capacity: total length of the buffer + * @len: current length of the elements and header + * @guest_id: guest id associated with the buffer + * @vcpu_id: vcpu_id associated with the buffer + * @hdr: the serialised guest state buffer + */ +struct kvmppc_gs_buff { + size_t capacity; + size_t len; + unsigned long guest_id; + unsigned long vcpu_id; + struct kvmppc_gs_header *hdr; +}; + +/** + * struct kvmppc_gs_bitmap - a bitmap for element ids + * @bitmap: a bitmap large enough for all Guest State IDs + */ +struct kvmppc_gs_bitmap { + /* private: */ + DECLARE_BITMAP(bitmap, KVMPPC_GSE_IDEN_COUNT); +}; + +/** + * struct kvmppc_gs_parser - a map of element ids to locations in a buffer + * @iterator: bitmap used for iterating + * @gses: contains the pointers to elements + * + * A guest state parser is used for deserialising a guest state buffer. + * Given a buffer, it then allows looking up guest state elements using + * a guest state id. + */ +struct kvmppc_gs_parser { + /* private: */ + struct kvmppc_gs_bitmap iterator; + struct kvmppc_gs_elem *gses[KVMPPC_GSE_IDEN_COUNT]; +}; + +enum { + GSM_GUEST_WIDE = 0x1, + GSM_SEND = 0x2, + GSM_RECEIVE = 0x4, + GSM_GSB_OWNER = 0x8, +}; + +struct kvmppc_gs_msg; + +/** + * struct kvmppc_gs_msg_ops - guest state message behavior + * @get_size: maximum size required for the message data + * @fill_info: serializes to the guest state buffer format + * @refresh_info: deserializes from the guest state buffer format + */ +struct kvmppc_gs_msg_ops { + size_t (*get_size)(struct kvmppc_gs_msg *gsm); + int (*fill_info)(struct kvmppc_gs_buff *gsb, struct kvmppc_gs_msg *gsm); + int (*refresh_info)(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb); +}; + +/** + * struct kvmppc_gs_msg - a guest state message + * @bitmap: the guest state ids that should be included + * @ops: modify message behavior for reading and writing to buffers + * @flags: guest wide or thread wide + * @data: location that buffer data will be written to or read from. + * + * A guest state message allows flexibility in sending and receiving data + * in a guest state buffer format.
+ */ +struct kvmppc_gs_msg { + struct kvmppc_gs_bitmap bitmap; + struct kvmppc_gs_msg_ops *ops; + unsigned long flags; + void *data; +}; + +/************************************************************************** + * Guest State IDs + **************************************************************************/ + +u16 kvmppc_gsid_size(u16 iden); +unsigned long kvmppc_gsid_flags(u16 iden); +u64 kvmppc_gsid_mask(u16 iden); + +/************************************************************************** + * Guest State Buffers + **************************************************************************/ +struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id, + unsigned long vcpu_id, gfp_t flags); +void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb); +void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size); + +/** + * kvmppc_gsb_header() - the header of a guest state buffer + * @gsb: guest state buffer + * + * Returns a pointer to the buffer header. + */ +static inline struct kvmppc_gs_header * +kvmppc_gsb_header(struct kvmppc_gs_buff *gsb) +{ + return gsb->hdr; +} + +/** + * kvmppc_gsb_data() - the elements of a guest state buffer + * @gsb: guest state buffer + * + * Returns a pointer to the first element of the buffer data. + */ +static inline struct kvmppc_gs_elem *kvmppc_gsb_data(struct kvmppc_gs_buff *gsb) +{ + return (struct kvmppc_gs_elem *)kvmppc_gsb_header(gsb)->data; +} + +/** + * kvmppc_gsb_len() - the current length of a guest state buffer + * @gsb: guest state buffer + * + * Returns the length including the header of a buffer. + */ +static inline size_t kvmppc_gsb_len(struct kvmppc_gs_buff *gsb) +{ + return gsb->len; +} + +/** + * kvmppc_gsb_capacity() - the capacity of a guest state buffer + * @gsb: guest state buffer + * + * Returns the capacity of a buffer. + */ +static inline size_t kvmppc_gsb_capacity(struct kvmppc_gs_buff *gsb) +{ + return gsb->capacity; +} + +/** + * kvmppc_gsb_paddress() - the physical address of buffer + * @gsb: guest state buffer + * + * Returns the physical address of the buffer. + */ +static inline u64 kvmppc_gsb_paddress(struct kvmppc_gs_buff *gsb) +{ + return __pa(kvmppc_gsb_header(gsb)); +} + +/** + * kvmppc_gsb_nelems() - the number of elements in a buffer + * @gsb: guest state buffer + * + * Returns the number of elements in a buffer + */ +static inline u32 kvmppc_gsb_nelems(struct kvmppc_gs_buff *gsb) +{ + return be32_to_cpu(kvmppc_gsb_header(gsb)->nelems); +} + +/** + * kvmppc_gsb_reset() - empty a guest state buffer + * @gsb: guest state buffer + * + * Reset the number of elements and length of buffer to empty. 
+ */ +static inline void kvmppc_gsb_reset(struct kvmppc_gs_buff *gsb) +{ + kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(0); + gsb->len = sizeof(struct kvmppc_gs_header); +} + +/** + * kvmppc_gsb_data_len() - the length of a buffer excluding the header + * @gsb: guest state buffer + * + * Returns the length of a buffer excluding the header + */ +static inline size_t kvmppc_gsb_data_len(struct kvmppc_gs_buff *gsb) +{ + return gsb->len - sizeof(struct kvmppc_gs_header); +} + +/** + * kvmppc_gsb_data_cap() - the capacity of a buffer excluding the header + * @gsb: guest state buffer + * + * Returns the capacity of a buffer excluding the header + */ +static inline size_t kvmppc_gsb_data_cap(struct kvmppc_gs_buff *gsb) +{ + return gsb->capacity - sizeof(struct kvmppc_gs_header); +} + +/** + * kvmppc_gsb_for_each_elem - iterate over the elements in a buffer + * @i: loop counter + * @pos: set to current element + * @gsb: guest state buffer + * @rem: initialized to buffer capacity, holds bytes currently remaining in + * stream + */ +#define kvmppc_gsb_for_each_elem(i, pos, gsb, rem) \ + kvmppc_gse_for_each_elem(i, kvmppc_gsb_nelems(gsb), pos, \ + kvmppc_gsb_data(gsb), \ + kvmppc_gsb_data_cap(gsb), rem) + +/************************************************************************** + * Guest State Elements + **************************************************************************/ + +/** + * kvmppc_gse_iden() - guest state ID of element + * @gse: guest state element + * + * Return the guest state ID in host endianness. + */ +static inline u16 kvmppc_gse_iden(const struct kvmppc_gs_elem *gse) +{ + return be16_to_cpu(gse->iden); +} + +/** + * kvmppc_gse_len() - length of guest state element data + * @gse: guest state element + * + * Returns the length of guest state element data + */ +static inline u16 kvmppc_gse_len(const struct kvmppc_gs_elem *gse) +{ + return be16_to_cpu(gse->len); +} + +/** + * kvmppc_gse_total_len() - total length of guest state element + * @gse: guest state element + * + * Returns the length of the data plus the ID and size header. + */ +static inline u16 kvmppc_gse_total_len(const struct kvmppc_gs_elem *gse) +{ + return be16_to_cpu(gse->len) + sizeof(*gse); +} + +/** + * kvmppc_gse_total_size() - space needed for a given data length + * @size: data length + * + * Returns size plus the space needed for the ID and size header. + */ +static inline u16 kvmppc_gse_total_size(u16 size) +{ + return sizeof(struct kvmppc_gs_elem) + size; +} + +/** + * kvmppc_gse_data() - pointer to data of a guest state element + * @gse: guest state element + * + * Returns a pointer to the beginning of guest state element data. + */ +static inline void *kvmppc_gse_data(const struct kvmppc_gs_elem *gse) +{ + return (void *)gse->data; +} + +/** + * kvmppc_gse_ok() - checks space exists for guest state element + * @gse: guest state element + * @remaining: bytes of space remaining + * + * Returns true if the guest state element can fit in remaining space. + */ +static inline bool kvmppc_gse_ok(const struct kvmppc_gs_elem *gse, + int remaining) +{ + return remaining >= kvmppc_gse_total_len(gse); +} + +/** + * kvmppc_gse_next() - iterate to the next guest state element in a stream + * @gse: stream of guest state elements + * @remaining: length of the guest element stream + * + * Returns the next guest state element in a stream of elements. The length of + * the stream is updated in remaining. 
+ */ +static inline struct kvmppc_gs_elem * +kvmppc_gse_next(const struct kvmppc_gs_elem *gse, int *remaining) +{ + int len = sizeof(*gse) + kvmppc_gse_len(gse); + + *remaining -= len; + return (struct kvmppc_gs_elem *)(gse->data + kvmppc_gse_len(gse)); +} + +/** + * kvmppc_gse_for_each_elem - iterate over a stream of guest state elements + * @i: loop counter + * @max: number of elements + * @pos: set to current element + * @head: head of elements + * @len: length of the stream + * @rem: initialized to len, holds bytes currently remaining elements + */ +#define kvmppc_gse_for_each_elem(i, max, pos, head, len, rem) \ + for (i = 0, pos = head, rem = len; kvmppc_gse_ok(pos, rem) && i < max; \ + pos = kvmppc_gse_next(pos, &(rem)), i++) + +int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size, + const void *data); +int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb); + +/** + * kvmppc_gse_put_be32() - add a be32 guest state element to a buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: big endian value + */ +static inline int kvmppc_gse_put_be32(struct kvmppc_gs_buff *gsb, u16 iden, + __be32 val) +{ + __be32 tmp; + + tmp = val; + return __kvmppc_gse_put(gsb, iden, sizeof(__be32), &tmp); +} + +/** + * kvmppc_gse_put_u32() - add a host endian 32bit int guest state element to a + * buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: host endian value + */ +static inline int kvmppc_gse_put_u32(struct kvmppc_gs_buff *gsb, u16 iden, + u32 val) +{ + __be32 tmp; + + val &= kvmppc_gsid_mask(iden); + tmp = cpu_to_be32(val); + return kvmppc_gse_put_be32(gsb, iden, tmp); +} + +/** + * kvmppc_gse_put_be64() - add a be64 guest state element to a buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: big endian value + */ +static inline int kvmppc_gse_put_be64(struct kvmppc_gs_buff *gsb, u16 iden, + __be64 val) +{ + __be64 tmp; + + tmp = val; + return __kvmppc_gse_put(gsb, iden, sizeof(__be64), &tmp); +} + +/** + * kvmppc_gse_put_u64() - add a host endian 64bit guest state element to a + * buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: host endian value + */ +static inline int kvmppc_gse_put_u64(struct kvmppc_gs_buff *gsb, u16 iden, + u64 val) +{ + __be64 tmp; + + val &= kvmppc_gsid_mask(iden); + tmp = cpu_to_be64(val); + return kvmppc_gse_put_be64(gsb, iden, tmp); +} + +/** + * __kvmppc_gse_put_reg() - add a register type guest state element to a buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: host endian value + * + * Adds a register type guest state element. Uses the guest state ID for + * determining the length of the guest element. If the guest state ID has + * bits that can not be set they will be cleared. 
+ */ +static inline int __kvmppc_gse_put_reg(struct kvmppc_gs_buff *gsb, u16 iden, + u64 val) +{ + val &= kvmppc_gsid_mask(iden); + if (kvmppc_gsid_size(iden) == sizeof(u64)) + return kvmppc_gse_put_u64(gsb, iden, val); + + if (kvmppc_gsid_size(iden) == sizeof(u32)) { + u32 tmp; + + tmp = (u32)val; + if (tmp != val) + return -EINVAL; + + return kvmppc_gse_put_u32(gsb, iden, tmp); + } + return -EINVAL; +} + +/** + * kvmppc_gse_put_vector128() - add a vector guest state element to a buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: 16 byte vector value + */ +static inline int kvmppc_gse_put_vector128(struct kvmppc_gs_buff *gsb, u16 iden, + vector128 *val) +{ + __be64 tmp[2] = { 0 }; + union { + __vector128 v; + u64 dw[2]; + } u; + + u.v = *val; + tmp[0] = cpu_to_be64(u.dw[TS_FPROFFSET]); +#ifdef CONFIG_VSX + tmp[1] = cpu_to_be64(u.dw[TS_VSRLOWOFFSET]); +#endif + return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp); +} + +/** + * kvmppc_gse_put_part_table() - add a partition table guest state element to a + * buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: partition table value + */ +static inline int kvmppc_gse_put_part_table(struct kvmppc_gs_buff *gsb, + u16 iden, + struct kvmppc_gs_part_table val) +{ + __be64 tmp[3]; + + tmp[0] = cpu_to_be64(val.address); + tmp[1] = cpu_to_be64(val.ea_bits); + tmp[2] = cpu_to_be64(val.gpd_size); + return __kvmppc_gse_put(gsb, KVMPPC_GSID_PARTITION_TABLE, sizeof(tmp), + &tmp); +} + +/** + * kvmppc_gse_put_proc_table() - add a process table guest state element to a + * buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: process table value + */ +static inline int kvmppc_gse_put_proc_table(struct kvmppc_gs_buff *gsb, + u16 iden, + struct kvmppc_gs_proc_table val) +{ + __be64 tmp[2]; + + tmp[0] = cpu_to_be64(val.address); + tmp[1] = cpu_to_be64(val.gpd_size); + return __kvmppc_gse_put(gsb, KVMPPC_GSID_PROCESS_TABLE, sizeof(tmp), + &tmp); +} + +/** + * kvmppc_gse_put_buff_info() - adds a GSB description guest state element to a + * buffer + * @gsb: guest state buffer to add element to + * @iden: guest state ID + * @val: guest state buffer description value + */ +static inline int kvmppc_gse_put_buff_info(struct kvmppc_gs_buff *gsb, u16 iden, + struct kvmppc_gs_buff_info val) +{ + __be64 tmp[2]; + + tmp[0] = cpu_to_be64(val.address); + tmp[1] = cpu_to_be64(val.size); + return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp); +} + +int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size, + const void *data); + +/** + * kvmppc_gse_get_be32() - return the data of a be32 element + * @gse: guest state element + */ +static inline __be32 kvmppc_gse_get_be32(const struct kvmppc_gs_elem *gse) +{ + if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be32))) + return 0; + return *(__be32 *)kvmppc_gse_data(gse); +} + +/** + * kvmppc_gse_get_u32() - return the data of a be32 element in host endianness + * @gse: guest state element + */ +static inline u32 kvmppc_gse_get_u32(const struct kvmppc_gs_elem *gse) +{ + return be32_to_cpu(kvmppc_gse_get_be32(gse)); +} + +/** + * kvmppc_gse_get_be64() - return the data of a be64 element + * @gse: guest state element + */ +static inline __be64 kvmppc_gse_get_be64(const struct kvmppc_gs_elem *gse) +{ + if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be64))) + return 0; + return *(__be64 *)kvmppc_gse_data(gse); +} + +/** + * kvmppc_gse_get_u64() - return the data of a be64 element in host endianness + * @gse: guest state 
element + */ +static inline u64 kvmppc_gse_get_u64(const struct kvmppc_gs_elem *gse) +{ + return be64_to_cpu(kvmppc_gse_get_be64(gse)); +} + +/** + * kvmppc_gse_get_vector128() - return the data of a vector element + * @gse: guest state element + */ +static inline void kvmppc_gse_get_vector128(const struct kvmppc_gs_elem *gse, + vector128 *v) +{ + union { + __vector128 v; + u64 dw[2]; + } u = { 0 }; + __be64 *src; + + if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__vector128))) + *v = u.v; + + src = (__be64 *)kvmppc_gse_data(gse); + u.dw[TS_FPROFFSET] = be64_to_cpu(src[0]); +#ifdef CONFIG_VSX + u.dw[TS_VSRLOWOFFSET] = be64_to_cpu(src[1]); +#endif + *v = u.v; +} + +/************************************************************************** + * Guest State Bitmap + **************************************************************************/ + +bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden); +void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden); +void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden); +u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev); + +/** + * kvmppc_gsbm_zero - zero the entire bitmap + * @gsbm: guest state buffer bitmap + */ +static inline void kvmppc_gsbm_zero(struct kvmppc_gs_bitmap *gsbm) +{ + bitmap_zero(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT); +} + +/** + * kvmppc_gsbm_fill - fill the entire bitmap + * @gsbm: guest state buffer bitmap + */ +static inline void kvmppc_gsbm_fill(struct kvmppc_gs_bitmap *gsbm) +{ + bitmap_fill(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT); + clear_bit(0, gsbm->bitmap); +} + +/** + * kvmppc_gsbm_for_each - iterate the present guest state IDs + * @gsbm: guest state buffer bitmap + * @iden: current guest state ID + */ +#define kvmppc_gsbm_for_each(gsbm, iden) \ + for (iden = kvmppc_gsbm_next(gsbm, 0); iden != 0; \ + iden = kvmppc_gsbm_next(gsbm, iden)) + +/************************************************************************** + * Guest State Parser + **************************************************************************/ + +void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden, + struct kvmppc_gs_elem *gse); +struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp, + u16 iden); + +/** + * kvmppc_gsp_for_each - iterate the + * pairs + * @gsp: guest state buffer bitmap + * @iden: current guest state ID + * @gse: guest state element + */ +#define kvmppc_gsp_for_each(gsp, iden, gse) \ + for (iden = kvmppc_gsbm_next(&(gsp)->iterator, 0), \ + gse = kvmppc_gsp_lookup((gsp), iden); \ + iden != 0; iden = kvmppc_gsbm_next(&(gsp)->iterator, iden), \ + gse = kvmppc_gsp_lookup((gsp), iden)) + +/************************************************************************** + * Guest State Message + **************************************************************************/ + +/** + * kvmppc_gsm_for_each - iterate the guest state IDs included in a guest state + * message + * @gsp: guest state buffer bitmap + * @iden: current guest state ID + * @gse: guest state element + */ +#define kvmppc_gsm_for_each(gsm, iden) \ + for (iden = kvmppc_gsbm_next(&gsm->bitmap, 0); iden != 0; \ + iden = kvmppc_gsbm_next(&gsm->bitmap, iden)) + +int kvmppc_gsm_init(struct kvmppc_gs_msg *mgs, struct kvmppc_gs_msg_ops *ops, + void *data, unsigned long flags); + +struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data, + unsigned long flags, gfp_t gfp_flags); +void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm); +size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm); +int kvmppc_gsm_fill_info(struct 
kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb); +int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb); + +/** + * kvmppc_gsm_include - indicate a guest state ID should be included when + * serializing + * @gsm: guest state message + * @iden: guest state ID + */ +static inline void kvmppc_gsm_include(struct kvmppc_gs_msg *gsm, u16 iden) +{ + kvmppc_gsbm_set(&gsm->bitmap, iden); +} + +/** + * kvmppc_gsm_includes - check if a guest state ID will be included when + * serializing + * @gsm: guest state message + * @iden: guest state ID + */ +static inline bool kvmppc_gsm_includes(struct kvmppc_gs_msg *gsm, u16 iden) +{ + return kvmppc_gsbm_test(&gsm->bitmap, iden); +} + +/** + * kvmppc_gsm_include_all - indicate all guest state IDs should be included + * when serializing + * @gsm: guest state message + */ +static inline void kvmppc_gsm_include_all(struct kvmppc_gs_msg *gsm) +{ + kvmppc_gsbm_fill(&gsm->bitmap); +} + +/** + * kvmppc_gsm_reset - clear the guest state IDs that should be included when + * serializing + * @gsm: guest state message + */ +static inline void kvmppc_gsm_reset(struct kvmppc_gs_msg *gsm) +{ + kvmppc_gsbm_zero(&gsm->bitmap); +} + +#endif /* _ASM_POWERPC_GUEST_STATE_BUFFER_H */ -- cgit From dfcaacc8f970c6b4ea4e32d2186f2bea4a1d5255 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Thu, 14 Sep 2023 13:05:58 +1000 Subject: KVM: PPC: Book3s HV: Hold LPIDs in an unsigned long The LPID register is 32 bits long. The host keeps the lpid for each guest in an unsigned int in struct kvm_arch. Currently, LPIDs are already limited by mmu_lpid_bits and KVM_MAX_NESTED_GUESTS_SHIFT. The nestedv2 API returns a 64 bit "Guest ID" to be used by the L1 host for each L2 guest. This value is used as an lpid, e.g. it is the parameter used by H_RPT_INVALIDATE. To minimize needless special casing it makes sense to keep this "Guest ID" in struct kvm_arch::lpid. This means that struct kvm_arch::lpid is too small so prepare for this and make it an unsigned long. This is not a problem for the KVM-HV and nestedv1 cases as their lpid values are already limited to valid ranges so in those contexts the lpid can be used as an unsigned word safely as needed. In the PAPR, the H_RPT_INVALIDATE pid/lpid parameter is already specified as an unsigned long so change pseries_rpt_invalidate() to match that. Update the callers of pseries_rpt_invalidate() to also take an unsigned long if they take an lpid value.
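As a minimal illustrative sketch (the example_ function and its arguments are placeholders, not part of this patch), the intent is that a 64-bit nestedv2 guest id now flows through to the hcall without truncation:

static void example_rpt_invalidate(struct kvm *kvm, u64 target, u64 type,
				   u64 page_sizes, u64 start, u64 end)
{
	/* kvm->arch.lpid is now u64, so a nestedv2 guest id fits */
	pseries_rpt_invalidate(kvm->arch.lpid, target, type, page_sizes,
			       start, end);
}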
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://msgid.link/20230914030600.16993-10-jniethe5@gmail.com --- arch/powerpc/include/asm/kvm_book3s.h | 10 +++++----- arch/powerpc/include/asm/kvm_book3s_64.h | 2 +- arch/powerpc/include/asm/kvm_host.h | 2 +- arch/powerpc/include/asm/plpar_wrappers.h | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4c6558d5fefe..831c23e4f121 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -191,14 +191,14 @@ extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite); extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, - unsigned int pshift, unsigned int lpid); + unsigned int pshift, u64 lpid); extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, unsigned int shift, const struct kvm_memory_slot *memslot, - unsigned int lpid); + u64 lpid); extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, unsigned long gpa, - unsigned int lpid); + u64 lpid); extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, @@ -207,7 +207,7 @@ extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, extern int kvmppc_init_vm_radix(struct kvm *kvm); extern void kvmppc_free_radix(struct kvm *kvm); extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, - unsigned int lpid); + u64 lpid); extern int kvmppc_radix_init(void); extern void kvmppc_radix_exit(void); extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, @@ -300,7 +300,7 @@ void kvmhv_nested_exit(void); void kvmhv_vm_nested_init(struct kvm *kvm); long kvmhv_set_partition_table(struct kvm_vcpu *vcpu); long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu); -void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1); +void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1); void kvmhv_release_all_nested(struct kvm *kvm); long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index d49065af08e9..572f9bbf1a25 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -624,7 +624,7 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, unsigned long gpa, unsigned int level, - unsigned long mmu_seq, unsigned int lpid, + unsigned long mmu_seq, u64 lpid, unsigned long *rmapp, struct rmap_nested **n_rmap); extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, struct rmap_nested **n_rmap); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 14ee0dece853..429b53bc1773 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -276,7 +276,7 @@ struct kvm_resize_hpt; #define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */ struct kvm_arch { - unsigned int lpid; + u64 lpid; unsigned int smt_mode; /* # vcpus per virtual core */ unsigned int emul_smt_mode; /* emualted SMT mode, on P9 */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE diff --git 
a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index fe3d0ea0058a..8d26f2537586 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -355,7 +355,7 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) * error recovery of killing the process/guest will be eventually * needed. */ -static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type, +static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type, u64 page_sizes, u64 start, u64 end) { long rc; @@ -401,7 +401,7 @@ static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex, return 0; } -static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type, +static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type, u64 page_sizes, u64 start, u64 end) { return 0; -- cgit From 19d31c5f115754c369c0995df47479c384757f82 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Thu, 14 Sep 2023 13:05:59 +1000 Subject: KVM: PPC: Add support for nestedv2 guests A series of hcalls have been added to the PAPR which allow a regular guest partition to create and manage guest partitions of its own. KVM already had an interface that allowed this on powernv platforms. This existing interface will now be called "nestedv1". The newly added PAPR interface will be called "nestedv2". PHYP will support the nestedv2 interface. At this time the host side of the nestedv2 interface has not been implemented on powernv but there is no technical reason why it could not be added. The nestedv1 interface is still supported. Add support to KVM to utilize these hcalls to enable running nested guests as a pseries guest on PHYP. Overview of the new hcall usage: - L1 and L0 negotiate capabilities with H_GUEST_{G,S}ET_CAPABILITIES() - L1 requests the L0 create a L2 with H_GUEST_CREATE() and receives a handle to use in future hcalls - L1 requests the L0 create a L2 vCPU with H_GUEST_CREATE_VCPU() - L1 sets up the L2 using H_GUEST_SET and the H_GUEST_VCPU_RUN input buffer - L1 requests the L0 runs the L2 vCPU using H_GUEST_VCPU_RUN() - L2 returns to L1 with an exit reason and L1 reads the H_GUEST_VCPU_RUN output buffer populated by the L0 - L1 handles the exit using H_GET_STATE if necessary - L1 reruns L2 vCPU with H_GUEST_VCPU_RUN - L1 frees the L2 in the L0 with H_GUEST_DELETE() Support for the new API is determined by trying H_GUEST_GET_CAPABILITIES. On a successful return, use the nestedv2 interface. Use the vcpu register state setters for tracking modified guest state elements and copy the thread wide values into the H_GUEST_VCPU_RUN input buffer immediately before running a L2. The guest wide elements can not be added to the input buffer so send them with a separate H_GUEST_SET call if necessary. Make the vcpu register getter load the corresponding value from the real host with H_GUEST_GET. To avoid unnecessarily calling H_GUEST_GET, track which values have already been loaded between H_GUEST_VCPU_RUN calls. If an element is present in the H_GUEST_VCPU_RUN output buffer it also does not need to be loaded again. 
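In rough pseudo-code (illustrative only; the l1_*() helpers below are hypothetical stand-ins for the real wrappers added by this patch, and the loop condition is simplified), the lifecycle above looks like:

static int example_l2_lifecycle(struct kvm_vcpu *vcpu)
{
	unsigned long guest_id;
	int exit_reason;

	l1_negotiate_caps();				/* H_GUEST_{G,S}ET_CAPABILITIES */
	guest_id = l1_create_guest();			/* H_GUEST_CREATE */
	l1_create_vcpu(guest_id, vcpu->vcpu_id);	/* H_GUEST_CREATE_VCPU */
	l1_set_state(guest_id, vcpu);			/* H_GUEST_SET_STATE / run input buffer */

	do {
		/* H_GUEST_RUN_VCPU, then parse the run output buffer */
		exit_reason = l1_run_vcpu(guest_id, vcpu);
		/* H_GUEST_GET_STATE for anything else the exit needs */
		l1_handle_exit(vcpu, exit_reason);
	} while (exit_reason == RESUME_GUEST);		/* hypothetical condition */

	l1_delete_guest(guest_id);			/* H_GUEST_DELETE */
	return 0;
}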
Tested-by: Sachin Sant Signed-off-by: Vaibhav Jain Signed-off-by: Gautam Menghani Signed-off-by: Kautuk Consul Signed-off-by: Amit Machhiwal Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://msgid.link/20230914030600.16993-11-jniethe5@gmail.com --- arch/powerpc/include/asm/guest-state-buffer.h | 91 +++++++++ arch/powerpc/include/asm/hvcall.h | 30 +++ arch/powerpc/include/asm/kvm_book3s.h | 137 ++++++++++++-- arch/powerpc/include/asm/kvm_book3s_64.h | 6 + arch/powerpc/include/asm/kvm_host.h | 20 ++ arch/powerpc/include/asm/kvm_ppc.h | 90 ++++++--- arch/powerpc/include/asm/plpar_wrappers.h | 263 ++++++++++++++++++++++++++ 7 files changed, 592 insertions(+), 45 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h index aaefe1075fc4..808149f31576 100644 --- a/arch/powerpc/include/asm/guest-state-buffer.h +++ b/arch/powerpc/include/asm/guest-state-buffer.h @@ -5,6 +5,7 @@ #ifndef _ASM_POWERPC_GUEST_STATE_BUFFER_H #define _ASM_POWERPC_GUEST_STATE_BUFFER_H +#include "asm/hvcall.h" #include #include #include @@ -313,6 +314,8 @@ struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id, unsigned long vcpu_id, gfp_t flags); void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb); void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size); +int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags); +int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags); /** * kvmppc_gsb_header() - the header of a guest state buffer @@ -901,4 +904,92 @@ static inline void kvmppc_gsm_reset(struct kvmppc_gs_msg *gsm) kvmppc_gsbm_zero(&gsm->bitmap); } +/** + * kvmppc_gsb_receive_data - flexibly update values from a guest state buffer + * @gsb: guest state buffer + * @gsm: guest state message + * + * Requests updated values for the guest state values included in the guest + * state message. The guest state message will then deserialize the guest state + * buffer. + */ +static inline int kvmppc_gsb_receive_data(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + int rc; + + kvmppc_gsb_reset(gsb); + rc = kvmppc_gsm_fill_info(gsm, gsb); + if (rc < 0) + return rc; + + rc = kvmppc_gsb_recv(gsb, gsm->flags); + if (rc < 0) + return rc; + + rc = kvmppc_gsm_refresh_info(gsm, gsb); + if (rc < 0) + return rc; + return 0; +} + +/** + * kvmppc_gsb_recv - receive a single guest state ID + * @gsb: guest state buffer + * @gsm: guest state message + * @iden: guest state identity + */ +static inline int kvmppc_gsb_receive_datum(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm, u16 iden) +{ + int rc; + + kvmppc_gsm_include(gsm, iden); + rc = kvmppc_gsb_receive_data(gsb, gsm); + if (rc < 0) + return rc; + kvmppc_gsm_reset(gsm); + return 0; +} + +/** + * kvmppc_gsb_send_data - flexibly send values from a guest state buffer + * @gsb: guest state buffer + * @gsm: guest state message + * + * Sends the guest state values included in the guest state message. 
+ */ +static inline int kvmppc_gsb_send_data(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + int rc; + + kvmppc_gsb_reset(gsb); + rc = kvmppc_gsm_fill_info(gsm, gsb); + if (rc < 0) + return rc; + rc = kvmppc_gsb_send(gsb, gsm->flags); + + return rc; +} + +/** + * kvmppc_gsb_recv - send a single guest state ID + * @gsb: guest state buffer + * @gsm: guest state message + * @iden: guest state identity + */ +static inline int kvmppc_gsb_send_datum(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm, u16 iden) +{ + int rc; + + kvmppc_gsm_include(gsm, iden); + rc = kvmppc_gsb_send_data(gsb, gsm); + if (rc < 0) + return rc; + kvmppc_gsm_reset(gsm); + return 0; +} + #endif /* _ASM_POWERPC_GUEST_STATE_BUFFER_H */ diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index c099780385dd..ddb99e982917 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -100,6 +100,18 @@ #define H_COP_HW -74 #define H_STATE -75 #define H_IN_USE -77 + +#define H_INVALID_ELEMENT_ID -79 +#define H_INVALID_ELEMENT_SIZE -80 +#define H_INVALID_ELEMENT_VALUE -81 +#define H_INPUT_BUFFER_NOT_DEFINED -82 +#define H_INPUT_BUFFER_TOO_SMALL -83 +#define H_OUTPUT_BUFFER_NOT_DEFINED -84 +#define H_OUTPUT_BUFFER_TOO_SMALL -85 +#define H_PARTITION_PAGE_TABLE_NOT_DEFINED -86 +#define H_GUEST_VCPU_STATE_NOT_HV_OWNED -87 + + #define H_UNSUPPORTED_FLAG_START -256 #define H_UNSUPPORTED_FLAG_END -511 #define H_MULTI_THREADS_ACTIVE -9005 @@ -381,6 +393,15 @@ #define H_ENTER_NESTED 0xF804 #define H_TLB_INVALIDATE 0xF808 #define H_COPY_TOFROM_GUEST 0xF80C +#define H_GUEST_GET_CAPABILITIES 0x460 +#define H_GUEST_SET_CAPABILITIES 0x464 +#define H_GUEST_CREATE 0x470 +#define H_GUEST_CREATE_VCPU 0x474 +#define H_GUEST_GET_STATE 0x478 +#define H_GUEST_SET_STATE 0x47C +#define H_GUEST_RUN_VCPU 0x480 +#define H_GUEST_COPY_MEMORY 0x484 +#define H_GUEST_DELETE 0x488 /* Flags for H_SVM_PAGE_IN */ #define H_PAGE_IN_SHARED 0x1 @@ -467,6 +488,15 @@ #define H_RPTI_PAGE_1G 0x08 #define H_RPTI_PAGE_ALL (-1UL) +/* Flags for H_GUEST_{S,G}_STATE */ +#define H_GUEST_FLAGS_WIDE (1UL<<(63-0)) + +/* Flag values used for H_{S,G}SET_GUEST_CAPABILITIES */ +#define H_GUEST_CAP_COPY_MEM (1UL<<(63-0)) +#define H_GUEST_CAP_POWER9 (1UL<<(63-1)) +#define H_GUEST_CAP_POWER10 (1UL<<(63-2)) +#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63)) + #ifndef __ASSEMBLY__ #include diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 831c23e4f121..4f527d09c92b 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -12,6 +12,7 @@ #include #include #include +#include struct kvmppc_bat { u64 raw; @@ -295,6 +296,7 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {} static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {} #endif +extern unsigned long nested_capabilities; long kvmhv_nested_init(void); void kvmhv_nested_exit(void); void kvmhv_vm_nested_init(struct kvm *kvm); @@ -316,6 +318,69 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu); void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); + +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + +extern struct static_key_false __kvmhv_is_nestedv2; + +static inline bool kvmhv_is_nestedv2(void) +{ + return static_branch_unlikely(&__kvmhv_is_nestedv2); +} + +static inline bool kvmhv_is_nestedv1(void) +{ + return !static_branch_likely(&__kvmhv_is_nestedv2); +} + +#else + +static inline bool kvmhv_is_nestedv2(void) +{ + return false; +} + +static inline bool 
kvmhv_is_nestedv1(void) +{ + return false; +} + +#endif + +int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs); +int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs); +int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden); +int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden); + +static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, + struct pt_regs *regs) +{ + if (kvmhv_is_nestedv2()) + return __kvmhv_nestedv2_reload_ptregs(vcpu, regs); + return 0; +} +static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, + struct pt_regs *regs) +{ + if (kvmhv_is_nestedv2()) + return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs); + return 0; +} + +static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden) +{ + if (kvmhv_is_nestedv2()) + return __kvmhv_nestedv2_mark_dirty(vcpu, iden); + return 0; +} + +static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden) +{ + if (kvmhv_is_nestedv2()) + return __kvmhv_nestedv2_cached_reload(vcpu, iden); + return 0; +} + extern int kvm_irq_bypass; static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) @@ -335,60 +400,72 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) { vcpu->arch.regs.gpr[num] = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num)); } static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0); return vcpu->arch.regs.gpr[num]; } static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { vcpu->arch.regs.ccr = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR); } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0); return vcpu->arch.regs.ccr; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) { vcpu->arch.regs.xer = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER); } static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0); return vcpu->arch.regs.xer; } static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) { vcpu->arch.regs.ctr = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR); } static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0); return vcpu->arch.regs.ctr; } static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) { vcpu->arch.regs.link = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR); } static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0); return vcpu->arch.regs.link; } static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) { vcpu->arch.regs.nip = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA); } static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0); return vcpu->arch.regs.nip; } @@ -405,27 +482,32 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0); return vcpu->arch.fp.fpr[i][TS_FPROFFSET]; } static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val) { 
vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i)); } static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0); return vcpu->arch.fp.fpscr; } static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val) { vcpu->arch.fp.fpscr = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR); } static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0); return vcpu->arch.fp.fpr[i][j]; } @@ -433,11 +515,13 @@ static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, u64 val) { vcpu->arch.fp.fpr[i][j] = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i)); } #ifdef CONFIG_ALTIVEC static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0); *v = vcpu->arch.vr.vr[i]; } @@ -445,75 +529,86 @@ static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *val) { vcpu->arch.vr.vr[i] = *val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i)); } static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu) { + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0); return vcpu->arch.vr.vscr.u[3]; } static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val) { vcpu->arch.vr.vscr.u[3] = val; + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR); } #endif -#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size) \ +#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ { \ \ vcpu->arch.reg = val; \ + kvmhv_nestedv2_mark_dirty(vcpu, iden); \ } -#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size) \ +#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ { \ + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \ return vcpu->arch.reg; \ } -#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size) \ - KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size) \ - KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size) \ +#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden) \ + KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \ + KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \ -KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32) -KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64) -KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64) -KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64) -KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64) -KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64) -KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64) +KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR) +KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR) +KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR) +KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR) +KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR) +KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC) +KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE) -#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size) \ +#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ { \ vcpu->arch.vcore->reg = val; \ + kvmhv_nestedv2_mark_dirty(vcpu, iden); \ } -#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size) \ +#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ { \ + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \ 
return vcpu->arch.vcore->reg; \
}

-#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size) \
- KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size) \
- KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \

-KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64)
-KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64)
-KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32)
-KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)

 static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
 {
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
 return vcpu->arch.dec_expires;
 }

 static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
 {
 vcpu->arch.dec_expires = val;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
 }

 /* Expiry time of vcpu DEC relative to host TB */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 572f9bbf1a25..2477021bff54 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -677,6 +677,12 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
 extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
 unsigned long ea, unsigned *hshift);
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit);
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 429b53bc1773..8799b37be295 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <asm/guest-state-buffer.h>

 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS

@@ -509,6 +510,23 @@ union xive_tma_w01 {
 __be64 w01;
 };

+ /* Nestedv2 H_GUEST_RUN_VCPU configuration */
+struct kvmhv_nestedv2_config {
+ struct kvmppc_gs_buff_info vcpu_run_output_cfg;
+ struct kvmppc_gs_buff_info vcpu_run_input_cfg;
+ u64 vcpu_run_output_size;
+};
+
+ /* Nestedv2 L1<->L0 communication state */
+struct kvmhv_nestedv2_io {
+ struct kvmhv_nestedv2_config cfg;
+ struct kvmppc_gs_buff *vcpu_run_output;
+ struct kvmppc_gs_buff *vcpu_run_input;
+ struct kvmppc_gs_msg *vcpu_message;
+ struct kvmppc_gs_msg *vcore_message;
+ struct kvmppc_gs_bitmap valids;
+};
+
 struct kvm_vcpu_arch {
 ulong host_stack;
 u32 host_pid;
@@ -829,6 +847,8 @@ struct kvm_vcpu_arch {
 u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
 u32 nested_vcpu_id;
 gpa_t nested_io_gpr;
+ /* For nested APIv2 guests*/
+ struct kvmhv_nestedv2_io nestedv2_io;
 #endif

 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 
004998b267e6..db8bfaaa4536 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -615,6 +615,42 @@ static inline bool kvmhv_on_pseries(void) { return false; } + +#endif + +#ifndef CONFIG_PPC_BOOK3S + +static inline bool kvmhv_is_nestedv2(void) +{ + return false; +} + +static inline bool kvmhv_is_nestedv1(void) +{ + return false; +} + +static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, + struct pt_regs *regs) +{ + return 0; +} +static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, + struct pt_regs *regs) +{ + return 0; +} + +static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden) +{ + return 0; +} + +static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden) +{ + return 0; +} + #endif #ifdef CONFIG_KVM_XICS @@ -939,27 +975,32 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \ mtspr(bookehv_spr, val); \ } \ -#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size) \ +#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ { \ + if (iden) \ + WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \ if (kvmppc_shared_big_endian(vcpu)) \ return be##size##_to_cpu(vcpu->arch.shared->reg); \ else \ return le##size##_to_cpu(vcpu->arch.shared->reg); \ } \ -#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size) \ +#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ { \ if (kvmppc_shared_big_endian(vcpu)) \ vcpu->arch.shared->reg = cpu_to_be##size(val); \ else \ vcpu->arch.shared->reg = cpu_to_le##size(val); \ + \ + if (iden) \ + kvmhv_nestedv2_mark_dirty(vcpu, iden); \ } \ -#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size) \ - KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size) \ - KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size) \ +#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \ + KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \ + KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \ #define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \ @@ -967,39 +1008,40 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ #ifdef CONFIG_KVM_BOOKE_HV -#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr) \ +#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \ #else -#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr) \ - KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size) \ +#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \ + KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \ #endif -KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64) -KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0) -KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1) -KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2) -KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3) -KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0) -KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1) -KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR) -KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR) 
-KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64, 0) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0, KVMPPC_GSID_SPRG0) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1, KVMPPC_GSID_SPRG1) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2, KVMPPC_GSID_SPRG2) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3, KVMPPC_GSID_SPRG3) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0, KVMPPC_GSID_SRR0) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1, KVMPPC_GSID_SRR1) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR, KVMPPC_GSID_DAR) +KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR, 0) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64, KVMPPC_GSID_MSR) static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) { if (kvmppc_shared_big_endian(vcpu)) vcpu->arch.shared->msr = cpu_to_be64(val); else vcpu->arch.shared->msr = cpu_to_le64(val); + kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR); } -KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32) -KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32) -KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64) -KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64) -KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64) -KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32, KVMPPC_GSID_DSISR) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32, 0) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64, 0) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64, 0) +KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64, 0) static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr) { diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 8d26f2537586..b3ee44a40c2f 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -343,6 +344,212 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) return rc; } +static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + unsigned long token; + long rc; + + token = -1UL; + do { + rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token); + if (rc == H_SUCCESS) + *guest_id = retbuf[0]; + + if (rc == H_BUSY) { + token = retbuf[0]; + cond_resched(); + } + + if (H_IS_LONG_BUSY(rc)) { + token = retbuf[0]; + msleep(get_longbusy_msecs(rc)); + rc = H_BUSY; + } + + } while (rc == H_BUSY); + + return rc; +} + +static inline long plpar_guest_create_vcpu(unsigned long flags, + unsigned long guest_id, + unsigned long vcpu_id) +{ + long rc; + + do { + rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id); + + if (rc == H_BUSY) + cond_resched(); + + if (H_IS_LONG_BUSY(rc)) { + msleep(get_longbusy_msecs(rc)); + rc = H_BUSY; + } + + } while (rc == H_BUSY); + + return rc; +} + +static inline long plpar_guest_set_state(unsigned long flags, + unsigned long guest_id, + unsigned long vcpu_id, + unsigned long data_buffer, + unsigned long data_size, + unsigned long *failed_index) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + while (true) { + rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id, + vcpu_id, data_buffer, data_size); + + if (rc == H_BUSY) { + cpu_relax(); + continue; + } + + if 
(H_IS_LONG_BUSY(rc)) { + mdelay(get_longbusy_msecs(rc)); + continue; + } + + if (rc == H_INVALID_ELEMENT_ID) + *failed_index = retbuf[0]; + else if (rc == H_INVALID_ELEMENT_SIZE) + *failed_index = retbuf[0]; + else if (rc == H_INVALID_ELEMENT_VALUE) + *failed_index = retbuf[0]; + + break; + } + + return rc; +} + +static inline long plpar_guest_get_state(unsigned long flags, + unsigned long guest_id, + unsigned long vcpu_id, + unsigned long data_buffer, + unsigned long data_size, + unsigned long *failed_index) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + while (true) { + rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id, + vcpu_id, data_buffer, data_size); + + if (rc == H_BUSY) { + cpu_relax(); + continue; + } + + if (H_IS_LONG_BUSY(rc)) { + mdelay(get_longbusy_msecs(rc)); + continue; + } + + if (rc == H_INVALID_ELEMENT_ID) + *failed_index = retbuf[0]; + else if (rc == H_INVALID_ELEMENT_SIZE) + *failed_index = retbuf[0]; + else if (rc == H_INVALID_ELEMENT_VALUE) + *failed_index = retbuf[0]; + + break; + } + + return rc; +} + +static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id, + unsigned long vcpu_id, int *trap, + unsigned long *failed_index) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id); + if (rc == H_SUCCESS) + *trap = retbuf[0]; + else if (rc == H_INVALID_ELEMENT_ID) + *failed_index = retbuf[0]; + else if (rc == H_INVALID_ELEMENT_SIZE) + *failed_index = retbuf[0]; + else if (rc == H_INVALID_ELEMENT_VALUE) + *failed_index = retbuf[0]; + + return rc; +} + +static inline long plpar_guest_delete(unsigned long flags, u64 guest_id) +{ + long rc; + + do { + rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id); + if (rc == H_BUSY) + cond_resched(); + + if (H_IS_LONG_BUSY(rc)) { + msleep(get_longbusy_msecs(rc)); + rc = H_BUSY; + } + + } while (rc == H_BUSY); + + return rc; +} + +static inline long plpar_guest_set_capabilities(unsigned long flags, + unsigned long capabilities) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + do { + rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities); + if (rc == H_BUSY) + cond_resched(); + + if (H_IS_LONG_BUSY(rc)) { + msleep(get_longbusy_msecs(rc)); + rc = H_BUSY; + } + } while (rc == H_BUSY); + + return rc; +} + +static inline long plpar_guest_get_capabilities(unsigned long flags, + unsigned long *capabilities) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + do { + rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags); + if (rc == H_BUSY) + cond_resched(); + + if (H_IS_LONG_BUSY(rc)) { + msleep(get_longbusy_msecs(rc)); + rc = H_BUSY; + } + } while (rc == H_BUSY); + + if (rc == H_SUCCESS) + *capabilities = retbuf[0]; + + return rc; +} + /* * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately * @@ -407,6 +614,62 @@ static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type, return 0; } +static inline long plpar_guest_create_vcpu(unsigned long flags, + unsigned long guest_id, + unsigned long vcpu_id) +{ + return 0; +} + +static inline long plpar_guest_get_state(unsigned long flags, + unsigned long guest_id, + unsigned long vcpu_id, + unsigned long data_buffer, + unsigned long data_size, + unsigned long *failed_index) +{ + return 0; +} + +static inline long plpar_guest_set_state(unsigned long flags, + unsigned long guest_id, + unsigned long vcpu_id, + unsigned long data_buffer, + unsigned long data_size, + unsigned long 
*failed_index) +{ + return 0; +} + +static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id, + unsigned long vcpu_id, int *trap, + unsigned long *failed_index) +{ + return 0; +} + +static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id) +{ + return 0; +} + +static inline long plpar_guest_delete(unsigned long flags, u64 guest_id) +{ + return 0; +} + +static inline long plpar_guest_get_capabilities(unsigned long flags, + unsigned long *capabilities) +{ + return 0; +} + +static inline long plpar_guest_set_capabilities(unsigned long flags, + unsigned long capabilities) +{ + return 0; +} + #endif /* CONFIG_PPC_PSERIES */ #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ -- cgit From b7bce570430e42229fb63f775fcbb10f38b83c71 Mon Sep 17 00:00:00 2001 From: Benjamin Gray Date: Wed, 11 Oct 2023 16:37:06 +1100 Subject: powerpc/kvm: Force cast endianness of KVM shared regs Sparse reports endianness mismatches in the KVM shared regs getter and setter helpers. This code has dynamic endianness behind a safe interface, so a force is warranted here to tell sparse this is OK. Signed-off-by: Benjamin Gray Signed-off-by: Michael Ellerman Link: https://msgid.link/20231011053711.93427-8-bgray@linux.ibm.com --- arch/powerpc/include/asm/kvm_ppc.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index db8bfaaa4536..3281215097cc 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -981,18 +981,18 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ if (iden) \ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \ if (kvmppc_shared_big_endian(vcpu)) \ - return be##size##_to_cpu(vcpu->arch.shared->reg); \ + return be##size##_to_cpu((__be##size __force)vcpu->arch.shared->reg); \ else \ - return le##size##_to_cpu(vcpu->arch.shared->reg); \ + return le##size##_to_cpu((__le##size __force)vcpu->arch.shared->reg); \ } \ #define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ { \ if (kvmppc_shared_big_endian(vcpu)) \ - vcpu->arch.shared->reg = cpu_to_be##size(val); \ + vcpu->arch.shared->reg = (u##size __force)cpu_to_be##size(val); \ else \ - vcpu->arch.shared->reg = cpu_to_le##size(val); \ + vcpu->arch.shared->reg = (u##size __force)cpu_to_le##size(val); \ \ if (iden) \ kvmhv_nestedv2_mark_dirty(vcpu, iden); \ -- cgit
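
Editorial note on the guest-state-buffer helpers in the first patch above: kvmppc_gsb_send_datum() composes the message helpers in a fixed sequence, include the identity in the message's element set, rebuild and send the buffer, then reset the message so the single-element send leaves no residue for later bulk sends. The following is a minimal userspace sketch of that sequence, not the kernel API: the gs_msg/gs_buff types, the bitmap representation, and the printf "send" are all invented stand-ins.

/* Toy model of the include -> fill -> send -> reset sequence used by
 * kvmppc_gsb_send_datum(); the message is just a bitmap of element
 * identities here and "sending" merely prints them.
 */
#include <stdint.h>
#include <stdio.h>

struct gs_msg {
    uint64_t want;      /* bitmap of element identities to send */
};

struct gs_buff {
    uint64_t packed;    /* stand-in for the serialized elements */
};

static void gsm_include(struct gs_msg *gsm, unsigned int iden)
{
    gsm->want |= 1ULL << iden;
}

static void gsm_reset(struct gs_msg *gsm)
{
    gsm->want = 0;
}

static int gsb_send_data(struct gs_buff *gsb, struct gs_msg *gsm)
{
    gsb->packed = gsm->want;    /* "fill" the buffer from the message */
    printf("sending elements %#llx\n",
           (unsigned long long)gsb->packed);    /* "send" it */
    return 0;
}

/* Single-element send: include one identity, send, then reset the
 * message so later bulk sends are not polluted by this identity.
 */
static int gsb_send_datum(struct gs_buff *gsb, struct gs_msg *gsm,
                          unsigned int iden)
{
    int rc;

    gsm_include(gsm, iden);
    rc = gsb_send_data(gsb, gsm);
    if (rc < 0)
        return rc;
    gsm_reset(gsm);
    return 0;
}

int main(void)
{
    struct gs_buff gsb = { 0 };
    struct gs_msg gsm = { 0 };

    return gsb_send_datum(&gsb, &gsm, 3);
}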
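The accessor changes in kvm_book3s.h all follow one pattern: a getter first pulls a possibly stale guest-state element from the L0 hypervisor into the L1's shadow copy (kvmhv_nestedv2_cached_reload) and then reads the shadow, while a setter writes the shadow and marks the element dirty (kvmhv_nestedv2_mark_dirty) so it is pushed back before the vCPU next runs. A minimal userspace sketch of that load-on-demand/mark-dirty discipline follows; the vcpu_model type, the fixed identity space, and the fake hcall result are stand-ins, not the kernel code.

/* Hypothetical model of nestedv2 lazy-reload getters and dirty-marking
 * setters; reload_from_l0() stands in for a H_GUEST_GET_STATE round trip.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_IDS 64

struct vcpu_model {
    uint64_t state[NR_IDS];     /* L1's cached copy of guest state */
    bool valid[NR_IDS];         /* element matches the L0's copy */
    bool dirty[NR_IDS];         /* element must be sent to the L0 */
};

/* Fetch one element from the L0 only if the cached copy is stale. */
static int reload_from_l0(struct vcpu_model *v, int iden)
{
    if (!v->valid[iden]) {
        v->state[iden] = 0xdeadbeefULL;     /* pretend hcall result */
        v->valid[iden] = true;
    }
    return 0;
}

/* Setter side: update the shadow and mark the element for write-back
 * before the next run-vcpu hcall.
 */
static void set_reg(struct vcpu_model *v, int iden, uint64_t val)
{
    v->state[iden] = val;
    v->dirty[iden] = true;
}

/* Getter side: reload a stale element before reading it, as the
 * kernel's WARN_ON(kvmhv_nestedv2_cached_reload(...) < 0) does.
 */
static uint64_t get_reg(struct vcpu_model *v, int iden)
{
    if (reload_from_l0(v, iden) < 0)
        fprintf(stderr, "reload of id %d failed\n", iden);
    return v->state[iden];
}

int main(void)
{
    struct vcpu_model v = { 0 };

    set_reg(&v, 3, 42);     /* marks id 3 dirty */
    printf("id 7 = %#llx\n", (unsigned long long)get_reg(&v, 7));
    return 0;
}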
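The plpar_guest_* wrappers in plpar_wrappers.h share one retry convention: plain H_BUSY means yield (cond_resched()) and try again, while a long-busy return carries a hint of how many milliseconds to sleep (get_longbusy_msecs()) before retrying and is folded back into H_BUSY so a single loop condition covers both cases. Below is a compilable toy model of that loop, assuming invented H_* values and a fake hcall that is busy a few times; the real wrappers also capture retbuf[0] outputs, which this sketch omits.

/* Toy model of the H_BUSY / long-busy retry convention. */
#include <stdio.h>

#define H_SUCCESS           0
#define H_BUSY              1
/* invented stand-ins for the long-busy range and its sleep hints */
#define H_LONG_BUSY_START   9900
#define H_LONG_BUSY_END     9905

static int busy_left = 3;   /* fake hypervisor is busy three times */

static long fake_hcall(void)
{
    return busy_left-- > 0 ? H_BUSY : H_SUCCESS;
}

static int is_long_busy(long rc)
{
    return rc >= H_LONG_BUSY_START && rc <= H_LONG_BUSY_END;
}

static long issue_with_retry(void)
{
    long rc;

    do {
        rc = fake_hcall();
        /* A long-busy return carries a sleep hint; the wrappers
         * sleep for it and normalize the code to H_BUSY so one
         * loop condition handles both kinds of busy.
         */
        if (is_long_busy(rc))
            rc = H_BUSY;
        /* On plain H_BUSY the wrappers yield before retrying. */
    } while (rc == H_BUSY);

    return rc;
}

int main(void)
{
    printf("rc = %ld\n", issue_with_retry());
    return 0;
}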
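On the final patch: the shared-regs accessors store each field in whichever byte order the guest negotiated and convert on every access, which is why sparse sees an endian mismatch; the __force casts only silence the checker and generate no code. As a rough userspace analogue of that dynamic-endianness interface (glibc's htobe64/be64toh standing in for the kernel's cpu_to_be64/be64_to_cpu, with no sparse annotations):

/* Userspace model of an accessor pair over a field whose byte order
 * is chosen at runtime; callers always see native-endian values.
 */
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct shared_page {
    uint64_t msr;   /* stored in the guest's chosen byte order */
};

struct vcpu_model {
    struct shared_page shared;
    bool shared_big_endian;
};

static void set_msr(struct vcpu_model *v, uint64_t val)
{
    /* Convert from native order to the stored order on the way in. */
    if (v->shared_big_endian)
        v->shared.msr = htobe64(val);
    else
        v->shared.msr = htole64(val);
}

static uint64_t get_msr(struct vcpu_model *v)
{
    /* Convert from the stored order back to native on the way out. */
    if (v->shared_big_endian)
        return be64toh(v->shared.msr);
    return le64toh(v->shared.msr);
}

int main(void)
{
    struct vcpu_model v = { .shared_big_endian = true };

    set_msr(&v, 0x8000000000000000ULL);
    printf("msr = %#llx\n", (unsigned long long)get_msr(&v));
    return 0;
}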